[Mlir-commits] [mlir] b1d2687 - [mlir][IR] Remove duplicate `isLastMemrefDimUnitStride` functions
Matthias Springer
llvmlistbot at llvm.org
Mon Jul 17 07:34:27 PDT 2023
Author: Matthias Springer
Date: 2023-07-17T16:31:04+02:00
New Revision: b1d2687501f87d7158289a90a864ddf32b843d49
URL: https://github.com/llvm/llvm-project/commit/b1d2687501f87d7158289a90a864ddf32b843d49
DIFF: https://github.com/llvm/llvm-project/commit/b1d2687501f87d7158289a90a864ddf32b843d49.diff
LOG: [mlir][IR] Remove duplicate `isLastMemrefDimUnitStride` functions
This function is duplicated in various dialects (Vector, GPU, NVGPU, VectorToSCF); consolidate the duplicates into a single implementation declared in `mlir/IR/BuiltinTypes.h` and defined in `BuiltinTypes.cpp`.
Differential Revision: https://reviews.llvm.org/D155462
Added:
Modified:
mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
mlir/include/mlir/IR/BuiltinTypes.h
mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
mlir/lib/Dialect/NVGPU/IR/NVGPUDialect.cpp
mlir/lib/Dialect/Vector/IR/VectorOps.cpp
mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
mlir/lib/IR/BuiltinTypes.cpp
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
index 49a235186ecd33..4a624bd5f1ccdc 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
@@ -90,10 +90,6 @@ ArrayAttr getVectorSubscriptAttr(Builder &b, ArrayRef<int64_t> values);
Value getVectorReductionOp(arith::AtomicRMWKind op, OpBuilder &builder,
Location loc, Value vector);
-/// Return true if the last dimension of the MemRefType has unit stride. Also
-/// return true for memrefs with no strides.
-bool isLastMemrefDimUnitStride(MemRefType type);
-
/// Build the default minor identity map suitable for a vector transfer. This
/// also handles the case memref<... x vector<...>> -> vector<...> in which the
/// rank of the identity map must take the vector element type into account.
diff --git a/mlir/include/mlir/IR/BuiltinTypes.h b/mlir/include/mlir/IR/BuiltinTypes.h
index c4a3c3e68b1ee7..de363fc6370c2c 100644
--- a/mlir/include/mlir/IR/BuiltinTypes.h
+++ b/mlir/include/mlir/IR/BuiltinTypes.h
@@ -534,9 +534,13 @@ AffineExpr makeCanonicalStridedLayoutExpr(ArrayRef<int64_t> sizes,
AffineExpr makeCanonicalStridedLayoutExpr(ArrayRef<int64_t> sizes,
MLIRContext *context);
-/// Return true if the layout for `t` is compatible with strided semantics.
+/// Return "true" if the layout for `t` is compatible with strided semantics.
bool isStrided(MemRefType t);
+/// Return "true" if the last dimension of the given type has a static unit
+/// stride. Also return "true" for types with no strides.
+bool isLastMemrefDimUnitStride(MemRefType type);
+
} // namespace mlir
#endif // MLIR_IR_BUILTINTYPES_H
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index 990138549abf4c..d0c0d8fa0540f9 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -92,13 +92,11 @@ LogicalResult getMemRefAlignment(LLVMTypeConverter &typeConverter,
// Check if the last stride is non-unit or the memory space is not zero.
static LogicalResult isMemRefTypeSupported(MemRefType memRefType,
LLVMTypeConverter &converter) {
- int64_t offset;
- SmallVector<int64_t, 4> strides;
- auto successStrides = getStridesAndOffset(memRefType, strides, offset);
+ if (!isLastMemrefDimUnitStride(memRefType))
+ return failure();
FailureOr<unsigned> addressSpace =
converter.getMemRefAddressSpace(memRefType);
- if (failed(successStrides) || strides.back() != 1 || failed(addressSpace) ||
- *addressSpace != 0)
+ if (failed(addressSpace) || *addressSpace != 0)
return failure();
return success();
}
diff --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
index 69366139f8a82c..fc274c989196ca 100644
--- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
+++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
@@ -1185,14 +1185,6 @@ struct Strategy1d<TransferWriteOp> {
}
};
-/// Return true if the last dimension of the MemRefType has unit stride.
-static bool isLastMemrefDimUnitStride(MemRefType type) {
- int64_t offset;
- SmallVector<int64_t, 4> strides;
- auto successStrides = getStridesAndOffset(type, strides, offset);
- return succeeded(successStrides) && (strides.empty() || strides.back() == 1);
-}
-
/// Lower a 1D vector transfer op to SCF using scalar loads/stores. This is
/// necessary in cases where a 1D vector transfer op cannot be lowered into
/// vector load/stores due to non-unit strides or broadcasts:
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index 414b54f85758c4..f809a9627de26a 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -1546,17 +1546,6 @@ void MemcpyOp::getCanonicalizationPatterns(RewritePatternSet &results,
// GPU_SubgroupMmaLoadMatrixOp
//===----------------------------------------------------------------------===//
-/// Return true if the last dimension of the MemRefType has unit stride. Also
-/// return true for memrefs with no strides.
-static bool isLastMemrefDimUnitStride(MemRefType type) {
- int64_t offset;
- SmallVector<int64_t> strides;
- if (failed(getStridesAndOffset(type, strides, offset))) {
- return false;
- }
- return strides.back() == 1;
-}
-
LogicalResult SubgroupMmaLoadMatrixOp::verify() {
auto srcType = getSrcMemref().getType();
auto resType = getRes().getType();
diff --git a/mlir/lib/Dialect/NVGPU/IR/NVGPUDialect.cpp b/mlir/lib/Dialect/NVGPU/IR/NVGPUDialect.cpp
index c3a62f468749ee..2868660dc656ee 100644
--- a/mlir/lib/Dialect/NVGPU/IR/NVGPUDialect.cpp
+++ b/mlir/lib/Dialect/NVGPU/IR/NVGPUDialect.cpp
@@ -53,17 +53,6 @@ bool nvgpu::NVGPUDialect::hasSharedMemoryAddressSpace(MemRefType type) {
// NVGPU_DeviceAsyncCopyOp
//===----------------------------------------------------------------------===//
-/// Return true if the last dimension of the MemRefType has unit stride. Also
-/// return true for memrefs with no strides.
-static bool isLastMemrefDimUnitStride(MemRefType type) {
- int64_t offset;
- SmallVector<int64_t> strides;
- if (failed(getStridesAndOffset(type, strides, offset))) {
- return false;
- }
- return strides.back() == 1;
-}
-
LogicalResult DeviceAsyncCopyOp::verify() {
auto srcMemref = llvm::cast<MemRefType>(getSrc().getType());
auto dstMemref = llvm::cast<MemRefType>(getDst().getType());
diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index b64aec164bdca7..e4cf54cd6aaf4e 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -130,15 +130,6 @@ static bool isSupportedCombiningKind(CombiningKind combiningKind,
return false;
}
-/// Return true if the last dimension of the MemRefType has unit stride. Also
-/// return true for memrefs with no strides.
-bool mlir::vector::isLastMemrefDimUnitStride(MemRefType type) {
- int64_t offset;
- SmallVector<int64_t> strides;
- auto successStrides = getStridesAndOffset(type, strides, offset);
- return succeeded(successStrides) && (strides.empty() || strides.back() == 1);
-}
-
AffineMap mlir::vector::getTransferMinorIdentityMap(ShapedType shapedType,
VectorType vectorType) {
int64_t elementVectorRank = 0;
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
index c880eb49198e1e..9589482cd8f8d2 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
@@ -419,7 +419,7 @@ struct TransferReadToVectorLoadLowering
return rewriter.notifyMatchFailure(read, "not a memref source");
// Non-unit strides are handled by VectorToSCF.
- if (!vector::isLastMemrefDimUnitStride(memRefType))
+ if (!isLastMemrefDimUnitStride(memRefType))
return rewriter.notifyMatchFailure(read, "!= 1 stride needs VectorToSCF");
// If there is broadcasting involved then we first load the unbroadcasted
@@ -567,7 +567,7 @@ struct TransferWriteToVectorStoreLowering
});
// Non-unit strides are handled by VectorToSCF.
- if (!vector::isLastMemrefDimUnitStride(memRefType))
+ if (!isLastMemrefDimUnitStride(memRefType))
return rewriter.notifyMatchFailure(write.getLoc(), [=](Diagnostic &diag) {
diag << "most minor stride is not 1: " << write;
});
diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
index b5ebaa01e61bb4..60cff9d223d4b4 100644
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -956,10 +956,16 @@ AffineExpr mlir::makeCanonicalStridedLayoutExpr(ArrayRef<int64_t> sizes,
return makeCanonicalStridedLayoutExpr(sizes, exprs, context);
}
-/// Return true if the layout for `t` is compatible with strided semantics.
bool mlir::isStrided(MemRefType t) {
int64_t offset;
SmallVector<int64_t, 4> strides;
auto res = getStridesAndOffset(t, strides, offset);
return succeeded(res);
}
+
+bool mlir::isLastMemrefDimUnitStride(MemRefType type) {
+ int64_t offset;
+ SmallVector<int64_t> strides;
+ auto successStrides = getStridesAndOffset(type, strides, offset);
+ return succeeded(successStrides) && (strides.empty() || strides.back() == 1);
+}
More information about the Mlir-commits
mailing list