[Mlir-commits] [mlir] [MLIR] VectorEmulateNarrowType support unaligned cases (PR #113411)
llvmlistbot at llvm.org
Tue Oct 22 20:11:45 PDT 2024
https://github.com/lialan created https://github.com/llvm/llvm-project/pull/113411
Previously the pass only supported emulation of vectors whose total size is a multiple of the emulated data type (i8). This patch expands its support to vector sizes that are not a multiple of the byte size, such as `vector<3xi2>`.
A limitation of this patch is that the linearized index of the unaligned vector has to be known at compile time; handling a dynamic index would require emitting extra code and is left unimplemented for now.
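As a worked example of the index arithmetic (mirroring the `vector_load_i2` test added in this patch): a load of `vector<3xi2>` at indices `[2, 0]` of a `memref<3x3xi2>` linearizes to element index 2 * 3 + 0 = 6. With 8 / 2 = 4 `i2` elements per `i8` byte, that is byte offset 6 / 4 = 1 with a front padding of 6 mod 4 = 2 elements, so the emulation loads `vector<2xi8>` starting at byte 1, bitcasts it to `vector<8xi2>`, and extracts the 3 requested elements starting at offset 2. When the linearized index is dynamic, the front padding cannot be folded to a constant and the patterns bail out. An MLIR sketch of the rewritten load follows the op list below.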
The following ops are updated:
* `vector::LoadOp`
* `vector::StoreOp`
* `vector::TransferReadOp`
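For illustration, the rewrite of the unaligned load case looks roughly as follows (condensed from the new `vector-emulate-narrow-type-unaligned.mlir` test; the SSA names are abbreviated for readability and are not the names the pass produces):

    // Before:
    %1 = vector.load %0[%c2, %c0] : memref<3x3xi2>, vector<3xi2>

    // After emulation with i8:
    %alloc = memref.alloc() : memref<3xi8>
    %c1 = arith.constant 1 : index
    %load = vector.load %alloc[%c1] : memref<3xi8>, vector<2xi8>
    %cast = vector.bitcast %load : vector<2xi8> to vector<8xi2>
    %res = vector.extract_strided_slice %cast
           {offsets = [2], sizes = [3], strides = [1]}
           : vector<8xi2> to vector<3xi2>

The store case follows the same pattern, but additionally loads the surrounding bytes, inserts the value to store at the front padding offset with `vector.insert_strided_slice`, and writes the combined bytes back.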
From ec5c0475f8a43eef19a1b166dc700ecd1b6daedd Mon Sep 17 00:00:00 2001
From: Ubuntu <450283+lialan at users.noreply.github.com>
Date: Sun, 20 Oct 2024 14:54:57 +0000
Subject: [PATCH] [MLIR] VectorEmulateNarrowType support unaligned cases
Previously the pass only supported emulation of vectors whose total size
is a multiple of the emulated data type (i8). This patch expands its
support to vector sizes that are not a multiple of the byte size,
such as `vector<3xi2>`.
A limitation of this patch is that the linearized index of the unaligned
vector has to be known at compile time. Extra code needs to be emitted
to handle it if the condition does not hold.
The following ops are updated:
* `vector::LoadOp`
* `vector::StoreOp`
* `vector::TransferReadOp`
---
.../mlir/Dialect/MemRef/Utils/MemRefUtils.h | 9 +-
mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp | 9 +-
.../Transforms/VectorEmulateNarrowType.cpp | 162 +++++++++++++++---
.../vector-emulate-narrow-type-unaligned.mlir | 55 ++++++
4 files changed, 204 insertions(+), 31 deletions(-)
create mode 100644 mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
diff --git a/mlir/include/mlir/Dialect/MemRef/Utils/MemRefUtils.h b/mlir/include/mlir/Dialect/MemRef/Utils/MemRefUtils.h
index ca3326dbbef519..db32543162b781 100644
--- a/mlir/include/mlir/Dialect/MemRef/Utils/MemRefUtils.h
+++ b/mlir/include/mlir/Dialect/MemRef/Utils/MemRefUtils.h
@@ -32,7 +32,8 @@ namespace memref {
bool isStaticShapeAndContiguousRowMajor(MemRefType type);
/// For a `memref` with `offset`, `sizes` and `strides`, returns the
-/// offset and size to use for the linearized `memref`.
+/// offset, size, and the size of the front padding to use for the
+/// linearized `memref`.
/// - If the linearization is done for emulating load/stores of
/// element type with bitwidth `srcBits` using element type with
/// bitwidth `dstBits`, the linearized offset and size are
@@ -42,9 +43,15 @@ bool isStaticShapeAndContiguousRowMajor(MemRefType type);
/// index to use in the linearized `memref`. The linearized index
/// is also scaled down by `dstBits`/`srcBits`. If `indices` is not provided
/// 0, is returned for the linearized index.
+/// - If the size of the load/store is smaller than the size of the
+///   linearized memref load/store, the emulated memory region is larger
+///   than the memory region actually needed. `frontPaddingSize` returns
+///   the number of irrelevant leading elements in the emulated region,
+///   i.e. the offset at which the requested data begins.
struct LinearizedMemRefInfo {
OpFoldResult linearizedOffset;
OpFoldResult linearizedSize;
+ OpFoldResult frontPaddingSize;
};
std::pair<LinearizedMemRefInfo, OpFoldResult> getLinearizedMemRefOffsetAndSize(
OpBuilder &builder, Location loc, int srcBits, int dstBits,
diff --git a/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp b/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp
index 7321b19068016c..69724bec248827 100644
--- a/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp
+++ b/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp
@@ -81,11 +81,10 @@ std::pair<LinearizedMemRefInfo, OpFoldResult> getLinearizedMemRefOffsetAndSize(
// Adjust linearizedIndices and size by the scale factor (dstBits / srcBits).
int64_t scaler = dstBits / srcBits;
- addMulMap = addMulMap.floorDiv(scaler);
mulMap = mulMap.floorDiv(scaler);
OpFoldResult linearizedIndices = affine::makeComposedFoldedAffineApply(
- builder, loc, addMulMap, offsetValues);
+ builder, loc, addMulMap.floorDiv(scaler), offsetValues);
OpFoldResult linearizedSize =
affine::makeComposedFoldedAffineApply(builder, loc, mulMap, sizes);
@@ -95,7 +94,11 @@ std::pair<LinearizedMemRefInfo, OpFoldResult> getLinearizedMemRefOffsetAndSize(
OpFoldResult adjustBaseOffset = affine::makeComposedFoldedAffineApply(
builder, loc, s0.floorDiv(scaler), {offset});
- return {{adjustBaseOffset, linearizedSize}, linearizedIndices};
+ OpFoldResult frontPaddingSize = affine::makeComposedFoldedAffineApply(
+ builder, loc, addMulMap % scaler, offsetValues);
+
+ return {{adjustBaseOffset, linearizedSize, frontPaddingSize},
+ linearizedIndices};
}
LinearizedMemRefInfo
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
index 66362d3ca70fb6..42a9a2ab12196a 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
@@ -24,6 +24,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
+#include <optional>
using namespace mlir;
@@ -102,6 +103,23 @@ static FailureOr<Operation *> getCompressedMaskOp(OpBuilder &rewriter,
return newMask;
}
+/// Returns the front padding size, or std::nullopt if it is not a constant.
+static std::optional<int64_t>
+getFrontPaddingSize(ConversionPatternRewriter &rewriter, Location loc,
+ const memref::LinearizedMemRefInfo linearizedInfo,
+ bool isUnalignedEmulation) {
+ if (!isUnalignedEmulation)
+ return 0;
+ auto foldedFrontPaddingSize = getValueOrCreateConstantIndexOp(
+ rewriter, loc, linearizedInfo.frontPaddingSize);
+ // try to fold the front padding size into a constant
+ if (auto frontPadding = dyn_cast_or_null<arith::ConstantIndexOp>(
+ foldedFrontPaddingSize.getDefiningOp())) {
+ return frontPadding.value();
+ }
+ return std::nullopt;
+}
+
namespace {
//===----------------------------------------------------------------------===//
@@ -142,14 +160,17 @@ struct ConvertVectorStore final : OpConversionPattern<vector::StoreOp> {
// vector<4xi8>
auto origElements = op.getValueToStore().getType().getNumElements();
- if (origElements % scale != 0)
- return failure();
+
+    // If the size of the vector being stored is not byte-aligned, extra
+    // handling is needed.
+ bool isUnalignedEmulation = origElements % scale != 0;
auto stridedMetadata =
rewriter.create<memref::ExtractStridedMetadataOp>(loc, op.getBase());
OpFoldResult linearizedIndices;
- std::tie(std::ignore, linearizedIndices) =
+ memref::LinearizedMemRefInfo linearizedInfo;
+ std::tie(linearizedInfo, linearizedIndices) =
memref::getLinearizedMemRefOffsetAndSize(
rewriter, loc, srcBits, dstBits,
stridedMetadata.getConstifiedMixedOffset(),
@@ -157,14 +178,48 @@ struct ConvertVectorStore final : OpConversionPattern<vector::StoreOp> {
stridedMetadata.getConstifiedMixedStrides(),
getAsOpFoldResult(adaptor.getIndices()));
- auto numElements = origElements / scale;
- auto bitCast = rewriter.create<vector::BitCastOp>(
- loc, VectorType::get(numElements, newElementType),
- op.getValueToStore());
+ auto foldedFrontPaddingSize = getFrontPaddingSize(
+ rewriter, loc, linearizedInfo, isUnalignedEmulation);
- rewriter.replaceOpWithNewOp<vector::StoreOp>(
- op, bitCast.getResult(), adaptor.getBase(),
- getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices));
+ if (!foldedFrontPaddingSize) {
+ // unimplemented case for dynamic front padding size
+ return failure();
+ }
+
+ auto numElements =
+ (*foldedFrontPaddingSize + origElements + scale - 1) / scale;
+ auto newVectorType = VectorType::get(numElements, newElementType);
+
+ if (isUnalignedEmulation) {
+ auto insertedVectorType =
+ VectorType::get(numElements * scale, oldElementType);
+
+ auto linearizedIndicesValue =
+ getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices);
+ auto passThru =
+ rewriter.create<vector::LoadOp>(loc, newVectorType, adaptor.getBase(),
+ ValueRange{linearizedIndicesValue});
+ auto bitcastedPassThru =
+ rewriter.create<vector::BitCastOp>(loc, insertedVectorType, passThru);
+
+      // Insert the value to store into the loaded vector at the front padding offset.
+ auto insertStridedSlice = rewriter.create<vector::InsertStridedSliceOp>(
+ loc, insertedVectorType, op.getValueToStore(), bitcastedPassThru,
+ rewriter.getI64ArrayAttr({*foldedFrontPaddingSize}),
+ rewriter.getI64ArrayAttr({1}));
+      // Bitcast back to the byte-aligned element type before storing.
+ auto bitCast = rewriter.create<vector::BitCastOp>(loc, newVectorType,
+ insertStridedSlice);
+
+ rewriter.replaceOpWithNewOp<vector::StoreOp>(
+ op, bitCast.getResult(), adaptor.getBase(), linearizedIndicesValue);
+ } else {
+ auto bitCast = rewriter.create<vector::BitCastOp>(loc, newVectorType,
+ op.getValueToStore());
+ rewriter.replaceOpWithNewOp<vector::StoreOp>(
+ op, bitCast.getResult(), adaptor.getBase(),
+ getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices));
+ }
return success();
}
};
@@ -294,19 +349,31 @@ struct ConvertVectorLoad final : OpConversionPattern<vector::LoadOp> {
// %1 = vector.load %0[%linear_index] : memref<6xi8>, vector<2xi8>
// %2 = vector.bitcast %1 : vector<2xi8> to vector<4xi4>
//
- // TODO: Currently, only the even number of elements loading is supported.
- // To deal with the odd number of elements, one has to extract the
- // subvector at the proper offset after bit-casting.
+ // There are cases where the number of elements to load is not byte-aligned,
+ // for example:
+ //
+ // %1 = vector.load %0[%c1, %c0] : memref<3x3xi2>, vector<3xi2>
+ //
+ // we will have to load extra bytes and extract the exact slice in between.
+ //
+ // %1 = vector.load %0[%c2] : memref<3xi8>, vector<2xi8>
+ // %2 = vector.bitcast %1 : vector<2xi8> to vector<8xi2>
+    //   %3 = vector.extract_strided_slice %2
+    //        {offsets = [2], sizes = [3], strides = [1]}
+    //        : vector<8xi2> to vector<3xi2>
+ //
+ // TODO: Currently the extract_strided_slice's attributes must be known at
+ // compile time as they must be constants.
auto origElements = op.getVectorType().getNumElements();
- if (origElements % scale != 0)
- return failure();
+ bool isUnalignedEmulation = origElements % scale != 0;
auto stridedMetadata =
rewriter.create<memref::ExtractStridedMetadataOp>(loc, op.getBase());
OpFoldResult linearizedIndices;
- std::tie(std::ignore, linearizedIndices) =
+ memref::LinearizedMemRefInfo linearizedInfo;
+ std::tie(linearizedInfo, linearizedIndices) =
memref::getLinearizedMemRefOffsetAndSize(
rewriter, loc, srcBits, dstBits,
stridedMetadata.getConstifiedMixedOffset(),
@@ -314,15 +381,35 @@ struct ConvertVectorLoad final : OpConversionPattern<vector::LoadOp> {
stridedMetadata.getConstifiedMixedStrides(),
getAsOpFoldResult(adaptor.getIndices()));
- auto numElements = (origElements + scale - 1) / scale;
+ auto foldedFrontPaddingSize = getFrontPaddingSize(
+ rewriter, loc, linearizedInfo, isUnalignedEmulation);
+
+ if (!foldedFrontPaddingSize) {
+ // unimplemented case for dynamic front padding size
+ return failure();
+ }
+
+ auto numElements =
+ (*foldedFrontPaddingSize + origElements + scale - 1) / scale;
+ auto loadVectorType = VectorType::get(numElements, newElementType);
auto newLoad = rewriter.create<vector::LoadOp>(
- loc, VectorType::get(numElements, newElementType), adaptor.getBase(),
+ loc, loadVectorType, adaptor.getBase(),
getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices));
+ auto newBitCastType = VectorType::get(numElements * scale, oldElementType);
auto bitCast =
- rewriter.create<vector::BitCastOp>(loc, op.getType(), newLoad);
-
- rewriter.replaceOp(op, bitCast->getResult(0));
+ rewriter.create<vector::BitCastOp>(loc, newBitCastType, newLoad);
+
+ if (newBitCastType.getNumElements() != origElements) {
+ auto extractStridedSlice = rewriter.create<vector::ExtractStridedSliceOp>(
+ loc, op.getType(), bitCast,
+ rewriter.getI64ArrayAttr({*foldedFrontPaddingSize}),
+ rewriter.getI64ArrayAttr({origElements}),
+ rewriter.getI64ArrayAttr({1}));
+ rewriter.replaceOp(op, extractStridedSlice.getResult());
+ } else {
+ rewriter.replaceOp(op, bitCast->getResult(0));
+ }
return success();
}
};
@@ -464,8 +551,8 @@ struct ConvertVectorTransferRead final
int scale = dstBits / srcBits;
auto origElements = op.getVectorType().getNumElements();
- if (origElements % scale != 0)
- return failure();
+
+ bool isUnalignedEmulation = origElements % scale != 0;
auto newPadding = rewriter.create<arith::ExtUIOp>(loc, newElementType,
adaptor.getPadding());
@@ -474,7 +561,8 @@ struct ConvertVectorTransferRead final
rewriter.create<memref::ExtractStridedMetadataOp>(loc, op.getSource());
OpFoldResult linearizedIndices;
- std::tie(std::ignore, linearizedIndices) =
+ memref::LinearizedMemRefInfo linearizedInfo;
+ std::tie(linearizedInfo, linearizedIndices) =
memref::getLinearizedMemRefOffsetAndSize(
rewriter, loc, srcBits, dstBits,
stridedMetadata.getConstifiedMixedOffset(),
@@ -482,7 +570,16 @@ struct ConvertVectorTransferRead final
stridedMetadata.getConstifiedMixedStrides(),
getAsOpFoldResult(adaptor.getIndices()));
- auto numElements = (origElements + scale - 1) / scale;
+ auto foldedFrontPaddingSize = getFrontPaddingSize(
+ rewriter, loc, linearizedInfo, isUnalignedEmulation);
+
+ if (!foldedFrontPaddingSize) {
+ // unimplemented case for dynamic front padding size
+ return failure();
+ }
+
+ auto numElements =
+ (*foldedFrontPaddingSize + origElements + scale - 1) / scale;
auto newReadType = VectorType::get(numElements, newElementType);
auto newRead = rewriter.create<vector::TransferReadOp>(
@@ -490,10 +587,21 @@ struct ConvertVectorTransferRead final
getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices),
newPadding);
+ auto bitCastType = VectorType::get(numElements * scale, oldElementType);
auto bitCast =
- rewriter.create<vector::BitCastOp>(loc, op.getType(), newRead);
+ rewriter.create<vector::BitCastOp>(loc, bitCastType, newRead);
+
+ if (isUnalignedEmulation) {
+ // we only extract a portion of the vector.
+ rewriter.replaceOpWithNewOp<vector::ExtractStridedSliceOp>(
+ op, op.getType(), bitCast,
+ rewriter.getI64ArrayAttr({*foldedFrontPaddingSize}),
+ rewriter.getI64ArrayAttr({origElements}),
+ rewriter.getI64ArrayAttr({1}));
+ } else {
+ rewriter.replaceOp(op, bitCast->getResult(0));
+ }
- rewriter.replaceOp(op, bitCast->getResult(0));
return success();
}
};
diff --git a/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir b/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
new file mode 100644
index 00000000000000..eebd7c74f44766
--- /dev/null
+++ b/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
@@ -0,0 +1,55 @@
+// RUN: mlir-opt --test-emulate-narrow-int="arith-compute-bitwidth=1 memref-load-bitwidth=8" --cse --split-input-file %s | FileCheck %s
+
+func.func @vector_load_i2(%arg1: index, %arg2: index) -> vector<3x3xi2> {
+ %0 = memref.alloc() : memref<3x3xi2>
+ %c0 = arith.constant 0 : index
+ %c2 = arith.constant 2 : index
+ %cst = arith.constant dense<0> : vector<3x3xi2>
+ %1 = vector.load %0[%c2, %c0] : memref<3x3xi2>, vector<3xi2>
+ %2 = vector.insert %1, %cst [0] : vector<3xi2> into vector<3x3xi2>
+ return %2 : vector<3x3xi2>
+}
+
+// CHECK: func @vector_load_i2
+// CHECK: %[[ALLOC:.+]] = memref.alloc() : memref<3xi8>
+// CHECK: %[[INDEX:.+]] = arith.constant 1 : index
+// CHECK: %[[VEC:.+]] = vector.load %[[ALLOC]][%[[INDEX]]] : memref<3xi8>, vector<2xi8>
+// CHECK: %[[VEC_I2:.+]] = vector.bitcast %[[VEC]] : vector<2xi8> to vector<8xi2>
+// CHECK: %[[EXTRACT:.+]] = vector.extract_strided_slice %[[VEC_I2]] {offsets = [2], sizes = [3], strides = [1]} : vector<8xi2> to vector<3xi2>
+
+// -----
+
+func.func @vector_store_i2(%arg0: vector<3xi2>) {
+ %0 = memref.alloc() : memref<3x3xi2>
+ %c0 = arith.constant 0 : index
+ %c2 = arith.constant 2 : index
+ vector.store %arg0, %0[%c2, %c0] :memref<3x3xi2>, vector<3xi2>
+ return
+}
+
+// CHECK: func @vector_store_i2
+// CHECK: %[[ALLOC:.+]] = memref.alloc() : memref<3xi8>
+// CHECK: %[[INDEX:.+]] = arith.constant 1 : index
+// CHECK: %[[LOAD:.+]] = vector.load %[[ALLOC]][%[[INDEX]]] : memref<3xi8>, vector<2xi8>
+// CHECK: %[[BITCAST1:.+]] = vector.bitcast %[[LOAD]] : vector<2xi8> to vector<8xi2>
+// CHECK: %[[INSERT:.+]] = vector.insert_strided_slice %arg0, %[[BITCAST1]] {offsets = [2], strides = [1]} : vector<3xi2> into vector<8xi2>
+// CHECK: %[[BITCAST2:.+]] = vector.bitcast %[[INSERT]] : vector<8xi2> to vector<2xi8>
+// CHECK: vector.store %[[BITCAST2]], %[[ALLOC]][%[[INDEX]]] : memref<3xi8>, vector<2xi8>
+
+// -----
+
+func.func @vector_transfer_read_i2() -> vector<3xi2> {
+ %0 = memref.alloc() : memref<3x3xi2>
+ %c0i2 = arith.constant 0 : i2
+ %c0 = arith.constant 0 : index
+ %c2 = arith.constant 2 : index
+ %1 = vector.transfer_read %0[%c2, %c0], %c0i2 {in_bounds = [true]} : memref<3x3xi2>, vector<3xi2>
+ return %1 : vector<3xi2>
+}
+
+// CHECK: func @vector_transfer_read_i2
+// CHECK: %[[ALLOC:.+]] = memref.alloc() : memref<3xi8>
+// CHECK: %[[INDEX:.+]] = arith.constant 1 : index
+// CHECK: %[[READ:.+]] = vector.transfer_read %[[ALLOC]][%[[INDEX]]], %0 : memref<3xi8>, vector<2xi8>
+// CHECK: %[[BITCAST:.+]] = vector.bitcast %[[READ]] : vector<2xi8> to vector<8xi2>
+// CHECK: vector.extract_strided_slice %[[BITCAST]] {offsets = [2], sizes = [3], strides = [1]} : vector<8xi2> to vector<3xi2>