[Mlir-commits] [mlir] [MLIR] VectorEmulateNarrowType to support loading of unaligned vectors (PR #113411)
llvmlistbot at llvm.org
Thu Oct 24 09:35:29 PDT 2024
https://github.com/lialan updated https://github.com/llvm/llvm-project/pull/113411
From cfc1e1d8ad20bcb7d549e5b479f8eb994d9d16ea Mon Sep 17 00:00:00 2001
From: Ubuntu <450283+lialan at users.noreply.github.com>
Date: Sun, 20 Oct 2024 14:54:57 +0000
Subject: [PATCH 1/4] [MLIR] VectorEmulateNarrowType: support unaligned cases
Previously the pass only supported emulation of vectors whose size is a
multiple of the emulated data type (i8). This patch extends it to support
emulation of vectors whose size is not a multiple of the byte size, such as
`vector<3xi2>`; a sketch of the lowering follows the list of updated ops
below.
A limitation of this patch is that the linearized index of the unaligned
vector has to be known at compile time. Extra code would need to be emitted
to handle the case where it is not.
The following ops are updated:
* `vector::LoadOp`
* `vector::StoreOp`
* `vector::TransferReadOp`
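For example (a sketch mirroring the test added in this patch; the SSA names
are illustrative), loading `vector<3xi2>` from row 2 of a `memref<3x3xi2>`
starts at bit offset 12, which is not byte-aligned. The emulation loads the
two enclosing bytes, bitcasts them to i2, and extracts the 3-element slice
at front-padding offset 2:

  %1 = vector.load %0[%c1] : memref<3xi8>, vector<2xi8>
  %2 = vector.bitcast %1 : vector<2xi8> to vector<8xi2>
  %3 = vector.extract_strided_slice %2 {offsets = [2], sizes = [3], strides = [1]}
       : vector<8xi2> to vector<3xi2>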
---
.../mlir/Dialect/MemRef/Utils/MemRefUtils.h | 9 +-
mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp | 9 +-
.../Transforms/VectorEmulateNarrowType.cpp | 162 +++++++++++++++---
.../vector-emulate-narrow-type-unaligned.mlir | 55 ++++++
4 files changed, 204 insertions(+), 31 deletions(-)
create mode 100644 mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
diff --git a/mlir/include/mlir/Dialect/MemRef/Utils/MemRefUtils.h b/mlir/include/mlir/Dialect/MemRef/Utils/MemRefUtils.h
index ca3326dbbef519..db32543162b781 100644
--- a/mlir/include/mlir/Dialect/MemRef/Utils/MemRefUtils.h
+++ b/mlir/include/mlir/Dialect/MemRef/Utils/MemRefUtils.h
@@ -32,7 +32,8 @@ namespace memref {
bool isStaticShapeAndContiguousRowMajor(MemRefType type);
/// For a `memref` with `offset`, `sizes` and `strides`, returns the
-/// offset and size to use for the linearized `memref`.
+/// offset, size, and potentially the front padding size to use for the
+/// linearized `memref`.
/// - If the linearization is done for emulating load/stores of
/// element type with bitwidth `srcBits` using element type with
/// bitwidth `dstBits`, the linearized offset and size are
@@ -42,9 +43,15 @@ bool isStaticShapeAndContiguousRowMajor(MemRefType type);
/// index to use in the linearized `memref`. The linearized index
/// is also scaled down by `dstBits`/`srcBits`. If `indices` is not provided,
/// 0 is returned for the linearized index.
+/// - If the size of the load/store is smaller than the linearized memref
+/// load/store, the emulated memory region is larger than the memory region
+/// actually needed; `frontPaddingSize` returns the size of the irrelevant
+/// part at the beginning.
struct LinearizedMemRefInfo {
OpFoldResult linearizedOffset;
OpFoldResult linearizedSize;
+ OpFoldResult frontPaddingSize;
};
std::pair<LinearizedMemRefInfo, OpFoldResult> getLinearizedMemRefOffsetAndSize(
OpBuilder &builder, Location loc, int srcBits, int dstBits,
diff --git a/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp b/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp
index 7321b19068016c..69724bec248827 100644
--- a/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp
+++ b/mlir/lib/Dialect/MemRef/Utils/MemRefUtils.cpp
@@ -81,11 +81,10 @@ std::pair<LinearizedMemRefInfo, OpFoldResult> getLinearizedMemRefOffsetAndSize(
// Adjust linearizedIndices and size by the scale factor (dstBits / srcBits).
int64_t scaler = dstBits / srcBits;
- addMulMap = addMulMap.floorDiv(scaler);
mulMap = mulMap.floorDiv(scaler);
OpFoldResult linearizedIndices = affine::makeComposedFoldedAffineApply(
- builder, loc, addMulMap, offsetValues);
+ builder, loc, addMulMap.floorDiv(scaler), offsetValues);
OpFoldResult linearizedSize =
affine::makeComposedFoldedAffineApply(builder, loc, mulMap, sizes);
@@ -95,7 +94,11 @@ std::pair<LinearizedMemRefInfo, OpFoldResult> getLinearizedMemRefOffsetAndSize(
OpFoldResult adjustBaseOffset = affine::makeComposedFoldedAffineApply(
builder, loc, s0.floorDiv(scaler), {offset});
- return {{adjustBaseOffset, linearizedSize}, linearizedIndices};
+ OpFoldResult frontPaddingSize = affine::makeComposedFoldedAffineApply(
+ builder, loc, addMulMap % scaler, offsetValues);
+
+ return {{adjustBaseOffset, linearizedSize, frontPaddingSize},
+ linearizedIndices};
}
LinearizedMemRefInfo
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
index 66362d3ca70fb6..42a9a2ab12196a 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
@@ -24,6 +24,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
+#include <optional>
using namespace mlir;
@@ -102,6 +103,23 @@ static FailureOr<Operation *> getCompressedMaskOp(OpBuilder &rewriter,
return newMask;
}
+///
+static std::optional<int64_t>
+getFrontPaddingSize(ConversionPatternRewriter &rewriter, Location loc,
+ const memref::LinearizedMemRefInfo linearizedInfo,
+ bool isUnalignedEmulation) {
+ if (!isUnalignedEmulation)
+ return 0;
+ auto foldedFrontPaddingSize = getValueOrCreateConstantIndexOp(
+ rewriter, loc, linearizedInfo.frontPaddingSize);
+ // try to fold the front padding size into a constant
+ if (auto frontPadding = dyn_cast_or_null<arith::ConstantIndexOp>(
+ foldedFrontPaddingSize.getDefiningOp())) {
+ return frontPadding.value();
+ }
+ return std::nullopt;
+}
+
namespace {
//===----------------------------------------------------------------------===//
@@ -142,14 +160,17 @@ struct ConvertVectorStore final : OpConversionPattern<vector::StoreOp> {
// vector<4xi8>
auto origElements = op.getValueToStore().getType().getNumElements();
- if (origElements % scale != 0)
- return failure();
+
+ // If the size of the vector being stored is not byte-aligned, extra
+ // handling is needed.
+ bool isUnalignedEmulation = origElements % scale != 0;
auto stridedMetadata =
rewriter.create<memref::ExtractStridedMetadataOp>(loc, op.getBase());
OpFoldResult linearizedIndices;
- std::tie(std::ignore, linearizedIndices) =
+ memref::LinearizedMemRefInfo linearizedInfo;
+ std::tie(linearizedInfo, linearizedIndices) =
memref::getLinearizedMemRefOffsetAndSize(
rewriter, loc, srcBits, dstBits,
stridedMetadata.getConstifiedMixedOffset(),
@@ -157,14 +178,48 @@ struct ConvertVectorStore final : OpConversionPattern<vector::StoreOp> {
stridedMetadata.getConstifiedMixedStrides(),
getAsOpFoldResult(adaptor.getIndices()));
- auto numElements = origElements / scale;
- auto bitCast = rewriter.create<vector::BitCastOp>(
- loc, VectorType::get(numElements, newElementType),
- op.getValueToStore());
+ auto foldedFrontPaddingSize = getFrontPaddingSize(
+ rewriter, loc, linearizedInfo, isUnalignedEmulation);
- rewriter.replaceOpWithNewOp<vector::StoreOp>(
- op, bitCast.getResult(), adaptor.getBase(),
- getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices));
+ if (!foldedFrontPaddingSize) {
+ // unimplemented case for dynamic front padding size
+ return failure();
+ }
+
+ auto numElements =
+ (*foldedFrontPaddingSize + origElements + scale - 1) / scale;
+ auto newVectorType = VectorType::get(numElements, newElementType);
+
+ if (isUnalignedEmulation) {
+ auto insertedVectorType =
+ VectorType::get(numElements * scale, oldElementType);
+
+ auto linearizedIndicesValue =
+ getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices);
+ auto passThru =
+ rewriter.create<vector::LoadOp>(loc, newVectorType, adaptor.getBase(),
+ ValueRange{linearizedIndicesValue});
+ auto bitcastedPassThru =
+ rewriter.create<vector::BitCastOp>(loc, insertedVectorType, passThru);
+
+ // Insert the value to store at the front padding offset.
+ auto insertStridedSlice = rewriter.create<vector::InsertStridedSliceOp>(
+ loc, insertedVectorType, op.getValueToStore(), bitcastedPassThru,
+ rewriter.getI64ArrayAttr({*foldedFrontPaddingSize}),
+ rewriter.getI64ArrayAttr({1}));
+ // Bitcast the vector back to the byte-sized element type.
+ auto bitCast = rewriter.create<vector::BitCastOp>(loc, newVectorType,
+ insertStridedSlice);
+
+ rewriter.replaceOpWithNewOp<vector::StoreOp>(
+ op, bitCast.getResult(), adaptor.getBase(), linearizedIndicesValue);
+ } else {
+ auto bitCast = rewriter.create<vector::BitCastOp>(loc, newVectorType,
+ op.getValueToStore());
+ rewriter.replaceOpWithNewOp<vector::StoreOp>(
+ op, bitCast.getResult(), adaptor.getBase(),
+ getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices));
+ }
return success();
}
};
@@ -294,19 +349,31 @@ struct ConvertVectorLoad final : OpConversionPattern<vector::LoadOp> {
// %1 = vector.load %0[%linear_index] : memref<6xi8>, vector<2xi8>
// %2 = vector.bitcast %1 : vector<2xi8> to vector<4xi4>
//
- // TODO: Currently, only the even number of elements loading is supported.
- // To deal with the odd number of elements, one has to extract the
- // subvector at the proper offset after bit-casting.
+ // There are cases where the number of elements to load is not byte-aligned,
+ // for example:
+ //
+ // %1 = vector.load %0[%c2, %c0] : memref<3x3xi2>, vector<3xi2>
+ //
+ // In such cases we have to load extra bytes and extract the exact slice
+ // afterwards:
+ //
+ // %1 = vector.load %0[%c1] : memref<3xi8>, vector<2xi8>
+ // %2 = vector.bitcast %1 : vector<2xi8> to vector<8xi2>
+ // %3 = vector.extract_strided_slice %2 {offsets = [2], sizes = [3],
+ //      strides = [1]} : vector<8xi2> to vector<3xi2>
+ //
+ // TODO: Currently the front padding size must be known at compile time, as
+ // the offsets, sizes, and strides of extract_strided_slice are attributes
+ // and must be constants.
auto origElements = op.getVectorType().getNumElements();
- if (origElements % scale != 0)
- return failure();
+ bool isUnalignedEmulation = origElements % scale != 0;
auto stridedMetadata =
rewriter.create<memref::ExtractStridedMetadataOp>(loc, op.getBase());
OpFoldResult linearizedIndices;
- std::tie(std::ignore, linearizedIndices) =
+ memref::LinearizedMemRefInfo linearizedInfo;
+ std::tie(linearizedInfo, linearizedIndices) =
memref::getLinearizedMemRefOffsetAndSize(
rewriter, loc, srcBits, dstBits,
stridedMetadata.getConstifiedMixedOffset(),
@@ -314,15 +381,35 @@ struct ConvertVectorLoad final : OpConversionPattern<vector::LoadOp> {
stridedMetadata.getConstifiedMixedStrides(),
getAsOpFoldResult(adaptor.getIndices()));
- auto numElements = (origElements + scale - 1) / scale;
+ auto foldedFrontPaddingSize = getFrontPaddingSize(
+ rewriter, loc, linearizedInfo, isUnalignedEmulation);
+
+ if (!foldedFrontPaddingSize) {
+ // unimplemented case for dynamic front padding size
+ return failure();
+ }
+
+ auto numElements =
+ (*foldedFrontPaddingSize + origElements + scale - 1) / scale;
+ auto loadVectorType = VectorType::get(numElements, newElementType);
auto newLoad = rewriter.create<vector::LoadOp>(
- loc, VectorType::get(numElements, newElementType), adaptor.getBase(),
+ loc, loadVectorType, adaptor.getBase(),
getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices));
+ auto newBitCastType = VectorType::get(numElements * scale, oldElementType);
auto bitCast =
- rewriter.create<vector::BitCastOp>(loc, op.getType(), newLoad);
-
- rewriter.replaceOp(op, bitCast->getResult(0));
+ rewriter.create<vector::BitCastOp>(loc, newBitCastType, newLoad);
+
+ if (newBitCastType.getNumElements() != origElements) {
+ auto extractStridedSlice = rewriter.create<vector::ExtractStridedSliceOp>(
+ loc, op.getType(), bitCast,
+ rewriter.getI64ArrayAttr({*foldedFrontPaddingSize}),
+ rewriter.getI64ArrayAttr({origElements}),
+ rewriter.getI64ArrayAttr({1}));
+ rewriter.replaceOp(op, extractStridedSlice.getResult());
+ } else {
+ rewriter.replaceOp(op, bitCast->getResult(0));
+ }
return success();
}
};
@@ -464,8 +551,8 @@ struct ConvertVectorTransferRead final
int scale = dstBits / srcBits;
auto origElements = op.getVectorType().getNumElements();
- if (origElements % scale != 0)
- return failure();
+
+ bool isUnalignedEmulation = origElements % scale != 0;
auto newPadding = rewriter.create<arith::ExtUIOp>(loc, newElementType,
adaptor.getPadding());
@@ -474,7 +561,8 @@ struct ConvertVectorTransferRead final
rewriter.create<memref::ExtractStridedMetadataOp>(loc, op.getSource());
OpFoldResult linearizedIndices;
- std::tie(std::ignore, linearizedIndices) =
+ memref::LinearizedMemRefInfo linearizedInfo;
+ std::tie(linearizedInfo, linearizedIndices) =
memref::getLinearizedMemRefOffsetAndSize(
rewriter, loc, srcBits, dstBits,
stridedMetadata.getConstifiedMixedOffset(),
@@ -482,7 +570,16 @@ struct ConvertVectorTransferRead final
stridedMetadata.getConstifiedMixedStrides(),
getAsOpFoldResult(adaptor.getIndices()));
- auto numElements = (origElements + scale - 1) / scale;
+ auto foldedFrontPaddingSize = getFrontPaddingSize(
+ rewriter, loc, linearizedInfo, isUnalignedEmulation);
+
+ if (!foldedFrontPaddingSize) {
+ // unimplemented case for dynamic front padding size
+ return failure();
+ }
+
+ auto numElements =
+ (*foldedFrontPaddingSize + origElements + scale - 1) / scale;
auto newReadType = VectorType::get(numElements, newElementType);
auto newRead = rewriter.create<vector::TransferReadOp>(
@@ -490,10 +587,21 @@ struct ConvertVectorTransferRead final
getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices),
newPadding);
+ auto bitCastType = VectorType::get(numElements * scale, oldElementType);
auto bitCast =
- rewriter.create<vector::BitCastOp>(loc, op.getType(), newRead);
+ rewriter.create<vector::BitCastOp>(loc, bitCastType, newRead);
+
+ if (isUnalignedEmulation) {
+ // we only extract a portion of the vector.
+ rewriter.replaceOpWithNewOp<vector::ExtractStridedSliceOp>(
+ op, op.getType(), bitCast,
+ rewriter.getI64ArrayAttr({*foldedFrontPaddingSize}),
+ rewriter.getI64ArrayAttr({origElements}),
+ rewriter.getI64ArrayAttr({1}));
+ } else {
+ rewriter.replaceOp(op, bitCast->getResult(0));
+ }
- rewriter.replaceOp(op, bitCast->getResult(0));
return success();
}
};
diff --git a/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir b/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
new file mode 100644
index 00000000000000..eebd7c74f44766
--- /dev/null
+++ b/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
@@ -0,0 +1,55 @@
+// RUN: mlir-opt --test-emulate-narrow-int="arith-compute-bitwidth=1 memref-load-bitwidth=8" --cse --split-input-file %s | FileCheck %s
+
+func.func @vector_load_i2(%arg1: index, %arg2: index) -> vector<3x3xi2> {
+ %0 = memref.alloc() : memref<3x3xi2>
+ %c0 = arith.constant 0 : index
+ %c2 = arith.constant 2 : index
+ %cst = arith.constant dense<0> : vector<3x3xi2>
+ %1 = vector.load %0[%c2, %c0] : memref<3x3xi2>, vector<3xi2>
+ %2 = vector.insert %1, %cst [0] : vector<3xi2> into vector<3x3xi2>
+ return %2 : vector<3x3xi2>
+}
+
+// CHECK: func @vector_load_i2
+// CHECK: %[[ALLOC:.+]] = memref.alloc() : memref<3xi8>
+// CHECK: %[[INDEX:.+]] = arith.constant 1 : index
+// CHECK: %[[VEC:.+]] = vector.load %[[ALLOC]][%[[INDEX]]] : memref<3xi8>, vector<2xi8>
+// CHECK: %[[VEC_I2:.+]] = vector.bitcast %[[VEC]] : vector<2xi8> to vector<8xi2>
+// CHECK: %[[EXTRACT:.+]] = vector.extract_strided_slice %[[VEC_I2]] {offsets = [2], sizes = [3], strides = [1]} : vector<8xi2> to vector<3xi2>
+
+// -----
+
+func.func @vector_store_i2(%arg0: vector<3xi2>) {
+ %0 = memref.alloc() : memref<3x3xi2>
+ %c0 = arith.constant 0 : index
+ %c2 = arith.constant 2 : index
+ vector.store %arg0, %0[%c2, %c0] : memref<3x3xi2>, vector<3xi2>
+ return
+}
+
+// CHECK: func @vector_store_i2
+// CHECK: %[[ALLOC:.+]] = memref.alloc() : memref<3xi8>
+// CHECK: %[[INDEX:.+]] = arith.constant 1 : index
+// CHECK: %[[LOAD:.+]] = vector.load %[[ALLOC]][%[[INDEX]]] : memref<3xi8>, vector<2xi8>
+// CHECK: %[[BITCAST1:.+]] = vector.bitcast %[[LOAD]] : vector<2xi8> to vector<8xi2>
+// CHECK: %[[INSERT:.+]] = vector.insert_strided_slice %arg0, %[[BITCAST1]] {offsets = [2], strides = [1]} : vector<3xi2> into vector<8xi2>
+// CHECK: %[[BITCAST2:.+]] = vector.bitcast %[[INSERT]] : vector<8xi2> to vector<2xi8>
+// CHECK: vector.store %[[BITCAST2]], %[[ALLOC]][%[[INDEX]]] : memref<3xi8>, vector<2xi8>
+
+// -----
+
+func.func @vector_transfer_read_i2() -> vector<3xi2> {
+ %0 = memref.alloc() : memref<3x3xi2>
+ %c0i2 = arith.constant 0 : i2
+ %c0 = arith.constant 0 : index
+ %c2 = arith.constant 2 : index
+ %1 = vector.transfer_read %0[%c2, %c0], %c0i2 {in_bounds = [true]} : memref<3x3xi2>, vector<3xi2>
+ return %1 : vector<3xi2>
+}
+
+// CHECK: func @vector_transfer_read_i2
+// CHECK: %[[ALLOC:.+]] = memref.alloc() : memref<3xi8>
+// CHECK: %[[INDEX:.+]] = arith.constant 1 : index
+// CHECK: %[[READ:.+]] = vector.transfer_read %[[ALLOC]][%[[INDEX]]], %0 : memref<3xi8>, vector<2xi8>
+// CHECK: %[[BITCAST:.+]] = vector.bitcast %[[READ]] : vector<2xi8> to vector<8xi2>
+// CHECK: vector.extract_strided_slice %[[BITCAST]] {offsets = [2], sizes = [3], strides = [1]} : vector<8xi2> to vector<3xi2>
From a855adf6b03ef178c9495733d5d9cb123e7c2142 Mon Sep 17 00:00:00 2001
From: Ubuntu <450283+lialan at users.noreply.github.com>
Date: Wed, 23 Oct 2024 15:49:41 +0000
Subject: [PATCH 2/4] Remove StoreOp
---
.../Transforms/VectorEmulateNarrowType.cpp | 57 ++++---------------
.../vector-emulate-narrow-type-unaligned.mlir | 19 -------
2 files changed, 10 insertions(+), 66 deletions(-)
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
index 42a9a2ab12196a..096f30648f49e7 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
@@ -160,17 +160,14 @@ struct ConvertVectorStore final : OpConversionPattern<vector::StoreOp> {
// vector<4xi8>
auto origElements = op.getValueToStore().getType().getNumElements();
-
- // If the size of the vector being stored is not byte-aligned, extra
- // handling is needed.
- bool isUnalignedEmulation = origElements % scale != 0;
+ if (origElements % scale != 0)
+ return failure();
auto stridedMetadata =
rewriter.create<memref::ExtractStridedMetadataOp>(loc, op.getBase());
OpFoldResult linearizedIndices;
- memref::LinearizedMemRefInfo linearizedInfo;
- std::tie(linearizedInfo, linearizedIndices) =
+ std::tie(std::ignore, linearizedIndices) =
memref::getLinearizedMemRefOffsetAndSize(
rewriter, loc, srcBits, dstBits,
stridedMetadata.getConstifiedMixedOffset(),
@@ -178,48 +175,14 @@ struct ConvertVectorStore final : OpConversionPattern<vector::StoreOp> {
stridedMetadata.getConstifiedMixedStrides(),
getAsOpFoldResult(adaptor.getIndices()));
- auto foldedFrontPaddingSize = getFrontPaddingSize(
- rewriter, loc, linearizedInfo, isUnalignedEmulation);
-
- if (!foldedFrontPaddingSize) {
- // unimplemented case for dynamic front padding size
- return failure();
- }
-
- auto numElements =
- (*foldedFrontPaddingSize + origElements + scale - 1) / scale;
- auto newVectorType = VectorType::get(numElements, newElementType);
-
- if (isUnalignedEmulation) {
- auto insertedVectorType =
- VectorType::get(numElements * scale, oldElementType);
-
- auto linearizedIndicesValue =
- getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices);
- auto passThru =
- rewriter.create<vector::LoadOp>(loc, newVectorType, adaptor.getBase(),
- ValueRange{linearizedIndicesValue});
- auto bitcastedPassThru =
- rewriter.create<vector::BitCastOp>(loc, insertedVectorType, passThru);
-
- // Insert the value to store at the front padding offset.
- auto insertStridedSlice = rewriter.create<vector::InsertStridedSliceOp>(
- loc, insertedVectorType, op.getValueToStore(), bitcastedPassThru,
- rewriter.getI64ArrayAttr({*foldedFrontPaddingSize}),
- rewriter.getI64ArrayAttr({1}));
- // Bitcast the vector back to the byte-sized element type.
- auto bitCast = rewriter.create<vector::BitCastOp>(loc, newVectorType,
- insertStridedSlice);
+ auto numElements = origElements / scale;
+ auto bitCast = rewriter.create<vector::BitCastOp>(
+ loc, VectorType::get(numElements, newElementType),
+ op.getValueToStore());
- rewriter.replaceOpWithNewOp<vector::StoreOp>(
- op, bitCast.getResult(), adaptor.getBase(), linearizedIndicesValue);
- } else {
- auto bitCast = rewriter.create<vector::BitCastOp>(loc, newVectorType,
- op.getValueToStore());
- rewriter.replaceOpWithNewOp<vector::StoreOp>(
- op, bitCast.getResult(), adaptor.getBase(),
- getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices));
- }
+ rewriter.replaceOpWithNewOp<vector::StoreOp>(
+ op, bitCast.getResult(), adaptor.getBase(),
+ getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices));
return success();
}
};
diff --git a/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir b/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
index eebd7c74f44766..329ab2164c9b5c 100644
--- a/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
+++ b/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
@@ -19,25 +19,6 @@ func.func @vector_load_i2(%arg1: index, %arg2: index) -> vector<3x3xi2> {
// -----
-func.func @vector_store_i2(%arg0: vector<3xi2>) {
- %0 = memref.alloc() : memref<3x3xi2>
- %c0 = arith.constant 0 : index
- %c2 = arith.constant 2 : index
- vector.store %arg0, %0[%c2, %c0] : memref<3x3xi2>, vector<3xi2>
- return
-}
-
-// CHECK: func @vector_store_i2
-// CHECK: %[[ALLOC:.+]] = memref.alloc() : memref<3xi8>
-// CHECK: %[[INDEX:.+]] = arith.constant 1 : index
-// CHECK: %[[LOAD:.+]] = vector.load %[[ALLOC]][%[[INDEX]]] : memref<3xi8>, vector<2xi8>
-// CHECK: %[[BITCAST1:.+]] = vector.bitcast %[[LOAD]] : vector<2xi8> to vector<8xi2>
-// CHECK: %[[INSERT:.+]] = vector.insert_strided_slice %arg0, %[[BITCAST1]] {offsets = [2], strides = [1]} : vector<3xi2> into vector<8xi2>
-// CHECK: %[[BITCAST2:.+]] = vector.bitcast %[[INSERT]] : vector<8xi2> to vector<2xi8>
-// CHECK: vector.store %[[BITCAST2]], %[[ALLOC]][%[[INDEX]]] : memref<3xi8>, vector<2xi8>
-
-// -----
-
func.func @vector_transfer_read_i2() -> vector<3xi2> {
%0 = memref.alloc() : memref<3x3xi2>
%c0i2 = arith.constant 0 : i2
From b00a45a960c38bb397481022b1a809b366406cdd Mon Sep 17 00:00:00 2001
From: Ubuntu <450283+lialan at users.noreply.github.com>
Date: Thu, 24 Oct 2024 00:01:30 +0000
Subject: [PATCH 3/4] update and refactor
---
.../Transforms/VectorEmulateNarrowType.cpp | 64 ++++++++++---------
1 file changed, 33 insertions(+), 31 deletions(-)
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
index 096f30648f49e7..1b868ea9a8c705 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
@@ -103,7 +103,6 @@ static FailureOr<Operation *> getCompressedMaskOp(OpBuilder &rewriter,
return newMask;
}
-///
static std::optional<int64_t>
getFrontPaddingSize(ConversionPatternRewriter &rewriter, Location loc,
const memref::LinearizedMemRefInfo linearizedInfo,
@@ -120,6 +119,17 @@ getFrontPaddingSize(ConversionPatternRewriter &rewriter, Location loc,
return std::nullopt;
}
+static OpResult extractSubvector(ConversionPatternRewriter &rewriter,
+ Location loc, VectorType extractType,
+ Value vector, int64_t frontOffset,
+ int64_t subvecSize) {
+ return rewriter
+ .create<vector::ExtractStridedSliceOp>(
+ loc, extractType, vector, rewriter.getI64ArrayAttr({frontOffset}),
+ rewriter.getI64ArrayAttr({subvecSize}), rewriter.getI64ArrayAttr({1}))
+ ->getResult(0);
+}
+
namespace {
//===----------------------------------------------------------------------===//
@@ -353,26 +363,24 @@ struct ConvertVectorLoad final : OpConversionPattern<vector::LoadOp> {
}
auto numElements =
- (*foldedFrontPaddingSize + origElements + scale - 1) / scale;
- auto loadVectorType = VectorType::get(numElements, newElementType);
+ llvm::alignTo(*foldedFrontPaddingSize + origElements, scale) / scale;
auto newLoad = rewriter.create<vector::LoadOp>(
- loc, loadVectorType, adaptor.getBase(),
+ loc, VectorType::get(numElements, newElementType), adaptor.getBase(),
getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices));
- auto newBitCastType = VectorType::get(numElements * scale, oldElementType);
- auto bitCast =
- rewriter.create<vector::BitCastOp>(loc, newBitCastType, newLoad);
-
- if (newBitCastType.getNumElements() != origElements) {
- auto extractStridedSlice = rewriter.create<vector::ExtractStridedSliceOp>(
- loc, op.getType(), bitCast,
- rewriter.getI64ArrayAttr({*foldedFrontPaddingSize}),
- rewriter.getI64ArrayAttr({origElements}),
- rewriter.getI64ArrayAttr({1}));
- rewriter.replaceOp(op, extractStridedSlice.getResult());
- } else {
- rewriter.replaceOp(op, bitCast->getResult(0));
+ OpResult castedResult =
+ rewriter
+ .create<vector::BitCastOp>(
+ loc, VectorType::get(numElements * scale, oldElementType),
+ newLoad)
+ ->getResult(0);
+
+ if (isUnalignedEmulation) {
+ castedResult = extractSubvector(rewriter, loc, op.getType(), castedResult,
+ *foldedFrontPaddingSize, origElements);
}
+
+ rewriter.replaceOp(op, castedResult);
return success();
}
};
@@ -542,28 +550,22 @@ struct ConvertVectorTransferRead final
}
auto numElements =
- (*foldedFrontPaddingSize + origElements + scale - 1) / scale;
- auto newReadType = VectorType::get(numElements, newElementType);
+ llvm::alignTo(*foldedFrontPaddingSize + origElements, scale) / scale;
auto newRead = rewriter.create<vector::TransferReadOp>(
- loc, newReadType, adaptor.getSource(),
+ loc, VectorType::get(numElements, newElementType), adaptor.getSource(),
getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices),
newPadding);
- auto bitCastType = VectorType::get(numElements * scale, oldElementType);
- auto bitCast =
- rewriter.create<vector::BitCastOp>(loc, bitCastType, newRead);
+ auto bitCast = rewriter.create<vector::BitCastOp>(
+ loc, VectorType::get(numElements * scale, oldElementType), newRead);
+ auto bitCastResult = bitCast->getResult(0);
if (isUnalignedEmulation) {
- // we only extract a portion of the vector.
- rewriter.replaceOpWithNewOp<vector::ExtractStridedSliceOp>(
- op, op.getType(), bitCast,
- rewriter.getI64ArrayAttr({*foldedFrontPaddingSize}),
- rewriter.getI64ArrayAttr({origElements}),
- rewriter.getI64ArrayAttr({1}));
- } else {
- rewriter.replaceOp(op, bitCast->getResult(0));
+ bitCastResult = extractSubvector(rewriter, loc, op.getType(), bitCast,
+ *foldedFrontPaddingSize, origElements);
}
+ rewriter.replaceOp(op, bitCastResult);
return success();
}
From 6cf80dc0d90bcd43a3053a9cbb179f08df188748 Mon Sep 17 00:00:00 2001
From: Ubuntu <450283+lialan at users.noreply.github.com>
Date: Wed, 23 Oct 2024 20:44:47 +0000
Subject: [PATCH 4/4] Implement mask load
---
.../Transforms/VectorEmulateNarrowType.cpp | 120 ++++++++++++++----
.../vector-emulate-narrow-type-unaligned.mlir | 31 +++++
2 files changed, 124 insertions(+), 27 deletions(-)
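This patch extends ConvertVectorMaskedLoad to the unaligned case. As a rough
sketch (derived from the test added below; SSA names such as %pad, %newmask,
and %origmask are illustrative), the pass-through and the select mask are
widened into zero-filled vectors at the front-padding offset, the masked load
is performed on whole bytes, and the original slice is extracted at the end:

  %pad = vector.insert_strided_slice %passthru, %zero {offsets = [2], strides = [1]}
      : vector<5xi2> into vector<8xi2>
  %pt = vector.bitcast %pad : vector<8xi2> to vector<2xi8>
  %ld = vector.maskedload %alloc[%c2], %newmask, %pt
      : memref<4xi8>, vector<2xi1>, vector<2xi8> into vector<2xi8>
  %wide = vector.bitcast %ld : vector<2xi8> to vector<8xi2>
  %m = vector.insert_strided_slice %origmask, %false {offsets = [2], strides = [1]}
      : vector<5xi1> into vector<8xi1>
  %sel = arith.select %m, %wide, %pad : vector<8xi1>, vector<8xi2>
  %res = vector.extract_strided_slice %sel {offsets = [2], sizes = [5], strides = [1]}
      : vector<8xi2> to vector<5xi2>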
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
index 1b868ea9a8c705..e1a6d6ed10c523 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
@@ -43,8 +43,9 @@ using namespace mlir;
/// %mask = [1, 1, 0]
static FailureOr<Operation *> getCompressedMaskOp(OpBuilder &rewriter,
Location loc, Value mask,
- int origElements, int scale) {
- auto numElements = (origElements + scale - 1) / scale;
+ int origElements, int scale,
+ int frontOffset = 0) {
+ auto numElements = (frontOffset + origElements + scale - 1) / scale;
Operation *maskOp = mask.getDefiningOp();
SmallVector<vector::ExtractOp, 2> extractOps;
@@ -68,6 +69,10 @@ static FailureOr<Operation *> getCompressedMaskOp(OpBuilder &rewriter,
shape.back() = numElements;
auto newMaskType = VectorType::get(shape, rewriter.getI1Type());
if (createMaskOp) {
+ if (frontOffset != 0) {
+ assert(false && "unimplemented case for frontOffset != 0");
+ return failure();
+ }
OperandRange maskOperands = createMaskOp.getOperands();
size_t numMaskOperands = maskOperands.size();
AffineExpr s0;
@@ -87,11 +92,27 @@ static FailureOr<Operation *> getCompressedMaskOp(OpBuilder &rewriter,
ArrayRef<int64_t> maskDimSizes = constantMaskOp.getMaskDimSizes();
size_t numMaskOperands = maskDimSizes.size();
int64_t origIndex = maskDimSizes[numMaskOperands - 1];
- int64_t maskIndex = (origIndex + scale - 1) / scale;
+ int64_t startIndex = frontOffset / scale;
+ int64_t maskIndex = llvm::alignTo(frontOffset + origIndex, scale) / scale;
+
+ // TODO: we only want the mask elements within [startIndex, maskIndex) to be
+ // true and the rest false; masks with more than one dimension are not yet
+ // supported when there is a front offset.
+ if (frontOffset != 0 && maskDimSizes.size() > 1)
+ return failure();
+
SmallVector<int64_t> newMaskDimSizes(maskDimSizes.drop_back());
newMaskDimSizes.push_back(maskIndex);
- newMask = rewriter.create<vector::ConstantMaskOp>(loc, newMaskType,
- newMaskDimSizes);
+
+ if (frontOffset == 0) {
+ newMask = rewriter.create<vector::ConstantMaskOp>(loc, newMaskType,
+ newMaskDimSizes);
+ } else {
+ SmallVector<bool> newMaskValues;
+ for (int64_t i = 0; i < numElements; ++i)
+ newMaskValues.push_back(i >= startIndex && i < maskIndex);
+ auto denseAttr = DenseElementsAttr::get(newMaskType, newMaskValues);
+ newMask = rewriter.create<arith::ConstantOp>(loc, newMaskType, denseAttr);
+ }
}
while (!extractOps.empty()) {
@@ -229,7 +250,8 @@ struct ConvertVectorMaskedStore final
auto stridedMetadata =
rewriter.create<memref::ExtractStridedMetadataOp>(loc, op.getBase());
OpFoldResult linearizedIndicesOfr;
- std::tie(std::ignore, linearizedIndicesOfr) =
+ memref::LinearizedMemRefInfo linearizedInfo;
+ std::tie(linearizedInfo, linearizedIndicesOfr) =
memref::getLinearizedMemRefOffsetAndSize(
rewriter, loc, srcBits, dstBits,
stridedMetadata.getConstifiedMixedOffset(),
@@ -242,19 +264,19 @@ struct ConvertVectorMaskedStore final
// Load the whole data and use arith.select to handle the corner cases.
// E.g., given these input values:
//
- // %mask = [1, 1, 1, 0, 0, 0]
- // %0[%c0, %c0] contains [0x1, 0x2, 0x3, 0x4, 0x5, 0x6]
- // %value_to_store = [0x7, 0x8, 0x9, 0xA, 0xB, 0xC]
+ // %mask = [0, 1, 1, 1, 1, 1, 0, 0]
+ // %0[%c0, %c0] contains [0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8]
+ // %value_to_store = [0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF, 0x0]
//
// we'll have
//
- // expected output: [0x7, 0x8, 0x9, 0x4, 0x5, 0x6]
+ // expected output: [0x1, 0xA, 0xB, 0xC, 0xD, 0xE, 0x7, 0x8]
//
- // %new_mask = [1, 1, 0]
- // %maskedload = [0x12, 0x34, 0x0]
- // %bitcast = [0x1, 0x2, 0x3, 0x4, 0x0, 0x0]
- // %select_using_original_mask = [0x7, 0x8, 0x9, 0x4, 0x0, 0x0]
- // %packed_data = [0x78, 0x94, 0x00]
+ // %new_mask = [1, 1, 1, 0]
+ // %maskedload = [0x12, 0x34, 0x56, 0x00]
+ // %bitcast = [0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x0, 0x0]
+ // %select_using_shifted_mask = [0x1, 0xA, 0xB, 0xC, 0xD, 0xE, 0x0, 0x0]
+ // %packed_data = [0x1A, 0xBC, 0xDE, 0x00]
//
// Using the new mask to store %packed_data results in expected output.
FailureOr<Operation *> newMask =
@@ -271,8 +293,9 @@ struct ConvertVectorMaskedStore final
loc, newType, adaptor.getBase(), linearizedIndices,
newMask.value()->getResult(0), passThru);
- Value valueToStore = rewriter.create<vector::BitCastOp>(
- loc, op.getValueToStore().getType(), newLoad);
+ auto newBitCastType = VectorType::get(numElements * scale, oldElementType);
+ Value valueToStore =
+ rewriter.create<vector::BitCastOp>(loc, newBitCastType, newLoad);
valueToStore = rewriter.create<arith::SelectOp>(
loc, op.getMask(), op.getValueToStore(), valueToStore);
valueToStore =
@@ -454,13 +477,13 @@ struct ConvertVectorMaskedLoad final
// subvector at the proper offset after bit-casting.
auto origType = op.getVectorType();
auto origElements = origType.getNumElements();
- if (origElements % scale != 0)
- return failure();
+ bool isUnalignedEmulation = origElements % scale != 0;
auto stridedMetadata =
rewriter.create<memref::ExtractStridedMetadataOp>(loc, op.getBase());
OpFoldResult linearizedIndices;
- std::tie(std::ignore, linearizedIndices) =
+ memref::LinearizedMemRefInfo linearizedInfo;
+ std::tie(linearizedInfo, linearizedIndices) =
memref::getLinearizedMemRefOffsetAndSize(
rewriter, loc, srcBits, dstBits,
stridedMetadata.getConstifiedMixedOffset(),
@@ -468,15 +491,37 @@ struct ConvertVectorMaskedLoad final
stridedMetadata.getConstifiedMixedStrides(),
getAsOpFoldResult(adaptor.getIndices()));
+ auto foldedFrontPaddingSize = getFrontPaddingSize(
+ rewriter, loc, linearizedInfo, isUnalignedEmulation);
+ if (!foldedFrontPaddingSize) {
+ // unimplemented case for dynamic front padding size
+ return failure();
+ }
+
FailureOr<Operation *> newMask =
- getCompressedMaskOp(rewriter, loc, op.getMask(), origElements, scale);
+ getCompressedMaskOp(rewriter, loc, op.getMask(), origElements, scale,
+ *foldedFrontPaddingSize);
if (failed(newMask))
return failure();
- auto numElements = (origElements + scale - 1) / scale;
+ auto numElements =
+ llvm::alignTo(*foldedFrontPaddingSize + origElements, scale) / scale;
auto newType = VectorType::get(numElements, newElementType);
+
+ auto newBitcastType = VectorType::get(numElements * scale, oldElementType);
+
+ Value passthru = op.getPassThru();
+ if (isUnalignedEmulation) {
+ // Create a zero vector of the widened type and insert the pass-through
+ // into it at the front padding offset.
+ auto emptyVector = rewriter.create<arith::ConstantOp>(
+ loc, newBitcastType, rewriter.getZeroAttr(newBitcastType));
+ passthru = rewriter.create<vector::InsertStridedSliceOp>(
+ loc, newBitcastType, op.getPassThru(), emptyVector,
+ rewriter.getI64ArrayAttr({*foldedFrontPaddingSize}),
+ rewriter.getI64ArrayAttr({1}));
+ }
auto newPassThru =
- rewriter.create<vector::BitCastOp>(loc, newType, op.getPassThru());
+ rewriter.create<vector::BitCastOp>(loc, newType, passthru);
// Generating the new masked load.
auto newLoad = rewriter.create<vector::MaskedLoadOp>(
@@ -487,10 +532,31 @@ struct ConvertVectorMaskedLoad final
// Setting the part that originally was not effectively loaded from memory
// to pass through.
auto bitCast =
- rewriter.create<vector::BitCastOp>(loc, op.getType(), newLoad);
- auto select = rewriter.create<arith::SelectOp>(loc, op.getMask(), bitCast,
- op.getPassThru());
- rewriter.replaceOp(op, select->getResult(0));
+ rewriter.create<vector::BitCastOp>(loc, newBitcastType, newLoad);
+
+ auto mask = op.getMask();
+ if (isUnalignedEmulation) {
+ auto newSelectMaskType =
+ VectorType::get(numElements * scale, rewriter.getI1Type());
+ // TODO: can fold if op's mask is constant
+ mask = rewriter.create<vector::InsertStridedSliceOp>(
+ loc, newSelectMaskType, op.getMask(),
+ rewriter.create<arith::ConstantOp>(
+ loc, newSelectMaskType, rewriter.getZeroAttr(newSelectMaskType)),
+ rewriter.getI64ArrayAttr({*foldedFrontPaddingSize}),
+ rewriter.getI64ArrayAttr({1}));
+ }
+
+ auto select =
+ rewriter.create<arith::SelectOp>(loc, mask, bitCast, passthru);
+
+ if (isUnalignedEmulation) {
+ auto extract = extractSubvector(rewriter, loc, op.getType(), select,
+ *foldedFrontPaddingSize, origElements);
+ rewriter.replaceOp(op, extract);
+ } else {
+ rewriter.replaceOp(op, select->getResult(0));
+ }
return success();
}
diff --git a/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir b/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
index 329ab2164c9b5c..7ecbad7968225d 100644
--- a/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
+++ b/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
@@ -34,3 +34,34 @@ func.func @vector_transfer_read_i2() -> vector<3xi2> {
// CHECK: %[[READ:.+]] = vector.transfer_read %[[ALLOC]][%[[INDEX]]], %0 : memref<3xi8>, vector<2xi8>
// CHECK: %[[BITCAST:.+]] = vector.bitcast %[[READ]] : vector<2xi8> to vector<8xi2>
// CHECK: vector.extract_strided_slice %[[BITCAST]] {offsets = [2], sizes = [3], strides = [1]} : vector<8xi2> to vector<3xi2>
+
+// -----
+
+func.func @vector_cst_maskedload_i2(%passthru: vector<5xi2>) -> vector<3x5xi2> {
+ %0 = memref.alloc() : memref<3x5xi2>
+ %cst = arith.constant dense<0> : vector<3x5xi2>
+ %mask = vector.constant_mask [3] : vector<5xi1>
+ %c0 = arith.constant 0 : index
+ %c2 = arith.constant 2 : index
+ %1 = vector.maskedload %0[%c2, %c0], %mask, %passthru :
+ memref<3x5xi2>, vector<5xi1>, vector<5xi2> into vector<5xi2>
+ %2 = vector.insert %1, %cst [0] : vector<5xi2> into vector<3x5xi2>
+ return %2 : vector<3x5xi2>
+}
+
+// CHECK: func @vector_cst_maskedload_i2
+// CHECK: %[[ORIGINMASK:.+]] = vector.constant_mask [3] : vector<5xi1>
+// CHECK: %[[NEWMASK:.+]] = arith.constant dense<true> : vector<2xi1>
+// CHECK: %[[VESSEL:.+]] = arith.constant dense<0> : vector<8xi2>
+// CHECK: %[[INSERT1:.+]] = vector.insert_strided_slice %arg0, %[[VESSEL]]
+// CHECK-SAME: {offsets = [2], strides = [1]} : vector<5xi2> into vector<8xi2>
+// CHECK: %[[BITCAST1:.+]] = vector.bitcast %[[INSERT1]] : vector<8xi2> to vector<2xi8>
+// CHECK: %[[C2:.+]] = arith.constant 2 : index
+// CHECK: %[[MASKEDLOAD:.+]] = vector.maskedload %alloc[%[[C2]]], %[[NEWMASK:.+]], %[[BITCAST1]]
+// CHECK-SAME: : memref<4xi8>, vector<2xi1>, vector<2xi8> into vector<2xi8>
+// CHECK: %[[BITCAST2:.+]] = vector.bitcast %[[MASKEDLOAD]] : vector<2xi8> to vector<8xi2>
+// CHECK: %[[CST2:.+]] = arith.constant dense<false> : vector<8xi1>
+// CHECK: %[[INSERT2:.+]] = vector.insert_strided_slice %[[ORIGINMASK]], %[[CST2]]
+// CHECK-SAME: {offsets = [2], strides = [1]} : vector<5xi1> into vector<8xi1>
+// CHECK: %[[SELECT:.+]] = arith.select %[[INSERT2]], %[[BITCAST2]], %[[INSERT1]] : vector<8xi1>, vector<8xi2>
+// CHECK: vector.extract_strided_slice %[[SELECT]] {offsets = [2], sizes = [5], strides = [1]} : vector<8xi2> to vector<5xi2>