[Mlir-commits] [mlir] c3c3ccc - [MLIR] support dynamic indexing of `vector.maskedload` in `VectorEmulateNarrowTypes` (#115070)
llvmlistbot at llvm.org
Tue Nov 12 09:22:19 PST 2024
Author: lialan
Date: 2024-11-12T09:22:16-08:00
New Revision: c3c3ccc364578c1897780974f685a44bdeec1584
URL: https://github.com/llvm/llvm-project/commit/c3c3ccc364578c1897780974f685a44bdeec1584
DIFF: https://github.com/llvm/llvm-project/commit/c3c3ccc364578c1897780974f685a44bdeec1584.diff
LOG: [MLIR] support dynamic indexing of `vector.maskedload` in `VectorEmulateNarrowTypes` (#115070)
Building on the existing emulation scheme, this patch adds support for
dynamic indexing by dynamically creating an intermediate new mask and a
new passthru vector, and by dynamically inserting the result into the
destination vector. The dynamic parts are constructed with multiple
`vector.extract` and `vector.insert` ops that rearrange the original
mask/passthru vector, since `vector.insert_strided_slice` and
`vector.extract_strided_slice` only take static offsets and indices.

Note: only `vector.maskedload` ops whose masks are created by
`vector.constant_mask` are currently supported; masks created by
`vector.create_mask` do not work yet.
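As an illustration of the approach (a hand-written sketch with placeholder
names, not the literal pass output; the exact IR is checked in the test added
below), placing a passthru of type vector<3xi2> into a zero vector<8xi2> at a
dynamic offset expands to a chain like:

  // Sketch only: %passthru : vector<3xi2> and %off : index are assumed to be
  // in scope (e.g. block arguments); all value names are illustrative.
  %zeros = arith.constant dense<0> : vector<8xi2>
  %c1 = arith.constant 1 : index
  %c2 = arith.constant 2 : index
  %e0 = vector.extract %passthru[0] : i2 from vector<3xi2>
  %v0 = vector.insert %e0, %zeros [%off] : i2 into vector<8xi2>
  %off1 = arith.addi %off, %c1 : index
  %e1 = vector.extract %passthru[1] : i2 from vector<3xi2>
  %v1 = vector.insert %e1, %v0 [%off1] : i2 into vector<8xi2>
  %off2 = arith.addi %off, %c2 : index
  %e2 = vector.extract %passthru[2] : i2 from vector<3xi2>
  %new_passthru = vector.insert %e2, %v1 [%off2] : i2 into vector<8xi2>

The same extract/insert chain is used to scatter the original mask into the
wider select mask.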
---------
Co-authored-by: hasekawa-takumi <167335845+hasekawa-takumi at users.noreply.github.com>
Added:
Modified:
mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
index 403fd6a534fa85..7578aadee23a6e 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
@@ -52,7 +52,9 @@ using namespace mlir;
///
/// %mask = [1, 1, 0, 0, 0, 0]
///
-/// will first be padded with number of `intraDataOffset` zeros:
+/// will first be padded with `intraDataOffset` zeros in the front, and with
+/// zeros in the back so that the number of elements becomes a multiple of
+/// `scale` (just to make the computation easier). The new mask will be:
/// %mask = [0, 1, 1, 0, 0, 0, 0, 0]
///
/// then it will return the following new compressed mask:
@@ -62,7 +64,8 @@ static FailureOr<Operation *> getCompressedMaskOp(OpBuilder &rewriter,
Location loc, Value mask,
int origElements, int scale,
int intraDataOffset = 0) {
- auto numElements = (intraDataOffset + origElements + scale - 1) / scale;
+ assert(intraDataOffset < scale && "intraDataOffset must be less than scale");
+ auto numElements = llvm::divideCeil(intraDataOffset + origElements, scale);
Operation *maskOp = mask.getDefiningOp();
SmallVector<vector::ExtractOp, 2> extractOps;
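For concreteness, with the numbers from the new i2 test below: origElements = 3
and scale = 4 (four i2 elements per emulated i8 element). A dynamic intra-vector
offset is bounded by scale - 1 = 3, so the masked-load pattern passes
maxIntraDataOffset = 3 and numElements = llvm::divideCeil(3 + 3, 4) = 2. The
compressed mask is therefore a vector<2xi1> and the emulated load reads a
vector<2xi8>.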
@@ -194,6 +197,26 @@ static Value dynamicallyExtractSubVector(OpBuilder &rewriter, Location loc,
return dest;
}
+/// Inserts a 1-D subvector into a 1-D `dest` vector at index `destOffsetVar`.
+static Value dynamicallyInsertSubVector(RewriterBase &rewriter, Location loc,
+ TypedValue<VectorType> source,
+ Value dest, OpFoldResult destOffsetVar,
+ size_t length) {
+ assert(length > 0 && "length must be greater than 0");
+ Value destOffsetVal =
+ getValueOrCreateConstantIndexOp(rewriter, loc, destOffsetVar);
+ for (size_t i = 0; i < length; ++i) {
+ auto insertLoc = i == 0
+ ? destOffsetVal
+ : rewriter.create<arith::AddIOp>(
+ loc, rewriter.getIndexType(), destOffsetVal,
+ rewriter.create<arith::ConstantIndexOp>(loc, i));
+ auto extractOp = rewriter.create<vector::ExtractOp>(loc, source, i);
+ dest = rewriter.create<vector::InsertOp>(loc, extractOp, dest, insertLoc);
+ }
+ return dest;
+}
+
/// Returns the op sequence for an emulated sub-byte data type vector load.
/// specifically, use `emulatedElemType` for loading a vector of `origElemType`.
/// The load location is given by `base` and `linearizedIndices`, and the
@@ -466,18 +489,16 @@ struct ConvertVectorLoad final : OpConversionPattern<vector::LoadOp> {
emulatedVectorLoad(rewriter, loc, adaptor.getBase(), linearizedIndices,
numElements, oldElementType, newElementType);
- if (foldedIntraVectorOffset) {
- if (isUnalignedEmulation) {
- result =
- staticallyExtractSubvector(rewriter, loc, op.getType(), result,
- *foldedIntraVectorOffset, origElements);
- }
- } else {
+ if (!foldedIntraVectorOffset) {
auto resultVector = rewriter.create<arith::ConstantOp>(
loc, op.getType(), rewriter.getZeroAttr(op.getType()));
result = dynamicallyExtractSubVector(
rewriter, loc, dyn_cast<TypedValue<VectorType>>(result), resultVector,
linearizedInfo.intraDataOffset, origElements);
+ } else if (isUnalignedEmulation) {
+ result =
+ staticallyExtractSubvector(rewriter, loc, op.getType(), result,
+ *foldedIntraVectorOffset, origElements);
}
rewriter.replaceOp(op, result);
return success();
@@ -572,27 +593,26 @@ struct ConvertVectorMaskedLoad final
? getConstantIntValue(linearizedInfo.intraDataOffset)
: 0;
- if (!foldedIntraVectorOffset) {
- // unimplemented case for dynamic intra vector offset
- return failure();
- }
-
- FailureOr<Operation *> newMask =
- getCompressedMaskOp(rewriter, loc, op.getMask(), origElements, scale,
- *foldedIntraVectorOffset);
+ int64_t maxIntraDataOffset = foldedIntraVectorOffset.value_or(scale - 1);
+ FailureOr<Operation *> newMask = getCompressedMaskOp(
+ rewriter, loc, op.getMask(), origElements, scale, maxIntraDataOffset);
if (failed(newMask))
return failure();
+ Value passthru = op.getPassThru();
+
auto numElements =
- llvm::divideCeil(*foldedIntraVectorOffset + origElements, scale);
+ llvm::divideCeil(maxIntraDataOffset + origElements, scale);
auto loadType = VectorType::get(numElements, newElementType);
auto newBitcastType = VectorType::get(numElements * scale, oldElementType);
- Value passthru = op.getPassThru();
- if (isUnalignedEmulation) {
- // create an empty vector of the new type
- auto emptyVector = rewriter.create<arith::ConstantOp>(
- loc, newBitcastType, rewriter.getZeroAttr(newBitcastType));
+ auto emptyVector = rewriter.create<arith::ConstantOp>(
+ loc, newBitcastType, rewriter.getZeroAttr(newBitcastType));
+ if (!foldedIntraVectorOffset) {
+ passthru = dynamicallyInsertSubVector(
+ rewriter, loc, dyn_cast<TypedValue<VectorType>>(passthru),
+ emptyVector, linearizedInfo.intraDataOffset, origElements);
+ } else if (isUnalignedEmulation) {
passthru = staticallyInsertSubvector(rewriter, loc, passthru, emptyVector,
*foldedIntraVectorOffset);
}
@@ -611,20 +631,27 @@ struct ConvertVectorMaskedLoad final
rewriter.create<vector::BitCastOp>(loc, newBitcastType, newLoad);
Value mask = op.getMask();
- if (isUnalignedEmulation) {
- auto newSelectMaskType =
- VectorType::get(numElements * scale, rewriter.getI1Type());
- // TODO: can fold if op's mask is constant
- auto emptyVector = rewriter.create<arith::ConstantOp>(
- loc, newSelectMaskType, rewriter.getZeroAttr(newSelectMaskType));
- mask = staticallyInsertSubvector(rewriter, loc, op.getMask(), emptyVector,
+ auto newSelectMaskType =
+ VectorType::get(numElements * scale, rewriter.getI1Type());
+ // TODO: try to fold if op's mask is constant
+ auto emptyMask = rewriter.create<arith::ConstantOp>(
+ loc, newSelectMaskType, rewriter.getZeroAttr(newSelectMaskType));
+ if (!foldedIntraVectorOffset) {
+ mask = dynamicallyInsertSubVector(
+ rewriter, loc, dyn_cast<TypedValue<VectorType>>(mask), emptyMask,
+ linearizedInfo.intraDataOffset, origElements);
+ } else if (isUnalignedEmulation) {
+ mask = staticallyInsertSubvector(rewriter, loc, op.getMask(), emptyMask,
*foldedIntraVectorOffset);
}
Value result =
rewriter.create<arith::SelectOp>(loc, mask, bitCast, passthru);
-
- if (isUnalignedEmulation) {
+ if (!foldedIntraVectorOffset) {
+ result = dynamicallyExtractSubVector(
+ rewriter, loc, dyn_cast<TypedValue<VectorType>>(result),
+ op.getPassThru(), linearizedInfo.intraDataOffset, origElements);
+ } else if (isUnalignedEmulation) {
result =
staticallyExtractSubvector(rewriter, loc, op.getType(), result,
*foldedIntraVectorOffset, origElements);
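The tail of the rewrite mirrors the head (again a hand-written sketch with
placeholder names; the CHECK lines in the test below verify the real output):
after selecting between the bitcast load and the widened passthru, the live
elements are extracted at the dynamic offset and inserted back at static
positions of the original result type:

  // Sketch only: %new_mask, %bitcast, %new_passthru, %passthru, %off, %off1
  // and %off2 are placeholder names carried over from the sketch above.
  %sel = arith.select %new_mask, %bitcast, %new_passthru : vector<8xi1>, vector<8xi2>
  %s0 = vector.extract %sel[%off] : i2 from vector<8xi2>
  %r0 = vector.insert %s0, %passthru [0] : i2 into vector<3xi2>
  %s1 = vector.extract %sel[%off1] : i2 from vector<8xi2>
  %r1 = vector.insert %s1, %r0 [1] : i2 into vector<3xi2>
  %s2 = vector.extract %sel[%off2] : i2 from vector<8xi2>
  %result = vector.insert %s2, %r1 [2] : i2 into vector<3xi2>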
@@ -685,10 +712,9 @@ struct ConvertVectorTransferRead final
? getConstantIntValue(linearizedInfo.intraDataOffset)
: 0;
- auto maxIntraVectorOffset =
- foldedIntraVectorOffset ? *foldedIntraVectorOffset : scale - 1;
+ int64_t maxIntraDataOffset = foldedIntraVectorOffset.value_or(scale - 1);
auto numElements =
- llvm::divideCeil(maxIntraVectorOffset + origElements, scale);
+ llvm::divideCeil(maxIntraDataOffset + origElements, scale);
auto newRead = rewriter.create<vector::TransferReadOp>(
loc, VectorType::get(numElements, newElementType), adaptor.getSource(),
@@ -699,18 +725,16 @@ struct ConvertVectorTransferRead final
loc, VectorType::get(numElements * scale, oldElementType), newRead);
Value result = bitCast->getResult(0);
- if (foldedIntraVectorOffset) {
- if (isUnalignedEmulation) {
- result =
- staticallyExtractSubvector(rewriter, loc, op.getType(), result,
- *foldedIntraVectorOffset, origElements);
- }
- } else {
+ if (!foldedIntraVectorOffset) {
auto zeros = rewriter.create<arith::ConstantOp>(
loc, op.getType(), rewriter.getZeroAttr(op.getType()));
result = dynamicallyExtractSubVector(rewriter, loc, bitCast, zeros,
linearizedInfo.intraDataOffset,
origElements);
+ } else if (isUnalignedEmulation) {
+ result =
+ staticallyExtractSubvector(rewriter, loc, op.getType(), result,
+ *foldedIntraVectorOffset, origElements);
}
rewriter.replaceOp(op, result);
diff --git a/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir b/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
index 0cecaddc5733e2..7ed75ff7f1579c 100644
--- a/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
+++ b/mlir/test/Dialect/Vector/vector-emulate-narrow-type-unaligned.mlir
@@ -183,3 +183,69 @@ func.func @vector_transfer_read_i2_dynamic_indexing_mixed(%idx1: index) -> vecto
// CHECK: %[[C2:.+]] = arith.constant 2 : index
// CHECK: %[[ADDI2:.+]] = arith.addi %[[LOADADDR2]], %[[C2]] : index
// CHECK: %[[EXTRACT3:.+]] = vector.extract %[[BITCAST]][%[[ADDI2]]] : i2 from vector<8xi2>
+// -----
+
+func.func @vector_maskedload_i2_dynamic_indexing_mixed(%passthru: vector<3xi2>, %idx: index) -> vector<3xi2> {
+ %0 = memref.alloc() : memref<3x3xi2>
+ %cst = arith.constant dense<0> : vector<3x3xi2>
+ %c2 = arith.constant 2 : index
+ %mask = vector.constant_mask [3] : vector<3xi1>
+ %1 = vector.maskedload %0[%idx, %c2], %mask, %passthru :
+ memref<3x3xi2>, vector<3xi1>, vector<3xi2> into vector<3xi2>
+ return %1 : vector<3xi2>
+}
+
+// CHECK: #[[MAP:.+]] = affine_map<()[s0] -> ((s0 * 3 + 2) floordiv 4)>
+// CHECK: #[[MAP1:.+]] = affine_map<()[s0] -> (s0 * 3 - ((s0 * 3 + 2) floordiv 4) * 4 + 2)>
+// CHECK: func @vector_maskedload_i2_dynamic_indexing_mixed(
+// CHECK-SAME: %[[PTH:.+]]: vector<3xi2>, %[[IDX:.+]]: index) -> vector<3xi2>
+// CHECK: %[[ALLOC:.+]] = memref.alloc() : memref<3xi8>
+// CHECK: %[[MASK:.+]] = vector.constant_mask [3] : vector<3xi1>
+// CHECK: %[[LINEAR1:.+]] = affine.apply #map()[%[[IDX]]]
+// CHECK: %[[LINEAR2:.+]] = affine.apply #map1()[%[[IDX]]]
+// CHECK: %[[ONE:.+]] = arith.constant dense<true> : vector<2xi1>
+// CHECK: %[[ZERO:.+]] = arith.constant dense<0> : vector<8xi2>
+
+// Extract each passthru element and insert it into the zero vector to construct a new passthru
+// CHECK: %[[EX1:.+]] = vector.extract %[[PTH]][0] : i2 from vector<3xi2>
+// CHECK: %[[IN1:.+]] = vector.insert %[[EX1]], %[[ZERO]] [%[[LINEAR2]]] : i2 into vector<8xi2>
+// CHECK: %[[C1:.+]] = arith.constant 1 : index
+// CHECK: %[[INCIDX:.+]] = arith.addi %[[LINEAR2]], %[[C1]] : index
+// CHECK: %[[EX2:.+]] = vector.extract %[[PTH]][1] : i2 from vector<3xi2>
+// CHECK: %[[IN2:.+]] = vector.insert %[[EX2]], %[[IN1]] [%[[INCIDX]]] : i2 into vector<8xi2>
+// CHECK: %[[C2:.+]] = arith.constant 2 : index
+// CHECK: %[[INCIDX2:.+]] = arith.addi %[[LINEAR2]], %[[C2]] : index
+// CHECK: %[[EX3:.+]] = vector.extract %[[PTH]][2] : i2 from vector<3xi2>
+// CHECK: %[[NEW_PASSTHRU:.+]] = vector.insert %[[EX3]], %[[IN2]] [%[[INCIDX2]]] : i2 into vector<8xi2>
+
+// Bitcast the new passthru vector to emulated i8 vector
+// CHECK: %[[BCAST_PASSTHRU:.+]] = vector.bitcast %[[NEW_PASSTHRU]] : vector<8xi2> to vector<2xi8>
+
+// Use the emulated i8 vector for masked load from the source memory
+// CHECK: %[[SOURCE:.+]] = vector.maskedload %[[ALLOC]][%[[LINEAR1]]], %[[ONE]], %[[BCAST_PASSTHRU]]
+// CHECK-SAME: memref<3xi8>, vector<2xi1>, vector<2xi8> into vector<2xi8>
+
+// Bitcast back to i2 vector
+// CHECK: %[[BCAST_MASKLOAD:.+]] = vector.bitcast %[[SOURCE]] : vector<2xi8> to vector<8xi2>
+
+// CHECK: %[[CST1:.+]] = arith.constant dense<false> : vector<8xi1>
+
+// Create a mask vector
+// Note that if the indices were known statically, the mask-generating part could be folded.
+// CHECK: %[[EX4:.+]] = vector.extract %[[MASK]][0] : i1 from vector<3xi1>
+// CHECK: %[[IN4:.+]] = vector.insert %[[EX4]], %[[CST1]] [%[[LINEAR2]]] : i1 into vector<8xi1>
+// CHECK: %[[EX5:.+]] = vector.extract %[[MASK]][1] : i1 from vector<3xi1>
+// CHECK: %[[IN5:.+]] = vector.insert %[[EX5]], %[[IN4]] [%[[INCIDX]]] : i1 into vector<8xi1>
+// CHECK: %[[EX6:.+]] = vector.extract %[[MASK]][2] : i1 from vector<3xi1>
+// CHECK: %[[NEW_MASK:.+]] = vector.insert %[[EX6]], %[[IN5]] [%[[INCIDX2]]] : i1 into vector<8xi1>
+
+// Select the effective part from the source and passthru vectors
+// CHECK: %[[SELECT:.+]] = arith.select %[[NEW_MASK]], %[[BCAST_MASKLOAD]], %[[NEW_PASSTHRU]] : vector<8xi1>, vector<8xi2>
+
+// Finally, insert the selected parts into the actual passthru vector.
+// CHECK: %[[EX7:.+]] = vector.extract %[[SELECT]][%[[LINEAR2]]] : i2 from vector<8xi2>
+// CHECK: %[[IN7:.+]] = vector.insert %[[EX7]], %[[PTH]] [0] : i2 into vector<3xi2>
+// CHECK: %[[EX8:.+]] = vector.extract %[[SELECT]][%[[INCIDX]]] : i2 from vector<8xi2>
+// CHECK: %[[IN8:.+]] = vector.insert %[[EX8]], %[[IN7]] [1] : i2 into vector<3xi2>
+// CHECK: %[[EX9:.+]] = vector.extract %[[SELECT]][%[[INCIDX2]]] : i2 from vector<8xi2>
+// CHECK: %[[IN9:.+]] = vector.insert %[[EX9]], %[[IN8]] [2] : i2 into vector<3xi2>