[Mlir-commits] [mlir] [MLIR] Implement emulation of static indexing subbyte type vector stores (PR #115922)
Andrzej Warzyński
llvmlistbot at llvm.org
Tue Jan 14 09:16:09 PST 2025
================
@@ -336,30 +442,143 @@ struct ConvertVectorStore final : OpConversionPattern<vector::StoreOp> {
// vector.store %bitcast, %alloc[%linear_index] : memref<16xi8>,
// vector<4xi8>
- auto origElements = op.getValueToStore().getType().getNumElements();
- if (origElements % scale != 0)
- return failure();
+ auto origElements = valueToStore.getType().getNumElements();
+ bool isUnalignedEmulation = origElements % numSrcElemsPerDest != 0;
auto stridedMetadata =
rewriter.create<memref::ExtractStridedMetadataOp>(loc, op.getBase());
OpFoldResult linearizedIndices;
- std::tie(std::ignore, linearizedIndices) =
+ memref::LinearizedMemRefInfo linearizedInfo;
+ std::tie(linearizedInfo, linearizedIndices) =
memref::getLinearizedMemRefOffsetAndSize(
rewriter, loc, srcBits, dstBits,
stridedMetadata.getConstifiedMixedOffset(),
stridedMetadata.getConstifiedMixedSizes(),
stridedMetadata.getConstifiedMixedStrides(),
getAsOpFoldResult(adaptor.getIndices()));
- auto numElements = origElements / scale;
- auto bitCast = rewriter.create<vector::BitCastOp>(
- loc, VectorType::get(numElements, newElementType),
- op.getValueToStore());
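+    // Number of sub-byte elements preceding the first element to store
+    // within its containing byte; it is only known statically when the
+    // intra-byte offset folds to a constant.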
+ std::optional<int64_t> foldedNumFrontPadElems =
+ isUnalignedEmulation
+ ? getConstantIntValue(linearizedInfo.intraDataOffset)
+ : 0;
+
+ if (!foldedNumFrontPadElems) {
+      return rewriter.notifyMatchFailure(
+          op, "subbyte store emulation: dynamic front padding size is "
+              "not yet implemented");
+ }
+
+ auto memrefBase = cast<MemRefValue>(adaptor.getBase());
+
+    // Shortcut: the sub-byte emulated store at the front is not needed when
+    // both of the following conditions hold:
+    // 1. The source vector size (in bits) is a multiple of the byte size.
+    // 2. The address of the store is aligned to the emulated width boundary.
+    //
+    // For example, storing a vector<4xi2> to memref<13xi2> at offset 4 does
+    // not need unaligned emulation because the store address is aligned and
+    // the source spans whole bytes.
+ if (!isUnalignedEmulation && *foldedNumFrontPadElems == 0) {
+ auto numElements = origElements / numSrcElemsPerDest;
+ auto bitCast = rewriter.create<vector::BitCastOp>(
+ loc, VectorType::get(numElements, newElementType),
+ op.getValueToStore());
+ rewriter.replaceOpWithNewOp<vector::StoreOp>(
+ op, bitCast.getResult(), memrefBase,
+ getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices));
+ return success();
+ }
+
+ // The index into the target memref we are storing to
+ Value currentDestIndex =
+ getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices);
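+    // Mask type with one i1 per sub-byte element of an emulated byte, used
+    // by the partial-width stores below to select which elements to
+    // overwrite.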
+ auto subWidthStoreMaskType =
+ VectorType::get({numSrcElemsPerDest}, rewriter.getI1Type());
+ // The index into the source vector we are currently processing
+ auto currentSourceIndex = 0;
+
+ // 1. Partial width store for the first byte, when the store address is not
----------------
banach-space wrote:
Let's do something like this for consistency (making a suggestion for _headings_ for 1., 2. and 3. below):
```
1. Partial width store for the leading output byte.
(...)
2. Full width store for the inner output bytes.
(...)
3. Partial width store for the trailing output byte.
```
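
For intuition, here is a minimal, self-contained C++ sketch that simulates the same three-phase scheme on a plain byte buffer. It is an illustration only, not the pattern's implementation: the `partialStoreI2` helper, the little-endian packing (element 0 in the low bits of each byte), and the concrete vector<7xi2> example are assumptions made for the sketch.

```cpp
// Simulate an unaligned store of vector<7xi2> into a buffer of 16 x i2.
// Packing assumption for this sketch: element 0 lives in the low bits of
// each byte (little-endian style).
#include <cstdint>
#include <cstdio>
#include <vector>

// Read-modify-write n 2-bit slots starting at firstSlot of memory[byteIdx],
// preserving the other slots. This is the "partial width" store used for
// the leading and trailing output bytes.
static void partialStoreI2(std::vector<uint8_t> &memory, size_t byteIdx,
                           const uint8_t *elems, unsigned firstSlot,
                           unsigned n) {
  uint8_t mask = 0, bits = 0;
  for (unsigned i = 0; i != n; ++i) {
    unsigned shift = 2 * (firstSlot + i);
    mask |= uint8_t(0b11u << shift);
    bits |= uint8_t((elems[i] & 0b11u) << shift);
  }
  memory[byteIdx] = uint8_t((memory[byteIdx] & ~mask) | bits);
}

int main() {
  std::vector<uint8_t> memory(4, 0xFF);         // 16 x i2, every slot == 3
  const uint8_t src[7] = {0, 1, 2, 3, 0, 1, 2}; // the vector<7xi2> to store
  const unsigned slotsPerByte = 4;
  unsigned frontPad = 2; // store begins at element offset 2 (unaligned)

  size_t byteIdx = 0, srcIdx = 0;
  // 1. Partial width store for the leading output byte.
  partialStoreI2(memory, byteIdx++, src + srcIdx, frontPad,
                 slotsPerByte - frontPad);
  srcIdx += slotsPerByte - frontPad;
  // 2. Full width stores for the inner output bytes: plain byte writes.
  while (srcIdx + slotsPerByte <= sizeof(src)) {
    uint8_t b = 0;
    for (unsigned i = 0; i != slotsPerByte; ++i)
      b |= uint8_t((src[srcIdx + i] & 0b11u) << (2 * i));
    memory[byteIdx++] = b;
    srcIdx += slotsPerByte;
  }
  // 3. Partial width store for the trailing output byte, if needed.
  if (srcIdx < sizeof(src))
    partialStoreI2(memory, byteIdx, src + srcIdx, 0,
                   unsigned(sizeof(src) - srcIdx));

  for (uint8_t b : memory)
    std::printf("%02x ", b); // prints: 4f 4e fe ff
  std::printf("\n");
}
```

The mask-and-merge in `partialStoreI2` stands in for the masked read-modify-write that the emulation emits for the leading and trailing bytes, while the inner loop corresponds to the plain full-width `vector.store`.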
https://github.com/llvm/llvm-project/pull/115922