[Mlir-commits] [mlir] [MLIR] Implement emulation of static indexing subbyte type vector stores (PR #115922)
Diego Caballero
llvmlistbot at llvm.org
Sun Jan 19 19:12:21 PST 2025
================
@@ -336,30 +439,181 @@ struct ConvertVectorStore final : OpConversionPattern<vector::StoreOp> {
// vector.store %bitcast, %alloc[%linear_index] : memref<16xi8>,
// vector<4xi8>
- auto origElements = op.getValueToStore().getType().getNumElements();
- if (origElements % scale != 0)
- return failure();
+ auto origElements = valueToStore.getType().getNumElements();
+ bool isAlignedEmulation = origElements % numSrcElemsPerDest == 0;
auto stridedMetadata =
rewriter.create<memref::ExtractStridedMetadataOp>(loc, op.getBase());
OpFoldResult linearizedIndices;
- std::tie(std::ignore, linearizedIndices) =
+ memref::LinearizedMemRefInfo linearizedInfo;
+ std::tie(linearizedInfo, linearizedIndices) =
memref::getLinearizedMemRefOffsetAndSize(
rewriter, loc, srcBits, dstBits,
stridedMetadata.getConstifiedMixedOffset(),
stridedMetadata.getConstifiedMixedSizes(),
stridedMetadata.getConstifiedMixedStrides(),
getAsOpFoldResult(adaptor.getIndices()));
- auto numElements = origElements / scale;
- auto bitCast = rewriter.create<vector::BitCastOp>(
- loc, VectorType::get(numElements, newElementType),
- op.getValueToStore());
+ std::optional<int64_t> foldedNumFrontPadElems =
+ isAlignedEmulation
+ ? 0
+ : getConstantIntValue(linearizedInfo.intraDataOffset);
+
+ if (!foldedNumFrontPadElems) {
+ return rewriter.notifyMatchFailure(
+ op, "subbyte store emulation: dynamic front padding size is "
+ "not yet implemented");
+ }
+
+ auto memrefBase = cast<MemRefValue>(adaptor.getBase());
- rewriter.replaceOpWithNewOp<vector::StoreOp>(
- op, bitCast.getResult(), adaptor.getBase(),
- getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices));
+ // Conditions when subbyte emulated store is not needed:
+ // 1. The source vector size (in bits) is a multiple of byte size.
+ // 2. The address of the store is aligned to the emulated width boundary.
+ //
+  // For example, storing a vector<4xi2> into a memref<13xi2> at offset 4 does
+  // not need unaligned emulation, because the store address is aligned and the
+  // source is exactly one byte.
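+  //
+  // Continuing that example, the destination memref<13xi2> is converted to
+  // memref<4xi8>, and the aligned store lowers to roughly:
+  //
+  //   %bytes = vector.bitcast %val : vector<4xi2> to vector<1xi8>
+  //   vector.store %bytes, %alloc[%c1] : memref<4xi8>, vector<1xi8>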
+ bool emulationRequiresPartialStores =
+ !isAlignedEmulation || *foldedNumFrontPadElems != 0;
+ if (!emulationRequiresPartialStores) {
+ // Basic case: storing full bytes.
+ auto numElements = origElements / numSrcElemsPerDest;
+ auto bitCast = rewriter.create<vector::BitCastOp>(
+ loc, VectorType::get(numElements, newElementType),
+ op.getValueToStore());
+ rewriter.replaceOpWithNewOp<vector::StoreOp>(
+ op, bitCast.getResult(), memrefBase,
+ getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices));
+ return success();
+ }
+
+ // Next, handle the case when sub-byte read-modify-write
+ // sequences are needed to emulate a vector store.
+ // Here is an example:
+ //
+ // Vector to store: vector<7xi2>
+ // Value to store: 11 11 11 11 11 11 11 (all ones)
+ //
+ // Destination: memref<12xi2>
+ // Store offset: 2 (i.e. 4 bits into the 1st emulated byte).
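+  //
+  // With 4 i2 elements per byte, byte 0 of the destination holds elements
+  // 0-3, byte 1 holds elements 4-7, and byte 2 holds elements 8-11. The
+  // store touches elements 2..8, so emulating it takes a read-modify-write
+  // of byte 0 (only elements 2 and 3 change), a full store of byte 1
+  // (elements 4-7 are fully overwritten), and a read-modify-write of byte 2
+  // (only element 8 changes).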
+ //
+ // MLIR: vector.store %val, %dest[%c2] : memref<12xi2>, vector<7xi2>
----------------
dcaballe wrote:
nit: MLIR -> Input or Input MLIR?
https://github.com/llvm/llvm-project/pull/115922