[llvm] [VPlan] Split VPWidenMemoryInstructionRecipe (NFCI). (PR #87411)
via llvm-commits
llvm-commits at lists.llvm.org
Sun Apr 14 08:12:00 PDT 2024
================
@@ -9409,92 +9403,29 @@ static Instruction *lowerLoadUsingVectorIntrinsics(IRBuilderBase &Builder,
   return Call;
 }
-void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
-  VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
-
+void VPWidenLoadRecipe::execute(VPTransformState &State) {
   // Attempt to issue a wide load.
-  LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
-  StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
-
-  assert((LI || SI) && "Invalid Load/Store instruction");
-  assert((!SI || StoredValue) && "No stored value provided for widened store");
-  assert((!LI || !StoredValue) && "Stored value provided for widened load");
+  auto *LI = cast<LoadInst>(&Ingredient);
   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
-
   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
   const Align Alignment = getLoadStoreAlignment(&Ingredient);
-  bool CreateGatherScatter = !isConsecutive();
+  bool CreateGather = !isConsecutive();
   auto &Builder = State.Builder;
-  InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
-  bool isMaskRequired = getMask();
-  if (isMaskRequired) {
-    // Mask reversal is only needed for non-all-one (null) masks, as reverse of
-    // a null all-one mask is a null mask.
-    for (unsigned Part = 0; Part < State.UF; ++Part) {
-      Value *Mask = State.get(getMask(), Part);
-      if (isReverse())
-        Mask = Builder.CreateVectorReverse(Mask, "reverse");
-      BlockInMaskParts[Part] = Mask;
-    }
-  }
-
-  // Handle Stores:
-  if (SI) {
-    State.setDebugLocFrom(getDebugLoc());
-
-    for (unsigned Part = 0; Part < State.UF; ++Part) {
-      Instruction *NewSI = nullptr;
-      Value *StoredVal = State.get(StoredValue, Part);
-      // TODO: split this into several classes for better design.
-      if (State.EVL) {
-        assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
-                                "explicit vector length.");
-        assert(cast<VPInstruction>(State.EVL)->getOpcode() ==
-                   VPInstruction::ExplicitVectorLength &&
-               "EVL must be VPInstruction::ExplicitVectorLength.");
-        Value *EVL = State.get(State.EVL, VPIteration(0, 0));
-        // If EVL is not nullptr, then EVL must be a valid value set during plan
-        // creation, possibly default value = whole vector register length. EVL
-        // is created only if TTI prefers predicated vectorization, thus if EVL
-        // is not nullptr it also implies preference for predicated
-        // vectorization.
-        // FIXME: Support reverse store after vp_reverse is added.
-        Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
-        NewSI = lowerStoreUsingVectorIntrinsics(
-            Builder, State.get(getAddr(), Part, !CreateGatherScatter),
-            StoredVal, CreateGatherScatter, MaskPart, EVL, Alignment);
-      } else if (CreateGatherScatter) {
-        Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
-        Value *VectorGep = State.get(getAddr(), Part);
-        NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
-                                            MaskPart);
-      } else {
-        if (isReverse()) {
-          // If we store to reverse consecutive memory locations, then we need
-          // to reverse the order of elements in the stored value.
-          StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
-          // We don't want to update the value in the map as it might be used in
-          // another expression. So don't call resetVectorValue(StoredVal).
-        }
-        auto *VecPtr = State.get(getAddr(), Part, /*IsScalar*/ true);
-        if (isMaskRequired)
-          NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
-                                            BlockInMaskParts[Part]);
-        else
-          NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
-      }
-      State.addMetadata(NewSI, SI);
-    }
-    return;
-  }
-
   // Handle loads.
----------------
ayalz wrote:
nit: remove - redundant?
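
For reference, the nit appears to point at the "// Handle loads." context line directly above: once the store handling is split into its own recipe, VPWidenLoadRecipe::execute deals with nothing but loads, so the header comment no longer distinguishes anything. A minimal illustration of how the start of the function would read with that comment dropped, using only the lines quoted in the hunk above (the remainder of the body is elided here):

void VPWidenLoadRecipe::execute(VPTransformState &State) {
  // Attempt to issue a wide load.
  auto *LI = cast<LoadInst>(&Ingredient);
  Type *ScalarDataTy = getLoadStoreType(&Ingredient);
  auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
  const Align Alignment = getLoadStoreAlignment(&Ingredient);
  bool CreateGather = !isConsecutive();
  auto &Builder = State.Builder;
  // (The "// Handle loads." header would simply be removed here, per the nit.)
  ...
}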
https://github.com/llvm/llvm-project/pull/87411