[llvm] e854c38 - [VPlan] Manage noalias/alias_scope metadata in VPlan. (#136450)
via llvm-commits
llvm-commits at lists.llvm.org
Fri May 9 03:19:15 PDT 2025
Author: Florian Hahn
Date: 2025-05-09T11:19:12+01:00
New Revision: e854c381c6344b2ff8d1b58aa97008b4ecd9a9c5
URL: https://github.com/llvm/llvm-project/commit/e854c381c6344b2ff8d1b58aa97008b4ecd9a9c5
DIFF: https://github.com/llvm/llvm-project/commit/e854c381c6344b2ff8d1b58aa97008b4ecd9a9c5.diff
LOG: [VPlan] Manage noalias/alias_scope metadata in VPlan. (#136450)
Use the VPIRMetadata helper added in
https://github.com/llvm/llvm-project/pull/135272
to also manage the no-alias metadata added by loop versioning.
Note that this means the no-alias metadata now has to be built up-front,
once; if it ends up unused, it is discarded automatically.
This also fixes a case where incorrect metadata was added to wide
loads/stores that were converted from an interleave group.
Compile-time impact is neutral:
https://llvm-compile-time-tracker.com/compare.php?from=38bf1af41c5425a552a53feb13c71d82873f1c18&to=2fd7844cfdf5ec0f1c2ce0b9b3ae0763245b6922&stat=instructions:u
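For context, a minimal sketch of the annotations involved (illustrative
IR, not taken from an actual test): when a loop is versioned with runtime
memory checks, LoopVersioning tags the memory accesses in the vectorized
loop with !alias.scope/!noalias scopes in a per-loop "LVerDomain" domain,
and it is exactly this metadata that VPIRMetadata now carries.

  vector.body:
    %wide.load = load <4 x i32>, ptr %gep.src, align 4, !alias.scope !1
    store <4 x i32> %val, ptr %gep.dst, align 4, !alias.scope !3, !noalias !1

  !0 = distinct !{!0, !"LVerDomain"}  ; domain for this versioned loop
  !1 = !{!2}                          ; scope list of the load group
  !2 = distinct !{!2, !0}
  !3 = !{!4}                          ; scope list of the store group
  !4 = distinct !{!4, !0}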
Added:
Modified:
llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
llvm/lib/Transforms/Vectorize/VPlan.cpp
llvm/lib/Transforms/Vectorize/VPlan.h
llvm/lib/Transforms/Vectorize/VPlanHelpers.h
llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-metadata.ll
llvm/unittests/Transforms/Vectorize/VPlanTest.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
index 1b06c8b6ee3bd..98140f6fddc01 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
@@ -36,6 +36,7 @@ class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class PredicatedScalarEvolution;
class LoopVectorizeHints;
+class LoopVersioning;
class OptimizationRemarkEmitter;
class TargetTransformInfo;
class TargetLibraryInfo;
@@ -524,7 +525,7 @@ class LoopVectorizationPlanner {
/// returned VPlan is valid for. If no VPlan can be built for the input range,
/// set the largest included VF to the maximum VF for which no plan could be
/// built.
- VPlanPtr tryToBuildVPlanWithVPRecipes(VFRange &Range);
+ VPlanPtr tryToBuildVPlanWithVPRecipes(VFRange &Range, LoopVersioning *LVer);
/// Build VPlans for power-of-2 VF's between \p MinVF and \p MaxVF inclusive,
/// according to the information gathered by Legal when it checked if it is
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 9208fc45a0188..b06bcbee9fdca 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7838,24 +7838,6 @@ DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan(
if (VectorizingEpilogue)
VPlanTransforms::removeDeadRecipes(BestVPlan);
- // Only use noalias metadata when using memory checks guaranteeing no overlap
- // across all iterations.
- const LoopAccessInfo *LAI = Legal->getLAI();
- std::unique_ptr<LoopVersioning> LVer = nullptr;
- if (LAI && !LAI->getRuntimePointerChecking()->getChecks().empty() &&
- !LAI->getRuntimePointerChecking()->getDiffChecks()) {
-
- // We currently don't use LoopVersioning for the actual loop cloning but we
- // still use it to add the noalias metadata.
- // TODO: Find a better way to re-use LoopVersioning functionality to add
- // metadata.
- LVer = std::make_unique<LoopVersioning>(
- *LAI, LAI->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, DT,
- PSE.getSE());
- State.LVer = &*LVer;
- State.LVer->prepareNoAliasMetadata();
- }
-
ILV.printDebugTracesAtStart();
//===------------------------------------------------===//
@@ -8468,11 +8450,12 @@ VPRecipeBuilder::tryToWidenMemory(Instruction *I, ArrayRef<VPValue *> Operands,
}
if (LoadInst *Load = dyn_cast<LoadInst>(I))
return new VPWidenLoadRecipe(*Load, Ptr, Mask, Consecutive, Reverse,
- I->getDebugLoc());
+ VPIRMetadata(*Load, LVer), I->getDebugLoc());
StoreInst *Store = cast<StoreInst>(I);
return new VPWidenStoreRecipe(*Store, Ptr, Operands[0], Mask, Consecutive,
- Reverse, I->getDebugLoc());
+ Reverse, VPIRMetadata(*Store, LVer),
+ I->getDebugLoc());
}
/// Creates a VPWidenIntOrFpInductionRecipe for \p Phi. If needed, it will also
@@ -8845,7 +8828,8 @@ VPRecipeBuilder::handleReplication(Instruction *I, ArrayRef<VPValue *> Operands,
assert((Range.Start.isScalar() || !IsUniform || !IsPredicated ||
(Range.Start.isScalable() && isa<IntrinsicInst>(I))) &&
"Should not predicate a uniform recipe");
- auto *Recipe = new VPReplicateRecipe(I, Operands, IsUniform, BlockInMask);
+ auto *Recipe = new VPReplicateRecipe(I, Operands, IsUniform, BlockInMask,
+ VPIRMetadata(*I, LVer));
return Recipe;
}
@@ -9092,10 +9076,20 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
ElementCount MaxVF) {
assert(OrigLoop->isInnermost() && "Inner loop expected.");
+ const LoopAccessInfo *LAI = Legal->getLAI();
+ LoopVersioning LVer(*LAI, LAI->getRuntimePointerChecking()->getChecks(),
+ OrigLoop, LI, DT, PSE.getSE());
+ if (!LAI->getRuntimePointerChecking()->getChecks().empty() &&
+ !LAI->getRuntimePointerChecking()->getDiffChecks()) {
+ // Only use noalias metadata when using memory checks guaranteeing no
+ // overlap across all iterations.
+ LVer.prepareNoAliasMetadata();
+ }
+
auto MaxVFTimes2 = MaxVF * 2;
for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
VFRange SubRange = {VF, MaxVFTimes2};
- if (auto Plan = tryToBuildVPlanWithVPRecipes(SubRange)) {
+ if (auto Plan = tryToBuildVPlanWithVPRecipes(SubRange, &LVer)) {
bool HasScalarVF = Plan->hasScalarVFOnly();
// Now optimize the initial VPlan.
if (!HasScalarVF)
@@ -9357,7 +9351,8 @@ static void addExitUsersForFirstOrderRecurrences(
}
VPlanPtr
-LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
+LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range,
+ LoopVersioning *LVer) {
using namespace llvm::VPlanPatternMatch;
SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
@@ -9413,7 +9408,7 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
}
VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, &TTI, Legal, CM, PSE,
- Builder);
+ Builder, LVer);
// ---------------------------------------------------------------------------
// Pre-construction: record ingredients whose recipes we'll need to further
@@ -9520,7 +9515,8 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
// Only create recipe for the final invariant store of the reduction.
if (Legal->isInvariantStoreOfReduction(SI)) {
auto *Recipe =
- new VPReplicateRecipe(SI, R.operands(), true /* IsUniform */);
+ new VPReplicateRecipe(SI, R.operands(), true /* IsUniform */,
+ nullptr /*Mask*/, VPIRMetadata(*SI, LVer));
Recipe->insertBefore(*MiddleVPBB, MBIP);
}
R.eraseFromParent();
@@ -9702,7 +9698,7 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
// Collect mapping of IR header phis to header phi recipes, to be used in
// addScalarResumePhis.
VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, &TTI, Legal, CM, PSE,
- Builder);
+ Builder, nullptr /*LVer*/);
for (auto &R : Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
if (isa<VPCanonicalIVPHIRecipe>(&R))
continue;
diff --git a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
index fd0064a34c4c9..caa18e263676b 100644
--- a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
+++ b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
@@ -90,6 +90,10 @@ class VPRecipeBuilder {
/// A mapping of partial reduction exit instructions to their scaling factor.
DenseMap<const Instruction *, unsigned> ScaledReductionMap;
+ /// Loop versioning instance for getting noalias metadata guaranteed by
+ /// runtime checks.
+ LoopVersioning *LVer;
+
/// Check if \p I can be widened at the start of \p Range and possibly
/// decrease the range such that the returned value holds for the entire \p
/// Range. The function should not be called for memory instructions or calls.
@@ -155,9 +159,10 @@ class VPRecipeBuilder {
const TargetTransformInfo *TTI,
LoopVectorizationLegality *Legal,
LoopVectorizationCostModel &CM,
- PredicatedScalarEvolution &PSE, VPBuilder &Builder)
+ PredicatedScalarEvolution &PSE, VPBuilder &Builder,
+ LoopVersioning *LVer)
: Plan(Plan), OrigLoop(OrigLoop), TLI(TLI), TTI(TTI), Legal(Legal),
- CM(CM), PSE(PSE), Builder(Builder) {}
+ CM(CM), PSE(PSE), Builder(Builder), LVer(LVer) {}
std::optional<unsigned> getScalingForReduction(const Instruction *ExitInst) {
auto It = ScaledReductionMap.find(ExitInst);
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index 90331a2ce2ce3..167aff737d3fd 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -220,8 +220,8 @@ VPTransformState::VPTransformState(const TargetTransformInfo *TTI,
IRBuilderBase &Builder, VPlan *Plan,
Loop *CurrentParentLoop, Type *CanonicalIVTy)
: TTI(TTI), VF(VF), CFG(DT), LI(LI), AC(AC), Builder(Builder), Plan(Plan),
- CurrentParentLoop(CurrentParentLoop), LVer(nullptr),
- TypeAnalysis(CanonicalIVTy), VPDT(*Plan) {}
+ CurrentParentLoop(CurrentParentLoop), TypeAnalysis(CanonicalIVTy),
+ VPDT(*Plan) {}
Value *VPTransformState::get(const VPValue *Def, const VPLane &Lane) {
if (Def->isLiveIn())
@@ -350,14 +350,6 @@ Value *VPTransformState::get(const VPValue *Def, bool NeedsScalar) {
return VectorValue;
}
-void VPTransformState::addNewMetadata(Instruction *To,
- const Instruction *Orig) {
- // If the loop was versioned with memchecks, add the corresponding no-alias
- // metadata.
- if (LVer && isa<LoadInst, StoreInst>(Orig))
- LVer->annotateInstWithNoAlias(To, Orig);
-}
-
void VPTransformState::setDebugLocFrom(DebugLoc DL) {
const DILocation *DIL = DL;
// When a FSDiscriminator is enabled, we don't need to add the multiply
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index e85d6ea9f5966..136ee36636565 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -65,6 +65,7 @@ class VPReplicateRecipe;
class VPlanSlp;
class Value;
class LoopVectorizationCostModel;
+class LoopVersioning;
struct VPCostContext;
@@ -1236,11 +1237,20 @@ struct VPIRPhi : public VPIRInstruction {
class VPIRMetadata {
SmallVector<std::pair<unsigned, MDNode *>> Metadata;
-protected:
+public:
VPIRMetadata() {}
+
+ /// Adds metadata that can be preserved from the original instruction
+ /// \p I.
VPIRMetadata(Instruction &I) { getMetadataToPropagate(&I, Metadata); }
-public:
+ /// Adds metadata that can be preserved from the original instruction
+ /// \p I and noalias metadata guaranteed by runtime checks using \p LVer.
+ VPIRMetadata(Instruction &I, LoopVersioning *LVer);
+
+ /// Copy constructor for cloning.
+ VPIRMetadata(const VPIRMetadata &Other) : Metadata(Other.Metadata) {}
+
/// Add all metadata to \p I.
void applyMetadata(Instruction &I) const;
};
@@ -2511,7 +2521,7 @@ class VPReductionEVLRecipe : public VPReductionRecipe {
/// copies of the original scalar type, one per lane, instead of producing a
/// single copy of widened type for all lanes. If the instruction is known to be
/// uniform only one copy, per lane zero, will be generated.
-class VPReplicateRecipe : public VPRecipeWithIRFlags {
+class VPReplicateRecipe : public VPRecipeWithIRFlags, public VPIRMetadata {
/// Indicator if only a single replica per lane is needed.
bool IsUniform;
@@ -2520,9 +2530,10 @@ class VPReplicateRecipe : public VPRecipeWithIRFlags {
public:
VPReplicateRecipe(Instruction *I, ArrayRef<VPValue *> Operands,
- bool IsUniform, VPValue *Mask = nullptr)
+ bool IsUniform, VPValue *Mask = nullptr,
+ VPIRMetadata Metadata = {})
: VPRecipeWithIRFlags(VPDef::VPReplicateSC, Operands, *I),
- IsUniform(IsUniform), IsPredicated(Mask) {
+ VPIRMetadata(Metadata), IsUniform(IsUniform), IsPredicated(Mask) {
if (Mask)
addOperand(Mask);
}
@@ -2532,7 +2543,7 @@ class VPReplicateRecipe : public VPRecipeWithIRFlags {
VPReplicateRecipe *clone() override {
auto *Copy =
new VPReplicateRecipe(getUnderlyingInstr(), operands(), IsUniform,
- isPredicated() ? getMask() : nullptr);
+ isPredicated() ? getMask() : nullptr, *this);
Copy->transferFlags(*this);
return Copy;
}
@@ -2692,8 +2703,9 @@ class VPWidenMemoryRecipe : public VPRecipeBase, public VPIRMetadata {
VPWidenMemoryRecipe(const char unsigned SC, Instruction &I,
std::initializer_list<VPValue *> Operands,
- bool Consecutive, bool Reverse, DebugLoc DL)
- : VPRecipeBase(SC, Operands, DL), VPIRMetadata(I), Ingredient(I),
+ bool Consecutive, bool Reverse,
+ const VPIRMetadata &Metadata, DebugLoc DL)
+ : VPRecipeBase(SC, Operands, DL), VPIRMetadata(Metadata), Ingredient(I),
Consecutive(Consecutive), Reverse(Reverse) {
assert((Consecutive || !Reverse) && "Reverse implies consecutive");
}
@@ -2751,16 +2763,17 @@ class VPWidenMemoryRecipe : public VPRecipeBase, public VPIRMetadata {
/// optional mask.
struct VPWidenLoadRecipe final : public VPWidenMemoryRecipe, public VPValue {
VPWidenLoadRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask,
- bool Consecutive, bool Reverse, DebugLoc DL)
+ bool Consecutive, bool Reverse,
+ const VPIRMetadata &Metadata, DebugLoc DL)
: VPWidenMemoryRecipe(VPDef::VPWidenLoadSC, Load, {Addr}, Consecutive,
- Reverse, DL),
+ Reverse, Metadata, DL),
VPValue(this, &Load) {
setMask(Mask);
}
VPWidenLoadRecipe *clone() override {
return new VPWidenLoadRecipe(cast<LoadInst>(Ingredient), getAddr(),
- getMask(), Consecutive, Reverse,
+ getMask(), Consecutive, Reverse, *this,
getDebugLoc());
}
@@ -2792,7 +2805,7 @@ struct VPWidenLoadEVLRecipe final : public VPWidenMemoryRecipe, public VPValue {
VPWidenLoadEVLRecipe(VPWidenLoadRecipe &L, VPValue &EVL, VPValue *Mask)
: VPWidenMemoryRecipe(VPDef::VPWidenLoadEVLSC, L.getIngredient(),
{L.getAddr(), &EVL}, L.isConsecutive(),
- L.isReverse(), L.getDebugLoc()),
+ L.isReverse(), L, L.getDebugLoc()),
VPValue(this, &getIngredient()) {
setMask(Mask);
}
@@ -2829,16 +2842,17 @@ struct VPWidenLoadEVLRecipe final : public VPWidenMemoryRecipe, public VPValue {
/// to store to and an optional mask.
struct VPWidenStoreRecipe final : public VPWidenMemoryRecipe {
VPWidenStoreRecipe(StoreInst &Store, VPValue *Addr, VPValue *StoredVal,
- VPValue *Mask, bool Consecutive, bool Reverse, DebugLoc DL)
+ VPValue *Mask, bool Consecutive, bool Reverse,
+ const VPIRMetadata &Metadata, DebugLoc DL)
: VPWidenMemoryRecipe(VPDef::VPWidenStoreSC, Store, {Addr, StoredVal},
- Consecutive, Reverse, DL) {
+ Consecutive, Reverse, Metadata, DL) {
setMask(Mask);
}
VPWidenStoreRecipe *clone() override {
return new VPWidenStoreRecipe(cast<StoreInst>(Ingredient), getAddr(),
getStoredValue(), getMask(), Consecutive,
- Reverse, getDebugLoc());
+ Reverse, *this, getDebugLoc());
}
VP_CLASSOF_IMPL(VPDef::VPWidenStoreSC);
@@ -2872,7 +2886,8 @@ struct VPWidenStoreEVLRecipe final : public VPWidenMemoryRecipe {
VPWidenStoreEVLRecipe(VPWidenStoreRecipe &S, VPValue &EVL, VPValue *Mask)
: VPWidenMemoryRecipe(VPDef::VPWidenStoreEVLSC, S.getIngredient(),
{S.getAddr(), S.getStoredValue(), &EVL},
- S.isConsecutive(), S.isReverse(), S.getDebugLoc()) {
+ S.isConsecutive(), S.isReverse(), S,
+ S.getDebugLoc()) {
setMask(Mask);
}
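The recurring pattern in the header changes above: each affected recipe
now mixes in VPIRMetadata, captures its metadata once at construction,
forwards it through clone(), and stamps it onto the generated instruction
in execute(). A simplified, self-contained C++ sketch of that pattern
(reduced stand-in types, not the actual LLVM classes):

  #include <utility>
  #include <vector>

  struct MDNode {};                      // stand-in for llvm::MDNode
  struct Instruction {                   // stand-in for llvm::Instruction
    void setMetadata(unsigned Kind, MDNode *Node) { (void)Kind; (void)Node; }
  };

  // Mixin owning the (kind, node) pairs captured from an original
  // instruction; cheap to copy, so cloning a recipe just copies it.
  class IRMetadata {
    std::vector<std::pair<unsigned, MDNode *>> Metadata;

  public:
    IRMetadata() = default;
    IRMetadata(const IRMetadata &) = default;

    void add(unsigned Kind, MDNode *Node) { Metadata.emplace_back(Kind, Node); }

    // Stamp every captured kind onto a newly generated instruction.
    void apply(Instruction &I) const {
      for (const auto &[Kind, Node] : Metadata)
        I.setMetadata(Kind, Node);
    }
  };

  // A recipe inherits the mixin: metadata flows ctor -> clone -> execute.
  class WidenLoadRecipe : public IRMetadata {
  public:
    explicit WidenLoadRecipe(const IRMetadata &MD) : IRMetadata(MD) {}
    WidenLoadRecipe *clone() const { return new WidenLoadRecipe(*this); }
    void execute(Instruction &NewLoad) const { apply(NewLoad); }
  };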
diff --git a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
index c752931ba8933..1d42c8f5f3737 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
@@ -39,7 +39,6 @@ class VPBasicBlock;
class VPRegionBlock;
class VPlan;
class Value;
-class LoopVersioning;
/// Returns a calculation for the total number of elements for a given \p VF.
/// For fixed width vectors this value is a constant, whereas for scalable
@@ -284,13 +283,6 @@ struct VPTransformState {
Iter->second[CacheIdx] = V;
}
- /// Add additional metadata to \p To that was not present on \p Orig.
- ///
- /// Currently this is used to add the noalias annotations based on the
- /// inserted memchecks. Use this for instructions that are *cloned* into the
- /// vector loop.
- void addNewMetadata(Instruction *To, const Instruction *Orig);
-
/// Set the debug location in the builder using the debug location \p DL.
void setDebugLocFrom(DebugLoc DL);
@@ -339,13 +331,6 @@ struct VPTransformState {
/// The parent loop object for the current scope, or nullptr.
Loop *CurrentParentLoop = nullptr;
- /// LoopVersioning. It's only set up (non-null) if memchecks were
- /// used.
- ///
- /// This is currently only used to add no-alias metadata based on the
- /// memchecks. The actually versioning is performed manually.
- LoopVersioning *LVer = nullptr;
-
/// VPlan-based type analysis.
VPTypeAnalysis TypeAnalysis;
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 7239503470720..056a9aa0565d3 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -37,6 +37,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <cassert>
@@ -1229,6 +1230,17 @@ void VPIRPhi::print(raw_ostream &O, const Twine &Indent,
}
#endif
+VPIRMetadata::VPIRMetadata(Instruction &I, LoopVersioning *LVer)
+ : VPIRMetadata(I) {
+ if (!LVer || !isa<LoadInst, StoreInst>(&I))
+ return;
+ const auto &[AliasScopeMD, NoAliasMD] = LVer->getNoAliasMetadataFor(&I);
+ if (AliasScopeMD)
+ Metadata.emplace_back(LLVMContext::MD_alias_scope, AliasScopeMD);
+ if (NoAliasMD)
+ Metadata.emplace_back(LLVMContext::MD_noalias, NoAliasMD);
+}
+
void VPIRMetadata::applyMetadata(Instruction &I) const {
for (const auto &[Kind, Node] : Metadata)
I.setMetadata(Kind, Node);
@@ -2590,6 +2602,7 @@ static void scalarizeInstruction(const Instruction *Instr,
}
RepRecipe->applyFlags(*Cloned);
+ RepRecipe->applyMetadata(*Cloned);
if (auto DL = RepRecipe->getDebugLoc())
State.setDebugLocFrom(DL);
@@ -2603,7 +2616,6 @@ static void scalarizeInstruction(const Instruction *Instr,
InputLane = VPLane::getFirstLane();
Cloned->setOperand(I.index(), State.get(Operand, InputLane));
}
- State.addNewMetadata(Cloned, Instr);
// Place the cloned scalar in the new loop.
State.Builder.Insert(Cloned);
@@ -2841,7 +2853,6 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
}
void VPWidenLoadRecipe::execute(VPTransformState &State) {
-
Type *ScalarDataTy = getLoadStoreType(&Ingredient);
auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
const Align Alignment = getLoadStoreAlignment(&Ingredient);
@@ -2869,9 +2880,6 @@ void VPWidenLoadRecipe::execute(VPTransformState &State) {
} else {
NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
}
- // Add metadata to the load, but set the result to the reverse shuffle, if
- // needed.
- State.addNewMetadata(cast<Instruction>(NewLI), &Ingredient);
applyMetadata(*cast<Instruction>(NewLI));
if (Reverse)
NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
@@ -2900,7 +2908,6 @@ static Instruction *createReverseEVL(IRBuilderBase &Builder, Value *Operand,
}
void VPWidenLoadEVLRecipe::execute(VPTransformState &State) {
-
Type *ScalarDataTy = getLoadStoreType(&Ingredient);
auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
const Align Alignment = getLoadStoreAlignment(&Ingredient);
@@ -2931,7 +2938,6 @@ void VPWidenLoadEVLRecipe::execute(VPTransformState &State) {
}
NewLI->addParamAttr(
0, Attribute::getWithAlignment(NewLI->getContext(), Alignment));
- State.addNewMetadata(NewLI, &Ingredient);
applyMetadata(*NewLI);
Instruction *Res = NewLI;
if (isReverse())
@@ -3006,7 +3012,6 @@ void VPWidenStoreRecipe::execute(VPTransformState &State) {
NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
else
NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
- State.addNewMetadata(NewSI, &Ingredient);
applyMetadata(*NewSI);
}
@@ -3019,7 +3024,6 @@ void VPWidenStoreRecipe::print(raw_ostream &O, const Twine &Indent,
#endif
void VPWidenStoreEVLRecipe::execute(VPTransformState &State) {
-
VPValue *StoredValue = getStoredValue();
bool CreateScatter = !isConsecutive();
const Align Alignment = getLoadStoreAlignment(&Ingredient);
@@ -3053,7 +3057,6 @@ void VPWidenStoreEVLRecipe::execute(VPTransformState &State) {
}
NewSI->addParamAttr(
1, Attribute::getWithAlignment(NewSI->getContext(), Alignment));
- State.addNewMetadata(NewSI, &Ingredient);
applyMetadata(*NewSI);
}
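Putting the new VPIRMetadata(Instruction &, LoopVersioning *) constructor
and applyMetadata() together, the per-recipe flow is roughly the following
condensed paraphrase of the code above (not a verbatim excerpt):

  // At VPlan-construction time: capture the propagatable metadata plus,
  // for loads/stores, the alias.scope/noalias nodes prepared by
  // LoopVersioning::prepareNoAliasMetadata().
  VPIRMetadata MD(*Load, LVer);

  // ... the recipe carries MD through transforms and cloning ...

  // At execution time: stamp everything onto the generated instruction,
  // replacing the old State.addNewMetadata() hook.
  MD.applyMetadata(*NewLoadInst);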
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 79ddb8bf0b09b..b10b47cc1282a 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -80,13 +80,13 @@ bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
NewRecipe = new VPWidenLoadRecipe(
*Load, Ingredient.getOperand(0), nullptr /*Mask*/,
- false /*Consecutive*/, false /*Reverse*/,
+ false /*Consecutive*/, false /*Reverse*/, VPIRMetadata(*Load),
Ingredient.getDebugLoc());
} else if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
NewRecipe = new VPWidenStoreRecipe(
*Store, Ingredient.getOperand(1), Ingredient.getOperand(0),
nullptr /*Mask*/, false /*Consecutive*/, false /*Reverse*/,
- Ingredient.getDebugLoc());
+ VPIRMetadata(*Store), Ingredient.getDebugLoc());
} else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
NewRecipe = new VPWidenGEPRecipe(GEP, Ingredient.operands());
} else if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
@@ -179,11 +179,13 @@ static bool sinkScalarOperands(VPlan &Plan) {
if (ScalarVFOnly)
continue;
VPSingleDefRecipe *Clone;
- if (isa<VPReplicateRecipe>(SinkCandidate)) {
+ if (auto *SinkCandidateRepR =
+ dyn_cast<VPReplicateRecipe>(SinkCandidate)) {
// TODO: Handle converting to uniform recipes as separate transform,
// then cloning should be sufficient here.
Instruction *I = SinkCandidate->getUnderlyingInstr();
- Clone = new VPReplicateRecipe(I, SinkCandidate->operands(), true);
+ Clone = new VPReplicateRecipe(I, SinkCandidate->operands(), true,
+ nullptr /*Mask*/, *SinkCandidateRepR);
// TODO: add ".cloned" suffix to name of Clone's VPValue.
} else {
Clone = SinkCandidate->clone();
@@ -345,7 +347,7 @@ static VPRegionBlock *createReplicateRegion(VPReplicateRecipe *PredRecipe,
auto *RecipeWithoutMask = new VPReplicateRecipe(
PredRecipe->getUnderlyingInstr(),
make_range(PredRecipe->op_begin(), std::prev(PredRecipe->op_end())),
- PredRecipe->isUniform());
+ PredRecipe->isUniform(), nullptr /*Mask*/, *PredRecipe);
auto *Pred =
Plan.createVPBasicBlock(Twine(RegionName) + ".if", RecipeWithoutMask);
@@ -2784,7 +2786,7 @@ void VPlanTransforms::narrowInterleaveGroups(VPlan &Plan, ElementCount VF,
auto *L = new VPWidenLoadRecipe(
*cast<LoadInst>(LoadGroup->getInterleaveGroup()->getInsertPos()),
LoadGroup->getAddr(), LoadGroup->getMask(), /*Consecutive=*/true,
- /*Reverse=*/false, LoadGroup->getDebugLoc());
+ /*Reverse=*/false, {}, LoadGroup->getDebugLoc());
L->insertBefore(LoadGroup);
return L;
}
@@ -2794,7 +2796,8 @@ void VPlanTransforms::narrowInterleaveGroups(VPlan &Plan, ElementCount VF,
// Narrow wide load to uniform scalar load, as transformed VPlan will only
// process one original iteration.
auto *N = new VPReplicateRecipe(&WideLoad->getIngredient(),
- WideLoad->operands(), /*IsUniform*/ true);
+ WideLoad->operands(), /*IsUniform*/ true,
+ /*Mask*/ nullptr, *WideLoad);
N->insertBefore(WideLoad);
return N;
};
@@ -2815,7 +2818,7 @@ void VPlanTransforms::narrowInterleaveGroups(VPlan &Plan, ElementCount VF,
auto *S = new VPWidenStoreRecipe(
*cast<StoreInst>(StoreGroup->getInterleaveGroup()->getInsertPos()),
StoreGroup->getAddr(), Res, nullptr, /*Consecutive=*/true,
- /*Reverse=*/false, StoreGroup->getDebugLoc());
+ /*Reverse=*/false, {}, StoreGroup->getDebugLoc());
S->insertBefore(StoreGroup);
StoreGroup->eraseFromParent();
}
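This is also where the metadata miscompile mentioned in the log message
gets fixed: when narrowInterleaveGroups converts an interleave group into
a single wide load/store, the new recipe is now created with empty
metadata ({}) instead of inheriting the metadata of one group member.
Sketch of the difference, modeled on the test change below (IR
abbreviated):

  ; Before: the wide load kept !tbaa from a single member of the group,
  ; overclaiming for the other members the combined access also covers.
  %wide.load = load <2 x i64>, ptr %p, align 8, !tbaa !0

  ; After: no per-member metadata is propagated to the combined access.
  %wide.load = load <2 x i64>, ptr %p, align 8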
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
index 9b6a1686eee6e..15c5258b57cc9 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
@@ -188,17 +188,17 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 {
; DEFAULT-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; DEFAULT: vec.epilog.vector.body:
; DEFAULT-NEXT: [[INDEX5:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT8:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; DEFAULT-NEXT: [[TMP16:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META11:![0-9]+]]
+; DEFAULT-NEXT: [[TMP16:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META5]]
; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <8 x i64> poison, i64 [[TMP16]], i64 0
; DEFAULT-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT7]], <8 x i64> poison, <8 x i32> zeroinitializer
; DEFAULT-NEXT: [[TMP18:%.*]] = trunc <8 x i64> [[BROADCAST_SPLAT8]] to <8 x i8>
; DEFAULT-NEXT: [[TMP14:%.*]] = and <8 x i8> [[TMP18]], [[TMP15]]
; DEFAULT-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX5]]
; DEFAULT-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[TMP26]], i32 0
-; DEFAULT-NEXT: store <8 x i8> [[TMP14]], ptr [[TMP27]], align 1, !alias.scope [[META14:![0-9]+]], !noalias [[META11]]
+; DEFAULT-NEXT: store <8 x i8> [[TMP14]], ptr [[TMP27]], align 1, !alias.scope [[META8]], !noalias [[META5]]
; DEFAULT-NEXT: [[INDEX_NEXT8]] = add nuw i64 [[INDEX5]], 8
; DEFAULT-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT8]], 1000
-; DEFAULT-NEXT: br i1 [[TMP17]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[TMP17]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; DEFAULT: vec.epilog.middle.block:
; DEFAULT-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; DEFAULT: vec.epilog.scalar.ph:
@@ -214,7 +214,7 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 {
; DEFAULT-NEXT: store i8 [[TRUNC]], ptr [[GEP]], align 1
; DEFAULT-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; DEFAULT-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP17:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP12:![0-9]+]]
; DEFAULT: exit:
; DEFAULT-NEXT: ret void
;
@@ -313,13 +313,8 @@ attributes #1 = { vscale_range(1,16) "target-features"="+sve" }
; DEFAULT: [[META8]] = !{[[META9:![0-9]+]]}
; DEFAULT: [[META9]] = distinct !{[[META9]], [[META7]]}
; DEFAULT: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
-; DEFAULT: [[META11]] = !{[[META12:![0-9]+]]}
-; DEFAULT: [[META12]] = distinct !{[[META12]], [[META13:![0-9]+]]}
-; DEFAULT: [[META13]] = distinct !{[[META13]], !"LVerDomain"}
-; DEFAULT: [[META14]] = !{[[META15:![0-9]+]]}
-; DEFAULT: [[META15]] = distinct !{[[META15]], [[META13]]}
-; DEFAULT: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]}
-; DEFAULT: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]]}
+; DEFAULT: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]]}
+; DEFAULT: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]]}
;.
; PRED: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; PRED: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-metadata.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-metadata.ll
index bdcd56fe5e939..0a83ff6e1fdda 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-metadata.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-metadata.ll
@@ -17,11 +17,11 @@ define void @load_store_interleave_group_with_metadata(ptr noalias %data) {
; VF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; VF2-NEXT: [[TMP0:%.*]] = shl nsw i64 [[INDEX]], 1
; VF2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP0]]
-; VF2-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP1]], align 8, !tbaa [[TBAA0:![0-9]+]]
-; VF2-NEXT: store <2 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8, !tbaa [[TBAA4:![0-9]+]]
+; VF2-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP1]], align 8
+; VF2-NEXT: store <2 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8
; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 1
; VF2-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
-; VF2-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; VF2-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VF2: [[MIDDLE_BLOCK]]:
; VF2-NEXT: br i1 true, [[EXIT:label %.*]], label %[[SCALAR_PH]]
; VF2: [[SCALAR_PH]]:
@@ -55,15 +55,9 @@ exit:
!5 = !{ i64 0, i64 2 }
;.
-; VF2: [[TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0, i64 0}
-; VF2: [[META1]] = !{!"A", [[META2:![0-9]+]]}
-; VF2: [[META2]] = !{!"omnipotent char", [[META3:![0-9]+]], i64 0}
-; VF2: [[META3]] = !{!"Simple C/C++ TBAA"}
-; VF2: [[TBAA4]] = !{[[META5:![0-9]+]], [[META5]], i64 0, i64 0}
-; VF2: [[META5]] = !{!"B", [[META2]]}
-; VF2: [[LOOP6]] = distinct !{[[LOOP6]], [[META7:![0-9]+]], [[META8:![0-9]+]]}
-; VF2: [[META7]] = !{!"llvm.loop.isvectorized", i32 1}
-; VF2: [[META8]] = !{!"llvm.loop.unroll.runtime.disable"}
+; VF2: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; VF2: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; VF2: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
;.
; VF4: [[TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0}
; VF4: [[META1]] = !{!"omnipotent char", [[META2:![0-9]+]], i64 0}
diff --git a/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp b/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp
index cb7545171744e..eec7b4480b75d 100644
--- a/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp
+++ b/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp
@@ -1084,7 +1084,7 @@ TEST_F(VPRecipeTest, CastVPWidenMemoryRecipeToVPUserAndVPDef) {
new LoadInst(Int32, PoisonValue::get(Int32Ptr), "", false, Align(1));
VPValue *Addr = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 1));
VPValue *Mask = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 2));
- VPWidenLoadRecipe Recipe(*Load, Addr, Mask, true, false, {});
+ VPWidenLoadRecipe Recipe(*Load, Addr, Mask, true, false, {}, {});
EXPECT_TRUE(isa<VPUser>(&Recipe));
VPRecipeBase *BaseR = &Recipe;
EXPECT_TRUE(isa<VPUser>(BaseR));
@@ -1201,7 +1201,7 @@ TEST_F(VPRecipeTest, MayHaveSideEffectsAndMayReadWriteMemory) {
new LoadInst(Int32, PoisonValue::get(Int32Ptr), "", false, Align(1));
VPValue *Mask = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 1));
VPValue *Addr = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 2));
- VPWidenLoadRecipe Recipe(*Load, Addr, Mask, true, false, {});
+ VPWidenLoadRecipe Recipe(*Load, Addr, Mask, true, false, {}, {});
EXPECT_FALSE(Recipe.mayHaveSideEffects());
EXPECT_TRUE(Recipe.mayReadFromMemory());
EXPECT_FALSE(Recipe.mayWriteToMemory());
@@ -1215,7 +1215,8 @@ TEST_F(VPRecipeTest, MayHaveSideEffectsAndMayReadWriteMemory) {
VPValue *Mask = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 1));
VPValue *Addr = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 2));
VPValue *StoredV = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 3));
- VPWidenStoreRecipe Recipe(*Store, Addr, StoredV, Mask, false, false, {});
+ VPWidenStoreRecipe Recipe(*Store, Addr, StoredV, Mask, false, false, {},
+ {});
EXPECT_TRUE(Recipe.mayHaveSideEffects());
EXPECT_FALSE(Recipe.mayReadFromMemory());
EXPECT_TRUE(Recipe.mayWriteToMemory());