[llvm] [VPlan] Manage noalias/alias_scope metadata in VPlan. (NFC) (PR #136450)
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Sat Apr 19 12:27:19 PDT 2025
https://github.com/fhahn created https://github.com/llvm/llvm-project/pull/136450
Use VPIRMetadata added in https://github.com/llvm/llvm-project/pull/135272
to also manage no-alias metadata added by versioning.
Note that this means we have to build the no-alias metadata up-front
once. If it is not used, it will be discarded automatically.
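
For readers skimming the patch, the core mechanism is small: VPIRMetadata stores the
(kind, node) metadata pairs collected when a recipe is created and re-applies them to
whatever instruction the recipe later generates. Below is a minimal, self-contained C++
sketch of that pattern; the names (FakeInstruction, MetadataHolder) are illustrative
stand-ins, not the LLVM classes.

  #include <cstdio>
  #include <string>
  #include <utility>
  #include <vector>

  struct FakeInstruction {
    std::vector<std::pair<unsigned, std::string>> MD;
    void setMetadata(unsigned Kind, std::string Node) {
      MD.emplace_back(Kind, std::move(Node));
    }
  };

  class MetadataHolder {
    std::vector<std::pair<unsigned, std::string>> Metadata;

  public:
    explicit MetadataHolder(std::vector<std::pair<unsigned, std::string>> MD)
        : Metadata(std::move(MD)) {}

    // Stamp every stored (kind, node) pair onto a freshly generated instruction.
    void applyMetadata(FakeInstruction &I) const {
      for (const auto &[Kind, Node] : Metadata)
        I.setMetadata(Kind, Node);
    }
  };

  int main() {
    // Built once up front (e.g. noalias scopes from loop versioning); if the
    // plan holding it is thrown away, the list is simply discarded with it.
    MetadataHolder Holder({{7u, "!alias.scope"}, {8u, "!noalias"}});
    FakeInstruction WidenedLoad;
    Holder.applyMetadata(WidenedLoad);
    std::printf("attached %zu metadata nodes\n", WidenedLoad.MD.size());
    return 0;
  }

In the actual patch, the widen/replicate recipes inherit from VPIRMetadata and call
applyMetadata() from their execute() implementations, as shown in the diffs below.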
From 38bf1af41c5425a552a53feb13c71d82873f1c18 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Wed, 9 Apr 2025 12:35:39 +0100
Subject: [PATCH 1/2] [VPlan] Manage instruction metadata in VPlan.
Add a new helper to manage IR metadata that can be propagated to the
instructions generated for recipes.
This helps to remove a number of remaining uses of getUnderlyingInstr
during VPlan execution.
Add metadata on cloning.
!fixup address latest comments, thanks
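
One way to see why this removes getUnderlyingInstr uses during execution: once a recipe
owns its copy of the propagatable metadata, cloning and code generation no longer need to
reach back to the original IR instruction. A hedged sketch of that cloning pattern, using
a hypothetical RecipeSketch type rather than the real recipe hierarchy:

  #include <memory>
  #include <string>
  #include <utility>
  #include <vector>

  using MDPair = std::pair<unsigned, std::string>;

  class RecipeSketch {
    std::vector<MDPair> Metadata; // owned copy, taken at construction

  public:
    explicit RecipeSketch(std::vector<MDPair> MD) : Metadata(std::move(MD)) {}

    const std::vector<MDPair> &getMetadata() const { return Metadata; }

    // A clone is fully described by the recipe's own state; no lookup of the
    // underlying instruction is needed to reproduce the metadata.
    std::unique_ptr<RecipeSketch> clone() const {
      return std::make_unique<RecipeSketch>(getMetadata());
    }
  };

  int main() {
    RecipeSketch R({{7u, "!alias.scope"}});
    auto Copy = R.clone();
    return Copy->getMetadata().size() == 1 ? 0 : 1;
  }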
---
.../Transforms/Vectorize/LoopVectorize.cpp | 34 +++--
.../Transforms/Vectorize/VPRecipeBuilder.h | 5 +
llvm/lib/Transforms/Vectorize/VPlan.cpp | 20 +--
llvm/lib/Transforms/Vectorize/VPlan.h | 120 +++++++++++-------
llvm/lib/Transforms/Vectorize/VPlanHelpers.h | 9 +-
.../lib/Transforms/Vectorize/VPlanRecipes.cpp | 38 ++++--
.../Transforms/Vectorize/VPlanTransforms.cpp | 31 +++--
.../Transforms/Vectorize/VPlanTest.cpp | 24 ++--
8 files changed, 168 insertions(+), 113 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 7a5f618d09e95..398e575199b21 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -8599,11 +8599,13 @@ VPRecipeBuilder::tryToWidenMemory(Instruction *I, ArrayRef<VPValue *> Operands,
}
if (LoadInst *Load = dyn_cast<LoadInst>(I))
return new VPWidenLoadRecipe(*Load, Ptr, Mask, Consecutive, Reverse,
+ getMetadataToPropagate(Load),
I->getDebugLoc());
StoreInst *Store = cast<StoreInst>(I);
return new VPWidenStoreRecipe(*Store, Ptr, Operands[0], Mask, Consecutive,
- Reverse, I->getDebugLoc());
+ Reverse, getMetadataToPropagate(Store),
+ I->getDebugLoc());
}
/// Creates a VPWidenIntOrFpInductionRecipe for \p Phi. If needed, it will also
@@ -8745,6 +8747,7 @@ VPSingleDefRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
Range);
if (ShouldUseVectorIntrinsic)
return new VPWidenIntrinsicRecipe(*CI, ID, Ops, CI->getType(),
+ getMetadataToPropagate(CI),
CI->getDebugLoc());
Function *Variant = nullptr;
@@ -8798,7 +8801,8 @@ VPSingleDefRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
}
Ops.push_back(Operands.back());
- return new VPWidenCallRecipe(CI, Variant, Ops, CI->getDebugLoc());
+ return new VPWidenCallRecipe(CI, Variant, Ops, getMetadataToPropagate(CI),
+ CI->getDebugLoc());
}
return nullptr;
@@ -8836,7 +8840,8 @@ VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
Plan.getOrAddLiveIn(ConstantInt::get(I->getType(), 1u, false));
auto *SafeRHS = Builder.createSelect(Mask, Ops[1], One, I->getDebugLoc());
Ops[1] = SafeRHS;
- return new VPWidenRecipe(*I, make_range(Ops.begin(), Ops.end()));
+ return new VPWidenRecipe(*I, make_range(Ops.begin(), Ops.end()),
+ getMetadataToPropagate(I));
}
[[fallthrough]];
}
@@ -8882,7 +8887,8 @@ VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
// For other binops, the legacy cost model only checks the second operand.
NewOps[1] = GetConstantViaSCEV(NewOps[1]);
}
- return new VPWidenRecipe(*I, make_range(NewOps.begin(), NewOps.end()));
+ return new VPWidenRecipe(*I, make_range(NewOps.begin(), NewOps.end()),
+ getMetadataToPropagate(I));
}
case Instruction::ExtractValue: {
SmallVector<VPValue *> NewOps(Operands);
@@ -8891,7 +8897,8 @@ VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
assert(EVI->getNumIndices() == 1 && "Expected one extractvalue index");
unsigned Idx = EVI->getIndices()[0];
NewOps.push_back(Plan.getOrAddLiveIn(ConstantInt::get(I32Ty, Idx, false)));
- return new VPWidenRecipe(*I, make_range(NewOps.begin(), NewOps.end()));
+ return new VPWidenRecipe(*I, make_range(NewOps.begin(), NewOps.end()),
+ getMetadataToPropagate(I));
}
};
}
@@ -9096,6 +9103,13 @@ bool VPRecipeBuilder::getScaledReductions(
return false;
}
+SmallVector<std::pair<unsigned, MDNode *>>
+VPRecipeBuilder::getMetadataToPropagate(Instruction *I) {
+ SmallVector<std::pair<unsigned, MDNode *>> Metadata;
+ ::getMetadataToPropagate(I, Metadata);
+ return Metadata;
+}
+
VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(
Instruction *Instr, ArrayRef<VPValue *> Operands, VFRange &Range) {
// First, check for specific widening recipes that deal with inductions, Phi
@@ -9168,13 +9182,14 @@ VPRecipeBase *VPRecipeBuilder::tryToCreateWidenRecipe(
make_range(Operands.begin(), Operands.end()));
if (auto *SI = dyn_cast<SelectInst>(Instr)) {
- return new VPWidenSelectRecipe(
- *SI, make_range(Operands.begin(), Operands.end()));
+ return new VPWidenSelectRecipe(*SI,
+ make_range(Operands.begin(), Operands.end()),
+ getMetadataToPropagate(SI));
}
if (auto *CI = dyn_cast<CastInst>(Instr)) {
return new VPWidenCastRecipe(CI->getOpcode(), Operands[0], CI->getType(),
- *CI);
+ *CI, getMetadataToPropagate(CI));
}
return tryToWiden(Instr, Operands);
@@ -9200,7 +9215,8 @@ VPRecipeBuilder::tryToCreatePartialReduction(Instruction *Reduction,
SmallVector<VPValue *, 2> Ops;
Ops.push_back(Plan.getOrAddLiveIn(Zero));
Ops.push_back(BinOp);
- BinOp = new VPWidenRecipe(*Reduction, make_range(Ops.begin(), Ops.end()));
+ BinOp = new VPWidenRecipe(*Reduction, make_range(Ops.begin(), Ops.end()),
+ getMetadataToPropagate(Reduction));
Builder.insert(BinOp->getDefiningRecipe());
ReductionOpcode = Instruction::Add;
}
diff --git a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
index fd0064a34c4c9..aab34834ae9bf 100644
--- a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
+++ b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
@@ -233,6 +233,11 @@ class VPRecipeBuilder {
}
return Plan.getOrAddLiveIn(V);
}
+
+ /// Returns the metadata that can be preserved from the original instruction
+ /// \p I, including noalias metadata guaranteed by runtime checks.
+ static SmallVector<std::pair<unsigned, MDNode *>>
+ getMetadataToPropagate(Instruction *I);
};
} // end namespace llvm
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index fa2d95a44609a..e7eb4b36ced07 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -355,23 +355,13 @@ BasicBlock *VPTransformState::CFGState::getPreheaderBBFor(VPRecipeBase *R) {
return VPBB2IRBB[LoopRegion->getPreheaderVPBB()];
}
-void VPTransformState::addNewMetadata(Instruction *To,
- const Instruction *Orig) {
+void VPTransformState::addNewMetadata(Value *To, const Instruction *Orig) {
+
// If the loop was versioned with memchecks, add the corresponding no-alias
// metadata.
- if (LVer && isa<LoadInst, StoreInst>(Orig))
- LVer->annotateInstWithNoAlias(To, Orig);
-}
-
-void VPTransformState::addMetadata(Value *To, Instruction *From) {
- // No source instruction to transfer metadata from?
- if (!From)
- return;
-
- if (Instruction *ToI = dyn_cast<Instruction>(To)) {
- propagateMetadata(ToI, From);
- addNewMetadata(ToI, From);
- }
+ Instruction *ToI = dyn_cast<Instruction>(To);
+ if (ToI && LVer && isa<LoadInst, StoreInst>(Orig))
+ LVer->annotateInstWithNoAlias(ToI, Orig);
}
void VPTransformState::setDebugLocFrom(DebugLoc DL) {
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 7084676af6d5b..0692ad4e3bbfb 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -1190,28 +1190,49 @@ struct VPIRPhi : public VPIRInstruction {
#endif
};
+using MDArrayRef = ArrayRef<std::pair<unsigned, MDNode *>>;
+
+/// Helper to manage IR metadata for recipes. It filters out metadata that
+/// cannot be propagated.
+class VPIRMetadata {
+ SmallVector<std::pair<unsigned, MDNode *>> Metadata;
+
+protected:
+ VPIRMetadata(MDArrayRef Metadata) : Metadata(Metadata) {}
+
+public:
+ /// Add all metadata to \p V if it is an instruction.
+ void applyMetadata(Value *V) const;
+
+ /// Return the IR metadata.
+ MDArrayRef getMetadata() const { return Metadata; }
+};
+
/// VPWidenRecipe is a recipe for producing a widened instruction using the
/// opcode and operands of the recipe. This recipe covers most of the
/// traditional vectorization cases where each recipe transforms into a
/// vectorized version of itself.
-class VPWidenRecipe : public VPRecipeWithIRFlags {
+class VPWidenRecipe : public VPRecipeWithIRFlags, public VPIRMetadata {
unsigned Opcode;
protected:
template <typename IterT>
VPWidenRecipe(unsigned VPDefOpcode, Instruction &I,
- iterator_range<IterT> Operands)
- : VPRecipeWithIRFlags(VPDefOpcode, Operands, I), Opcode(I.getOpcode()) {}
+ iterator_range<IterT> Operands, MDArrayRef Metadata)
+ : VPRecipeWithIRFlags(VPDefOpcode, Operands, I), VPIRMetadata(Metadata),
+ Opcode(I.getOpcode()) {}
public:
template <typename IterT>
- VPWidenRecipe(Instruction &I, iterator_range<IterT> Operands)
- : VPWidenRecipe(VPDef::VPWidenSC, I, Operands) {}
+ VPWidenRecipe(Instruction &I, iterator_range<IterT> Operands,
+ MDArrayRef Metadata)
+ : VPWidenRecipe(VPDef::VPWidenSC, I, Operands, Metadata) {}
~VPWidenRecipe() override = default;
VPWidenRecipe *clone() override {
- auto *R = new VPWidenRecipe(*getUnderlyingInstr(), operands());
+ auto *R =
+ new VPWidenRecipe(*getUnderlyingInstr(), operands(), getMetadata());
R->transferFlags(*this);
return R;
}
@@ -1236,7 +1257,7 @@ class VPWidenRecipe : public VPRecipeWithIRFlags {
};
/// VPWidenCastRecipe is a recipe to create vector cast instructions.
-class VPWidenCastRecipe : public VPRecipeWithIRFlags {
+class VPWidenCastRecipe : public VPRecipeWithIRFlags, public VPIRMetadata {
/// Cast instruction opcode.
Instruction::CastOps Opcode;
@@ -1245,23 +1266,23 @@ class VPWidenCastRecipe : public VPRecipeWithIRFlags {
public:
VPWidenCastRecipe(Instruction::CastOps Opcode, VPValue *Op, Type *ResultTy,
- CastInst &UI)
- : VPRecipeWithIRFlags(VPDef::VPWidenCastSC, Op, UI), Opcode(Opcode),
- ResultTy(ResultTy) {
+ CastInst &UI, MDArrayRef Metadata)
+ : VPRecipeWithIRFlags(VPDef::VPWidenCastSC, Op, UI),
+ VPIRMetadata(Metadata), Opcode(Opcode), ResultTy(ResultTy) {
assert(UI.getOpcode() == Opcode &&
"opcode of underlying cast doesn't match");
}
VPWidenCastRecipe(Instruction::CastOps Opcode, VPValue *Op, Type *ResultTy)
- : VPRecipeWithIRFlags(VPDef::VPWidenCastSC, Op), Opcode(Opcode),
- ResultTy(ResultTy) {}
+ : VPRecipeWithIRFlags(VPDef::VPWidenCastSC, Op), VPIRMetadata({}),
+ Opcode(Opcode), ResultTy(ResultTy) {}
~VPWidenCastRecipe() override = default;
VPWidenCastRecipe *clone() override {
if (auto *UV = getUnderlyingValue())
return new VPWidenCastRecipe(Opcode, getOperand(0), ResultTy,
- *cast<CastInst>(UV));
+ *cast<CastInst>(UV), getMetadata());
return new VPWidenCastRecipe(Opcode, getOperand(0), ResultTy);
}
@@ -1288,7 +1309,7 @@ class VPWidenCastRecipe : public VPRecipeWithIRFlags {
};
/// A recipe for widening vector intrinsics.
-class VPWidenIntrinsicRecipe : public VPRecipeWithIRFlags {
+class VPWidenIntrinsicRecipe : public VPRecipeWithIRFlags, public VPIRMetadata {
/// ID of the vector intrinsic to widen.
Intrinsic::ID VectorIntrinsicID;
@@ -1307,18 +1328,19 @@ class VPWidenIntrinsicRecipe : public VPRecipeWithIRFlags {
public:
VPWidenIntrinsicRecipe(CallInst &CI, Intrinsic::ID VectorIntrinsicID,
ArrayRef<VPValue *> CallArguments, Type *Ty,
- DebugLoc DL = {})
+ MDArrayRef Metadata, DebugLoc DL = {})
: VPRecipeWithIRFlags(VPDef::VPWidenIntrinsicSC, CallArguments, CI),
- VectorIntrinsicID(VectorIntrinsicID), ResultTy(Ty),
- MayReadFromMemory(CI.mayReadFromMemory()),
+ VPIRMetadata(Metadata), VectorIntrinsicID(VectorIntrinsicID),
+ ResultTy(Ty), MayReadFromMemory(CI.mayReadFromMemory()),
MayWriteToMemory(CI.mayWriteToMemory()),
MayHaveSideEffects(CI.mayHaveSideEffects()) {}
VPWidenIntrinsicRecipe(Intrinsic::ID VectorIntrinsicID,
ArrayRef<VPValue *> CallArguments, Type *Ty,
- DebugLoc DL = {})
+ MDArrayRef Metadata, DebugLoc DL = {})
: VPRecipeWithIRFlags(VPDef::VPWidenIntrinsicSC, CallArguments, DL),
- VectorIntrinsicID(VectorIntrinsicID), ResultTy(Ty) {
+ VPIRMetadata(Metadata), VectorIntrinsicID(VectorIntrinsicID),
+ ResultTy(Ty) {
LLVMContext &Ctx = Ty->getContext();
AttributeSet Attrs = Intrinsic::getFnAttributes(Ctx, VectorIntrinsicID);
MemoryEffects ME = Attrs.getMemoryEffects();
@@ -1334,7 +1356,7 @@ class VPWidenIntrinsicRecipe : public VPRecipeWithIRFlags {
VPWidenIntrinsicRecipe *clone() override {
return new VPWidenIntrinsicRecipe(*cast<CallInst>(getUnderlyingValue()),
VectorIntrinsicID, {op_begin(), op_end()},
- ResultTy, getDebugLoc());
+ ResultTy, getMetadata(), getDebugLoc());
}
VP_CLASSOF_IMPL(VPDef::VPWidenIntrinsicSC)
@@ -1374,7 +1396,7 @@ class VPWidenIntrinsicRecipe : public VPRecipeWithIRFlags {
};
/// A recipe for widening Call instructions using library calls.
-class VPWidenCallRecipe : public VPRecipeWithIRFlags {
+class VPWidenCallRecipe : public VPRecipeWithIRFlags, public VPIRMetadata {
/// Variant stores a pointer to the chosen function. There is a 1:1 mapping
/// between a given VF and the chosen vectorized variant, so there will be a
/// different VPlan for each VF with a valid variant.
@@ -1382,10 +1404,11 @@ class VPWidenCallRecipe : public VPRecipeWithIRFlags {
public:
VPWidenCallRecipe(Value *UV, Function *Variant,
- ArrayRef<VPValue *> CallArguments, DebugLoc DL = {})
+ ArrayRef<VPValue *> CallArguments, MDArrayRef Metadata,
+ DebugLoc DL = {})
: VPRecipeWithIRFlags(VPDef::VPWidenCallSC, CallArguments,
*cast<Instruction>(UV)),
- Variant(Variant) {
+ VPIRMetadata(Metadata), Variant(Variant) {
assert(
isa<Function>(getOperand(getNumOperands() - 1)->getLiveInIRValue()) &&
"last operand must be the called function");
@@ -1395,7 +1418,8 @@ class VPWidenCallRecipe : public VPRecipeWithIRFlags {
VPWidenCallRecipe *clone() override {
return new VPWidenCallRecipe(getUnderlyingValue(), Variant,
- {op_begin(), op_end()}, getDebugLoc());
+ {op_begin(), op_end()}, getMetadata(),
+ getDebugLoc());
}
VP_CLASSOF_IMPL(VPDef::VPWidenCallSC)
@@ -1471,16 +1495,18 @@ class VPHistogramRecipe : public VPRecipeBase {
};
/// A recipe for widening select instructions.
-struct VPWidenSelectRecipe : public VPRecipeWithIRFlags {
+struct VPWidenSelectRecipe : public VPRecipeWithIRFlags, public VPIRMetadata {
template <typename IterT>
- VPWidenSelectRecipe(SelectInst &I, iterator_range<IterT> Operands)
- : VPRecipeWithIRFlags(VPDef::VPWidenSelectSC, Operands, I) {}
+ VPWidenSelectRecipe(SelectInst &I, iterator_range<IterT> Operands,
+ MDArrayRef Metadata)
+ : VPRecipeWithIRFlags(VPDef::VPWidenSelectSC, Operands, I),
+ VPIRMetadata(Metadata) {}
~VPWidenSelectRecipe() override = default;
VPWidenSelectRecipe *clone() override {
return new VPWidenSelectRecipe(*cast<SelectInst>(getUnderlyingInstr()),
- operands());
+ operands(), getMetadata());
}
VP_CLASSOF_IMPL(VPDef::VPWidenSelectSC)
@@ -2602,7 +2628,7 @@ class VPPredInstPHIRecipe : public VPSingleDefRecipe {
/// A common base class for widening memory operations. An optional mask can be
/// provided as the last operand.
-class VPWidenMemoryRecipe : public VPRecipeBase {
+class VPWidenMemoryRecipe : public VPRecipeBase, public VPIRMetadata {
protected:
Instruction &Ingredient;
@@ -2625,9 +2651,10 @@ class VPWidenMemoryRecipe : public VPRecipeBase {
VPWidenMemoryRecipe(const char unsigned SC, Instruction &I,
std::initializer_list<VPValue *> Operands,
- bool Consecutive, bool Reverse, DebugLoc DL)
- : VPRecipeBase(SC, Operands, DL), Ingredient(I), Consecutive(Consecutive),
- Reverse(Reverse) {
+ bool Consecutive, bool Reverse, MDArrayRef Metadata,
+ DebugLoc DL)
+ : VPRecipeBase(SC, Operands, DL), VPIRMetadata(Metadata), Ingredient(I),
+ Consecutive(Consecutive), Reverse(Reverse) {
assert((Consecutive || !Reverse) && "Reverse implies consecutive");
}
@@ -2684,17 +2711,19 @@ class VPWidenMemoryRecipe : public VPRecipeBase {
/// optional mask.
struct VPWidenLoadRecipe final : public VPWidenMemoryRecipe, public VPValue {
VPWidenLoadRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask,
- bool Consecutive, bool Reverse, DebugLoc DL)
+ bool Consecutive, bool Reverse, MDArrayRef Metadata,
+ DebugLoc DL)
: VPWidenMemoryRecipe(VPDef::VPWidenLoadSC, Load, {Addr}, Consecutive,
- Reverse, DL),
+ Reverse, Metadata, DL),
VPValue(this, &Load) {
setMask(Mask);
}
VPWidenLoadRecipe *clone() override {
- return new VPWidenLoadRecipe(cast<LoadInst>(Ingredient), getAddr(),
- getMask(), Consecutive, Reverse,
- getDebugLoc());
+ auto *Copy = new VPWidenLoadRecipe(cast<LoadInst>(Ingredient), getAddr(),
+ getMask(), Consecutive, Reverse,
+ getMetadata(), getDebugLoc());
+ return Copy;
}
VP_CLASSOF_IMPL(VPDef::VPWidenLoadSC);
@@ -2725,7 +2754,7 @@ struct VPWidenLoadEVLRecipe final : public VPWidenMemoryRecipe, public VPValue {
VPWidenLoadEVLRecipe(VPWidenLoadRecipe &L, VPValue &EVL, VPValue *Mask)
: VPWidenMemoryRecipe(VPDef::VPWidenLoadEVLSC, L.getIngredient(),
{L.getAddr(), &EVL}, L.isConsecutive(),
- L.isReverse(), L.getDebugLoc()),
+ L.isReverse(), L.getMetadata(), L.getDebugLoc()),
VPValue(this, &getIngredient()) {
setMask(Mask);
}
@@ -2762,16 +2791,18 @@ struct VPWidenLoadEVLRecipe final : public VPWidenMemoryRecipe, public VPValue {
/// to store to and an optional mask.
struct VPWidenStoreRecipe final : public VPWidenMemoryRecipe {
VPWidenStoreRecipe(StoreInst &Store, VPValue *Addr, VPValue *StoredVal,
- VPValue *Mask, bool Consecutive, bool Reverse, DebugLoc DL)
+ VPValue *Mask, bool Consecutive, bool Reverse,
+ MDArrayRef Metadata, DebugLoc DL)
: VPWidenMemoryRecipe(VPDef::VPWidenStoreSC, Store, {Addr, StoredVal},
- Consecutive, Reverse, DL) {
+ Consecutive, Reverse, Metadata, DL) {
setMask(Mask);
}
VPWidenStoreRecipe *clone() override {
- return new VPWidenStoreRecipe(cast<StoreInst>(Ingredient), getAddr(),
- getStoredValue(), getMask(), Consecutive,
- Reverse, getDebugLoc());
+ auto *Copy = new VPWidenStoreRecipe(
+ cast<StoreInst>(Ingredient), getAddr(), getStoredValue(), getMask(),
+ Consecutive, Reverse, getMetadata(), getDebugLoc());
+ return Copy;
}
VP_CLASSOF_IMPL(VPDef::VPWidenStoreSC);
@@ -2805,7 +2836,8 @@ struct VPWidenStoreEVLRecipe final : public VPWidenMemoryRecipe {
VPWidenStoreEVLRecipe(VPWidenStoreRecipe &S, VPValue &EVL, VPValue *Mask)
: VPWidenMemoryRecipe(VPDef::VPWidenStoreEVLSC, S.getIngredient(),
{S.getAddr(), S.getStoredValue(), &EVL},
- S.isConsecutive(), S.isReverse(), S.getDebugLoc()) {
+ S.isConsecutive(), S.isReverse(), S.getMetadata(),
+ S.getDebugLoc()) {
setMask(Mask);
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
index bebea1915690f..4826dfd562c2d 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
@@ -288,14 +288,7 @@ struct VPTransformState {
/// Currently this is used to add the noalias annotations based on the
/// inserted memchecks. Use this for instructions that are *cloned* into the
/// vector loop.
- void addNewMetadata(Instruction *To, const Instruction *Orig);
-
- /// Add metadata from one instruction to another.
- ///
- /// This includes both the original MDs from \p From and additional ones (\see
- /// addNewMetadata). Use this for *newly created* instructions in the vector
- /// loop.
- void addMetadata(Value *To, Instruction *From);
+ void addNewMetadata(Value *To, const Instruction *Orig);
/// Set the debug location in the builder using the debug location \p DL.
void setDebugLocFrom(DebugLoc DL);
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 2cc558f49ccce..b5a5d8b7ace7d 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -1204,6 +1204,14 @@ void VPIRPhi::print(raw_ostream &O, const Twine &Indent,
}
#endif
+void VPIRMetadata::applyMetadata(Value *V) const {
+ auto *I = dyn_cast<Instruction>(V);
+ if (!I)
+ return;
+ for (const auto &[Kind, Node] : Metadata)
+ I->setMetadata(Kind, Node);
+}
+
void VPWidenCallRecipe::execute(VPTransformState &State) {
assert(State.VF.isVector() && "not widening");
@@ -1231,10 +1239,10 @@ void VPWidenCallRecipe::execute(VPTransformState &State) {
CallInst *V = State.Builder.CreateCall(Variant, Args, OpBundles);
setFlags(V);
+ applyMetadata(V);
if (!V->getType()->isVoidTy())
State.set(this, V);
- State.addMetadata(V, CI);
}
InstructionCost VPWidenCallRecipe::computeCost(ElementCount VF,
@@ -1310,10 +1318,10 @@ void VPWidenIntrinsicRecipe::execute(VPTransformState &State) {
CallInst *V = State.Builder.CreateCall(VectorF, Args, OpBundles);
setFlags(V);
+ applyMetadata(V);
if (!V->getType()->isVoidTy())
State.set(this, V);
- State.addMetadata(V, CI);
}
InstructionCost VPWidenIntrinsicRecipe::computeCost(ElementCount VF,
@@ -1510,7 +1518,7 @@ void VPWidenSelectRecipe::execute(VPTransformState &State) {
State.set(this, Sel);
if (isa<FPMathOperator>(Sel))
setFlags(cast<Instruction>(Sel));
- State.addMetadata(Sel, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
+ applyMetadata(Sel);
}
InstructionCost VPWidenSelectRecipe::computeCost(ElementCount VF,
@@ -1641,12 +1649,13 @@ void VPWidenRecipe::execute(VPTransformState &State) {
Value *V = Builder.CreateNAryOp(Opcode, Ops);
- if (auto *VecOp = dyn_cast<Instruction>(V))
+ if (auto *VecOp = dyn_cast<Instruction>(V)) {
setFlags(VecOp);
+ applyMetadata(V);
+ }
// Use this vector value for all users of the original instruction.
State.set(this, V);
- State.addMetadata(V, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
break;
}
case Instruction::ExtractValue: {
@@ -1678,8 +1687,8 @@ void VPWidenRecipe::execute(VPTransformState &State) {
} else {
C = Builder.CreateICmp(getPredicate(), A, B);
}
+ applyMetadata(C);
State.set(this, C);
- State.addMetadata(C, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
break;
}
default:
@@ -1795,9 +1804,10 @@ void VPWidenCastRecipe::execute(VPTransformState &State) {
Value *A = State.get(Op);
Value *Cast = Builder.CreateCast(Instruction::CastOps(Opcode), A, DestTy);
State.set(this, Cast);
- State.addMetadata(Cast, cast_or_null<Instruction>(getUnderlyingValue()));
- if (auto *CastOp = dyn_cast<Instruction>(Cast))
+ if (auto *CastOp = dyn_cast<Instruction>(Cast)) {
setFlags(CastOp);
+ applyMetadata(CastOp);
+ }
}
InstructionCost VPWidenCastRecipe::computeCost(ElementCount VF,
@@ -2749,7 +2759,8 @@ void VPWidenLoadRecipe::execute(VPTransformState &State) {
NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
}
// Add metadata to the load, but setVectorValue to the reverse shuffle.
- State.addMetadata(NewLI, LI);
+ State.addNewMetadata(NewLI, LI);
+ applyMetadata(NewLI);
if (Reverse)
NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
State.set(this, NewLI);
@@ -2809,7 +2820,8 @@ void VPWidenLoadEVLRecipe::execute(VPTransformState &State) {
}
NewLI->addParamAttr(
0, Attribute::getWithAlignment(NewLI->getContext(), Alignment));
- State.addMetadata(NewLI, LI);
+ State.addNewMetadata(NewLI, LI);
+ applyMetadata(NewLI);
Instruction *Res = NewLI;
if (isReverse())
Res = createReverseEVL(Builder, Res, EVL, "vp.reverse");
@@ -2885,7 +2897,8 @@ void VPWidenStoreRecipe::execute(VPTransformState &State) {
NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
else
NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
- State.addMetadata(NewSI, SI);
+ State.addNewMetadata(NewSI, SI);
+ applyMetadata(NewSI);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -2932,7 +2945,8 @@ void VPWidenStoreEVLRecipe::execute(VPTransformState &State) {
}
NewSI->addParamAttr(
1, Attribute::getWithAlignment(NewSI->getContext(), Alignment));
- State.addMetadata(NewSI, SI);
+ State.addNewMetadata(NewSI, SI);
+ applyMetadata(NewSI);
}
InstructionCost VPWidenStoreEVLRecipe::computeCost(ElementCount VF,
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index b80fe18d1bd66..445bcac5919fc 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -70,6 +70,7 @@ bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
NewRecipe = new VPWidenIntOrFpInductionRecipe(
Phi, Start, Step, &Plan->getVF(), *II, Ingredient.getDebugLoc());
} else {
+ auto Metadata = VPRecipeBuilder::getMetadataToPropagate(Inst);
assert(isa<VPInstruction>(&Ingredient) &&
"only VPInstructions expected here");
assert(!isa<PHINode>(Inst) && "phis should be handled above");
@@ -77,13 +78,13 @@ bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
NewRecipe = new VPWidenLoadRecipe(
*Load, Ingredient.getOperand(0), nullptr /*Mask*/,
- false /*Consecutive*/, false /*Reverse*/,
+ false /*Consecutive*/, false /*Reverse*/, Metadata,
Ingredient.getDebugLoc());
} else if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
NewRecipe = new VPWidenStoreRecipe(
*Store, Ingredient.getOperand(1), Ingredient.getOperand(0),
nullptr /*Mask*/, false /*Consecutive*/, false /*Reverse*/,
- Ingredient.getDebugLoc());
+ Metadata, Ingredient.getDebugLoc());
} else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
NewRecipe = new VPWidenGEPRecipe(GEP, Ingredient.operands());
} else if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
@@ -93,14 +94,16 @@ bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
NewRecipe = new VPWidenIntrinsicRecipe(
*CI, getVectorIntrinsicIDForCall(CI, &TLI),
{Ingredient.op_begin(), Ingredient.op_end() - 1}, CI->getType(),
- CI->getDebugLoc());
+ Metadata, CI->getDebugLoc());
} else if (SelectInst *SI = dyn_cast<SelectInst>(Inst)) {
- NewRecipe = new VPWidenSelectRecipe(*SI, Ingredient.operands());
+ NewRecipe =
+ new VPWidenSelectRecipe(*SI, Ingredient.operands(), Metadata);
} else if (auto *CI = dyn_cast<CastInst>(Inst)) {
- NewRecipe = new VPWidenCastRecipe(
- CI->getOpcode(), Ingredient.getOperand(0), CI->getType(), *CI);
+ NewRecipe =
+ new VPWidenCastRecipe(CI->getOpcode(), Ingredient.getOperand(0),
+ CI->getType(), *CI, Metadata);
} else {
- NewRecipe = new VPWidenRecipe(*Inst, Ingredient.operands());
+ NewRecipe = new VPWidenRecipe(*Inst, Ingredient.operands(), Metadata);
}
}
@@ -2009,9 +2012,9 @@ static VPRecipeBase *createEVLRecipe(VPValue *HeaderMask,
.Case<VPWidenSelectRecipe>([&](VPWidenSelectRecipe *Sel) {
SmallVector<VPValue *> Ops(Sel->operands());
Ops.push_back(&EVL);
- return new VPWidenIntrinsicRecipe(Intrinsic::vp_select, Ops,
- TypeInfo.inferScalarType(Sel),
- Sel->getDebugLoc());
+ return new VPWidenIntrinsicRecipe(
+ Intrinsic::vp_select, Ops, TypeInfo.inferScalarType(Sel),
+ Sel->getMetadata(), Sel->getDebugLoc());
})
.Case<VPInstruction>([&](VPInstruction *VPI) -> VPRecipeBase * {
if (VPI->getOpcode() == VPInstruction::FirstOrderRecurrenceSplice) {
@@ -2023,7 +2026,7 @@ static VPRecipeBase *createEVLRecipe(VPValue *HeaderMask,
Ops.append({MinusOneVPV, &AllOneMask, PrevEVL, &EVL});
return new VPWidenIntrinsicRecipe(Intrinsic::experimental_vp_splice,
Ops, TypeInfo.inferScalarType(VPI),
- VPI->getDebugLoc());
+ {}, VPI->getDebugLoc());
}
VPValue *LHS, *RHS;
@@ -2038,7 +2041,7 @@ static VPRecipeBase *createEVLRecipe(VPValue *HeaderMask,
// limited to selects whose condition is a header mask.
return new VPWidenIntrinsicRecipe(
Intrinsic::vp_merge, {&AllOneMask, LHS, RHS, &EVL},
- TypeInfo.inferScalarType(LHS), VPI->getDebugLoc());
+ TypeInfo.inferScalarType(LHS), {}, VPI->getDebugLoc());
})
.Default([&](VPRecipeBase *R) { return nullptr; });
}
@@ -2755,7 +2758,7 @@ void VPlanTransforms::narrowInterleaveGroups(VPlan &Plan, ElementCount VF,
auto *L = new VPWidenLoadRecipe(
*cast<LoadInst>(LoadGroup->getInterleaveGroup()->getInsertPos()),
LoadGroup->getAddr(), LoadGroup->getMask(), /*Consecutive=*/true,
- /*Reverse=*/false, LoadGroup->getDebugLoc());
+ /*Reverse=*/false, {}, LoadGroup->getDebugLoc());
L->insertBefore(LoadGroup);
return L;
}
@@ -2786,7 +2789,7 @@ void VPlanTransforms::narrowInterleaveGroups(VPlan &Plan, ElementCount VF,
auto *S = new VPWidenStoreRecipe(
*cast<StoreInst>(StoreGroup->getInterleaveGroup()->getInsertPos()),
StoreGroup->getAddr(), Res, nullptr, /*Consecutive=*/true,
- /*Reverse=*/false, StoreGroup->getDebugLoc());
+ /*Reverse=*/false, {}, StoreGroup->getDebugLoc());
S->insertBefore(StoreGroup);
StoreGroup->eraseFromParent();
}
diff --git a/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp b/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp
index cb7545171744e..277e03cbe8588 100644
--- a/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp
+++ b/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp
@@ -929,7 +929,7 @@ TEST_F(VPRecipeTest, CastVPWidenRecipeToVPUser) {
SmallVector<VPValue *, 2> Args;
Args.push_back(Op1);
Args.push_back(Op2);
- VPWidenRecipe WidenR(*AI, make_range(Args.begin(), Args.end()));
+ VPWidenRecipe WidenR(*AI, make_range(Args.begin(), Args.end()), {});
EXPECT_TRUE(isa<VPUser>(&WidenR));
VPRecipeBase *WidenRBase = &WidenR;
EXPECT_TRUE(isa<VPUser>(WidenRBase));
@@ -950,7 +950,7 @@ TEST_F(VPRecipeTest, CastVPWidenCallRecipeToVPUserAndVPDef) {
Args.push_back(Op1);
Args.push_back(Op2);
Args.push_back(CalledFn);
- VPWidenCallRecipe Recipe(Call, Fn, Args);
+ VPWidenCallRecipe Recipe(Call, Fn, Args, {});
EXPECT_TRUE(isa<VPUser>(&Recipe));
VPRecipeBase *BaseR = &Recipe;
EXPECT_TRUE(isa<VPUser>(BaseR));
@@ -978,7 +978,7 @@ TEST_F(VPRecipeTest, CastVPWidenSelectRecipeToVPUserAndVPDef) {
Args.push_back(Op2);
Args.push_back(Op3);
VPWidenSelectRecipe WidenSelectR(*SelectI,
- make_range(Args.begin(), Args.end()));
+ make_range(Args.begin(), Args.end()), {});
EXPECT_TRUE(isa<VPUser>(&WidenSelectR));
VPRecipeBase *BaseR = &WidenSelectR;
EXPECT_TRUE(isa<VPUser>(BaseR));
@@ -1084,7 +1084,7 @@ TEST_F(VPRecipeTest, CastVPWidenMemoryRecipeToVPUserAndVPDef) {
new LoadInst(Int32, PoisonValue::get(Int32Ptr), "", false, Align(1));
VPValue *Addr = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 1));
VPValue *Mask = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 2));
- VPWidenLoadRecipe Recipe(*Load, Addr, Mask, true, false, {});
+ VPWidenLoadRecipe Recipe(*Load, Addr, Mask, true, false, {}, {});
EXPECT_TRUE(isa<VPUser>(&Recipe));
VPRecipeBase *BaseR = &Recipe;
EXPECT_TRUE(isa<VPUser>(BaseR));
@@ -1111,7 +1111,7 @@ TEST_F(VPRecipeTest, MayHaveSideEffectsAndMayReadWriteMemory) {
SmallVector<VPValue *, 2> Args;
Args.push_back(Op1);
Args.push_back(Op2);
- VPWidenRecipe Recipe(*AI, make_range(Args.begin(), Args.end()));
+ VPWidenRecipe Recipe(*AI, make_range(Args.begin(), Args.end()), {});
EXPECT_FALSE(Recipe.mayHaveSideEffects());
EXPECT_FALSE(Recipe.mayReadFromMemory());
EXPECT_FALSE(Recipe.mayWriteToMemory());
@@ -1130,7 +1130,8 @@ TEST_F(VPRecipeTest, MayHaveSideEffectsAndMayReadWriteMemory) {
Args.push_back(Op1);
Args.push_back(Op2);
Args.push_back(Op3);
- VPWidenSelectRecipe Recipe(*SelectI, make_range(Args.begin(), Args.end()));
+ VPWidenSelectRecipe Recipe(*SelectI, make_range(Args.begin(), Args.end()),
+ {});
EXPECT_FALSE(Recipe.mayHaveSideEffects());
EXPECT_FALSE(Recipe.mayReadFromMemory());
EXPECT_FALSE(Recipe.mayWriteToMemory());
@@ -1201,7 +1202,7 @@ TEST_F(VPRecipeTest, MayHaveSideEffectsAndMayReadWriteMemory) {
new LoadInst(Int32, PoisonValue::get(Int32Ptr), "", false, Align(1));
VPValue *Mask = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 1));
VPValue *Addr = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 2));
- VPWidenLoadRecipe Recipe(*Load, Addr, Mask, true, false, {});
+ VPWidenLoadRecipe Recipe(*Load, Addr, Mask, true, false, {}, {});
EXPECT_FALSE(Recipe.mayHaveSideEffects());
EXPECT_TRUE(Recipe.mayReadFromMemory());
EXPECT_FALSE(Recipe.mayWriteToMemory());
@@ -1215,7 +1216,8 @@ TEST_F(VPRecipeTest, MayHaveSideEffectsAndMayReadWriteMemory) {
VPValue *Mask = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 1));
VPValue *Addr = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 2));
VPValue *StoredV = Plan.getOrAddLiveIn(ConstantInt::get(Int32, 3));
- VPWidenStoreRecipe Recipe(*Store, Addr, StoredV, Mask, false, false, {});
+ VPWidenStoreRecipe Recipe(*Store, Addr, StoredV, Mask, false, false, {},
+ {});
EXPECT_TRUE(Recipe.mayHaveSideEffects());
EXPECT_FALSE(Recipe.mayReadFromMemory());
EXPECT_TRUE(Recipe.mayWriteToMemory());
@@ -1234,7 +1236,7 @@ TEST_F(VPRecipeTest, MayHaveSideEffectsAndMayReadWriteMemory) {
Args.push_back(Op1);
Args.push_back(Op2);
Args.push_back(CalledFn);
- VPWidenCallRecipe Recipe(Call, Fn, Args);
+ VPWidenCallRecipe Recipe(Call, Fn, Args, {});
EXPECT_TRUE(Recipe.mayHaveSideEffects());
EXPECT_TRUE(Recipe.mayReadFromMemory());
EXPECT_TRUE(Recipe.mayWriteToMemory());
@@ -1257,7 +1259,7 @@ TEST_F(VPRecipeTest, MayHaveSideEffectsAndMayReadWriteMemory) {
Args.push_back(Op1);
Args.push_back(Op2);
Args.push_back(CalledFn);
- VPWidenCallRecipe Recipe(Call, TheFn, Args);
+ VPWidenCallRecipe Recipe(Call, TheFn, Args, {});
EXPECT_FALSE(Recipe.mayHaveSideEffects());
EXPECT_FALSE(Recipe.mayReadFromMemory());
EXPECT_FALSE(Recipe.mayWriteToMemory());
@@ -1314,7 +1316,7 @@ TEST_F(VPRecipeTest, dumpRecipeInPlan) {
Args.push_back(ExtVPV1);
Args.push_back(ExtVPV2);
VPWidenRecipe *WidenR =
- new VPWidenRecipe(*AI, make_range(Args.begin(), Args.end()));
+ new VPWidenRecipe(*AI, make_range(Args.begin(), Args.end()), {});
VPBB1->appendRecipe(WidenR);
{
From 2fd7844cfdf5ec0f1c2ce0b9b3ae0763245b6922 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Mon, 14 Apr 2025 21:04:42 +0200
Subject: [PATCH 2/2] [VPlan] Manage noalias/alias_scope metadata in VPlan.
Use VPIRMetadata added in https://github.com/llvm/llvm-project/pull/135272
to also manage no-alias metadata added by versioning.
Note that this means we have to build the no-alias metadata up-front
once. If it is not used, it will be discarded automatically.
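
The "build the no-alias metadata up-front" step corresponds to
VPRecipeBuilder::getMetadataToPropagate in the diff below, which appends the
alias.scope/noalias nodes provided by LoopVersioning for loads and stores. The
stand-alone sketch that follows only illustrates the shape of that merge; the types,
the kind IDs, and the VersioningInfo helper are assumptions for illustration, not the
LLVM API.

  #include <cstdio>
  #include <optional>
  #include <string>
  #include <utility>
  #include <vector>

  using MDPair = std::pair<unsigned, std::string>; // (kind, node) stand-in

  // Hypothetical stand-in for the noalias info prepared by loop versioning.
  struct VersioningInfo {
    std::optional<std::string> AliasScope;
    std::optional<std::string> NoAlias;
  };

  std::vector<MDPair> getMetadataToPropagate(const std::vector<MDPair> &OnInst,
                                             const VersioningInfo *Ver,
                                             bool IsMemoryOp) {
    std::vector<MDPair> Metadata = OnInst; // metadata already safe to keep
    if (Ver && IsMemoryOp) {
      // Kind IDs are placeholders for MD_alias_scope / MD_noalias.
      if (Ver->AliasScope)
        Metadata.emplace_back(7u, *Ver->AliasScope);
      if (Ver->NoAlias)
        Metadata.emplace_back(8u, *Ver->NoAlias);
    }
    return Metadata;
  }

  int main() {
    VersioningInfo Ver{"!scope.A", "!noalias.B"};
    auto MD = getMetadataToPropagate({{1u, "!some.node"}}, &Ver,
                                     /*IsMemoryOp=*/true);
    std::printf("%zu metadata entries to propagate\n", MD.size());
    return 0;
  }

If no plan ends up using the versioned metadata, the collected list is dropped together
with the discarded VPlan, matching the note above.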
---
.../Vectorize/LoopVectorizationPlanner.h | 3 +-
.../Transforms/Vectorize/LoopVectorize.cpp | 67 ++++++++++---------
.../Transforms/Vectorize/VPRecipeBuilder.h | 13 ++--
llvm/lib/Transforms/Vectorize/VPlan.cpp | 13 +---
llvm/lib/Transforms/Vectorize/VPlan.h | 13 ++--
llvm/lib/Transforms/Vectorize/VPlanHelpers.h | 15 -----
.../lib/Transforms/Vectorize/VPlanRecipes.cpp | 12 ----
.../Transforms/Vectorize/VPlanTransforms.cpp | 12 ++--
.../LoopVectorize/AArch64/store-costs-sve.ll | 17 ++---
9 files changed, 69 insertions(+), 96 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
index f639f0adb9c43..d46c14f53b24f 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
@@ -36,6 +36,7 @@ class LoopVectorizationLegality;
class LoopVectorizationCostModel;
class PredicatedScalarEvolution;
class LoopVectorizeHints;
+class LoopVersioning;
class OptimizationRemarkEmitter;
class TargetTransformInfo;
class TargetLibraryInfo;
@@ -515,7 +516,7 @@ class LoopVectorizationPlanner {
/// returned VPlan is valid for. If no VPlan can be built for the input range,
/// set the largest included VF to the maximum VF for which no plan could be
/// built.
- VPlanPtr tryToBuildVPlanWithVPRecipes(VFRange &Range);
+ VPlanPtr tryToBuildVPlanWithVPRecipes(VFRange &Range, LoopVersioning *LVer);
/// Build VPlans for power-of-2 VF's between \p MinVF and \p MaxVF inclusive,
/// according to the information gathered by Legal when it checked if it is
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 398e575199b21..f965d53f91bfb 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2371,7 +2371,7 @@ void InnerLoopVectorizer::scalarizeInstruction(const Instruction *Instr,
InputLane = VPLane::getFirstLane();
Cloned->setOperand(I.index(), State.get(Operand, InputLane));
}
- State.addNewMetadata(Cloned, Instr);
+ RepRecipe->applyMetadata(Cloned);
// Place the cloned scalar in the new loop.
State.Builder.Insert(Cloned);
@@ -7989,24 +7989,6 @@ DenseMap<const SCEV *, Value *> LoopVectorizationPlanner::executePlan(
if (VectorizingEpilogue)
VPlanTransforms::removeDeadRecipes(BestVPlan);
- // Only use noalias metadata when using memory checks guaranteeing no overlap
- // across all iterations.
- const LoopAccessInfo *LAI = ILV.Legal->getLAI();
- std::unique_ptr<LoopVersioning> LVer = nullptr;
- if (LAI && !LAI->getRuntimePointerChecking()->getChecks().empty() &&
- !LAI->getRuntimePointerChecking()->getDiffChecks()) {
-
- // We currently don't use LoopVersioning for the actual loop cloning but we
- // still use it to add the noalias metadata.
- // TODO: Find a better way to re-use LoopVersioning functionality to add
- // metadata.
- LVer = std::make_unique<LoopVersioning>(
- *LAI, LAI->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, DT,
- PSE.getSE());
- State.LVer = &*LVer;
- State.LVer->prepareNoAliasMetadata();
- }
-
ILV.printDebugTracesAtStart();
//===------------------------------------------------===//
@@ -8597,15 +8579,14 @@ VPRecipeBuilder::tryToWidenMemory(Instruction *I, ArrayRef<VPValue *> Operands,
Builder.insert(VectorPtr);
Ptr = VectorPtr;
}
+ auto Metadata = getMetadataToPropagate(I);
if (LoadInst *Load = dyn_cast<LoadInst>(I))
return new VPWidenLoadRecipe(*Load, Ptr, Mask, Consecutive, Reverse,
- getMetadataToPropagate(Load),
- I->getDebugLoc());
+ Metadata, I->getDebugLoc());
StoreInst *Store = cast<StoreInst>(I);
return new VPWidenStoreRecipe(*Store, Ptr, Operands[0], Mask, Consecutive,
- Reverse, getMetadataToPropagate(Store),
- I->getDebugLoc());
+ Reverse, Metadata, I->getDebugLoc());
}
/// Creates a VPWidenIntOrFpInductionRecipe for \p Phi. If needed, it will also
@@ -8985,8 +8966,9 @@ VPRecipeBuilder::handleReplication(Instruction *I, ArrayRef<VPValue *> Operands,
assert((Range.Start.isScalar() || !IsUniform || !IsPredicated ||
(Range.Start.isScalable() && isa<IntrinsicInst>(I))) &&
"Should not predicate a uniform recipe");
- auto *Recipe = new VPReplicateRecipe(
- I, make_range(Operands.begin(), Operands.end()), IsUniform, BlockInMask);
+ auto *Recipe =
+ new VPReplicateRecipe(I, make_range(Operands.begin(), Operands.end()),
+ IsUniform, BlockInMask, getMetadataToPropagate(I));
return Recipe;
}
@@ -9104,9 +9086,16 @@ bool VPRecipeBuilder::getScaledReductions(
}
SmallVector<std::pair<unsigned, MDNode *>>
-VPRecipeBuilder::getMetadataToPropagate(Instruction *I) {
+VPRecipeBuilder::getMetadataToPropagate(Instruction *I) const {
SmallVector<std::pair<unsigned, MDNode *>> Metadata;
::getMetadataToPropagate(I, Metadata);
+ if (LVer && isa<LoadInst, StoreInst>(I)) {
+ const auto &[AliasScopeMD, NoAliasMD] = LVer->getNoAliasMetadataFor(I);
+ if (AliasScopeMD)
+ Metadata.emplace_back(LLVMContext::MD_alias_scope, AliasScopeMD);
+ if (NoAliasMD)
+ Metadata.emplace_back(LLVMContext::MD_noalias, NoAliasMD);
+ }
return Metadata;
}
@@ -9239,10 +9228,22 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
ElementCount MaxVF) {
assert(OrigLoop->isInnermost() && "Inner loop expected.");
+ // Only use noalias metadata when using memory checks guaranteeing no overlap
+ // across all iterations.
+ const LoopAccessInfo *LAI = Legal->getLAI();
+ std::unique_ptr<LoopVersioning> LVer = nullptr;
+ if (LAI && !LAI->getRuntimePointerChecking()->getChecks().empty() &&
+ !LAI->getRuntimePointerChecking()->getDiffChecks()) {
+ LVer = std::make_unique<LoopVersioning>(
+ *LAI, LAI->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, DT,
+ PSE.getSE());
+ LVer->prepareNoAliasMetadata();
+ }
+
auto MaxVFTimes2 = MaxVF * 2;
for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
VFRange SubRange = {VF, MaxVFTimes2};
- if (auto Plan = tryToBuildVPlanWithVPRecipes(SubRange)) {
+ if (auto Plan = tryToBuildVPlanWithVPRecipes(SubRange, LVer.get())) {
bool HasScalarVF = Plan->hasScalarVFOnly();
// Now optimize the initial VPlan.
if (!HasScalarVF)
@@ -9550,7 +9551,8 @@ static void addExitUsersForFirstOrderRecurrences(
}
VPlanPtr
-LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
+LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range,
+ LoopVersioning *LVer) {
using namespace llvm::VPlanPatternMatch;
SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
@@ -9596,7 +9598,7 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), HasNUW, DL);
VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, &TTI, Legal, CM, PSE,
- Builder);
+ Builder, LVer);
// ---------------------------------------------------------------------------
// Pre-construction: record ingredients whose recipes we'll need to further
@@ -9710,8 +9712,9 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) {
// Only create recipe for the final invariant store of the reduction.
if (Legal->isInvariantStoreOfReduction(SI)) {
- auto *Recipe =
- new VPReplicateRecipe(SI, R.operands(), true /* IsUniform */);
+ auto *Recipe = new VPReplicateRecipe(
+ SI, R.operands(), true /* IsUniform */, /*Mask*/ nullptr,
+ RecipeBuilder.getMetadataToPropagate(SI));
Recipe->insertBefore(*MiddleVPBB, MBIP);
}
R.eraseFromParent();
@@ -9897,7 +9900,7 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) {
// Collect mapping of IR header phis to header phi recipes, to be used in
// addScalarResumePhis.
VPRecipeBuilder RecipeBuilder(*Plan, OrigLoop, TLI, &TTI, Legal, CM, PSE,
- Builder);
+ Builder, nullptr);
for (auto &R : Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
if (isa<VPCanonicalIVPHIRecipe>(&R))
continue;
diff --git a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
index aab34834ae9bf..357a68972cb43 100644
--- a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
+++ b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
@@ -90,6 +90,10 @@ class VPRecipeBuilder {
/// A mapping of partial reduction exit instructions to their scaling factor.
DenseMap<const Instruction *, unsigned> ScaledReductionMap;
+ /// Loop versioning instance for getting noalias metadata guaranteed by
+ /// runtime checks.
+ LoopVersioning *LVer;
+
/// Check if \p I can be widened at the start of \p Range and possibly
/// decrease the range such that the returned value holds for the entire \p
/// Range. The function should not be called for memory instructions or calls.
@@ -155,9 +159,10 @@ class VPRecipeBuilder {
const TargetTransformInfo *TTI,
LoopVectorizationLegality *Legal,
LoopVectorizationCostModel &CM,
- PredicatedScalarEvolution &PSE, VPBuilder &Builder)
+ PredicatedScalarEvolution &PSE, VPBuilder &Builder,
+ LoopVersioning *LVer)
: Plan(Plan), OrigLoop(OrigLoop), TLI(TLI), TTI(TTI), Legal(Legal),
- CM(CM), PSE(PSE), Builder(Builder) {}
+ CM(CM), PSE(PSE), Builder(Builder), LVer(LVer) {}
std::optional<unsigned> getScalingForReduction(const Instruction *ExitInst) {
auto It = ScaledReductionMap.find(ExitInst);
@@ -236,8 +241,8 @@ class VPRecipeBuilder {
/// Returns the metadata that can be preserved from the original instruction
/// \p I, including noalias metadata guaranteed by runtime checks.
- static SmallVector<std::pair<unsigned, MDNode *>>
- getMetadataToPropagate(Instruction *I);
+ SmallVector<std::pair<unsigned, MDNode *>>
+ getMetadataToPropagate(Instruction *I) const;
};
} // end namespace llvm
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index e7eb4b36ced07..7ca5008c607cf 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -220,8 +220,8 @@ VPTransformState::VPTransformState(const TargetTransformInfo *TTI,
InnerLoopVectorizer *ILV, VPlan *Plan,
Loop *CurrentParentLoop, Type *CanonicalIVTy)
: TTI(TTI), VF(VF), CFG(DT), LI(LI), Builder(Builder), ILV(ILV), Plan(Plan),
- CurrentParentLoop(CurrentParentLoop), LVer(nullptr),
- TypeAnalysis(CanonicalIVTy), VPDT(*Plan) {}
+ CurrentParentLoop(CurrentParentLoop), TypeAnalysis(CanonicalIVTy),
+ VPDT(*Plan) {}
Value *VPTransformState::get(const VPValue *Def, const VPLane &Lane) {
if (Def->isLiveIn())
@@ -355,15 +355,6 @@ BasicBlock *VPTransformState::CFGState::getPreheaderBBFor(VPRecipeBase *R) {
return VPBB2IRBB[LoopRegion->getPreheaderVPBB()];
}
-void VPTransformState::addNewMetadata(Value *To, const Instruction *Orig) {
-
- // If the loop was versioned with memchecks, add the corresponding no-alias
- // metadata.
- Instruction *ToI = dyn_cast<Instruction>(To);
- if (ToI && LVer && isa<LoadInst, StoreInst>(Orig))
- LVer->annotateInstWithNoAlias(ToI, Orig);
-}
-
void VPTransformState::setDebugLocFrom(DebugLoc DL) {
const DILocation *DIL = DL;
// When a FSDiscriminator is enabled, we don't need to add the multiply
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 0692ad4e3bbfb..585fa3cd12ef6 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -2469,7 +2469,7 @@ class VPReductionEVLRecipe : public VPReductionRecipe {
/// copies of the original scalar type, one per lane, instead of producing a
/// single copy of widened type for all lanes. If the instruction is known to be
/// uniform only one copy, per lane zero, will be generated.
-class VPReplicateRecipe : public VPRecipeWithIRFlags {
+class VPReplicateRecipe : public VPRecipeWithIRFlags, public VPIRMetadata {
/// Indicator if only a single replica per lane is needed.
bool IsUniform;
@@ -2479,9 +2479,10 @@ class VPReplicateRecipe : public VPRecipeWithIRFlags {
public:
template <typename IterT>
VPReplicateRecipe(Instruction *I, iterator_range<IterT> Operands,
- bool IsUniform, VPValue *Mask = nullptr)
+ bool IsUniform, VPValue *Mask = nullptr,
+ ArrayRef<std::pair<unsigned, MDNode *>> Metadata = {})
: VPRecipeWithIRFlags(VPDef::VPReplicateSC, Operands, *I),
- IsUniform(IsUniform), IsPredicated(Mask) {
+ VPIRMetadata(Metadata), IsUniform(IsUniform), IsPredicated(Mask) {
if (Mask)
addOperand(Mask);
}
@@ -2489,9 +2490,9 @@ class VPReplicateRecipe : public VPRecipeWithIRFlags {
~VPReplicateRecipe() override = default;
VPReplicateRecipe *clone() override {
- auto *Copy =
- new VPReplicateRecipe(getUnderlyingInstr(), operands(), IsUniform,
- isPredicated() ? getMask() : nullptr);
+ auto *Copy = new VPReplicateRecipe(
+ getUnderlyingInstr(), operands(), IsUniform,
+ isPredicated() ? getMask() : nullptr, getMetadata());
Copy->transferFlags(*this);
return Copy;
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
index 4826dfd562c2d..9bbc8af7c13eb 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
@@ -38,7 +38,6 @@ class VPBasicBlock;
class VPRegionBlock;
class VPlan;
class Value;
-class LoopVersioning;
/// Returns a calculation for the total number of elements for a given \p VF.
/// For fixed width vectors this value is a constant, whereas for scalable
@@ -283,13 +282,6 @@ struct VPTransformState {
Iter->second[CacheIdx] = V;
}
- /// Add additional metadata to \p To that was not present on \p Orig.
- ///
- /// Currently this is used to add the noalias annotations based on the
- /// inserted memchecks. Use this for instructions that are *cloned* into the
- /// vector loop.
- void addNewMetadata(Value *To, const Instruction *Orig);
-
/// Set the debug location in the builder using the debug location \p DL.
void setDebugLocFrom(DebugLoc DL);
@@ -341,13 +333,6 @@ struct VPTransformState {
/// The parent loop object for the current scope, or nullptr.
Loop *CurrentParentLoop = nullptr;
- /// LoopVersioning. It's only set up (non-null) if memchecks were
- /// used.
- ///
- /// This is currently only used to add no-alias metadata based on the
- /// memchecks. The actually versioning is performed manually.
- LoopVersioning *LVer = nullptr;
-
/// VPlan-based type analysis.
VPTypeAnalysis TypeAnalysis;
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index b5a5d8b7ace7d..0645642dfca3f 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -2729,8 +2729,6 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
}
void VPWidenLoadRecipe::execute(VPTransformState &State) {
- auto *LI = cast<LoadInst>(&Ingredient);
-
Type *ScalarDataTy = getLoadStoreType(&Ingredient);
auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
const Align Alignment = getLoadStoreAlignment(&Ingredient);
@@ -2759,7 +2757,6 @@ void VPWidenLoadRecipe::execute(VPTransformState &State) {
NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
}
// Add metadata to the load, but setVectorValue to the reverse shuffle.
- State.addNewMetadata(NewLI, LI);
applyMetadata(NewLI);
if (Reverse)
NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
@@ -2788,8 +2785,6 @@ static Instruction *createReverseEVL(IRBuilderBase &Builder, Value *Operand,
}
void VPWidenLoadEVLRecipe::execute(VPTransformState &State) {
- auto *LI = cast<LoadInst>(&Ingredient);
-
Type *ScalarDataTy = getLoadStoreType(&Ingredient);
auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
const Align Alignment = getLoadStoreAlignment(&Ingredient);
@@ -2820,7 +2815,6 @@ void VPWidenLoadEVLRecipe::execute(VPTransformState &State) {
}
NewLI->addParamAttr(
0, Attribute::getWithAlignment(NewLI->getContext(), Alignment));
- State.addNewMetadata(NewLI, LI);
applyMetadata(NewLI);
Instruction *Res = NewLI;
if (isReverse())
@@ -2864,8 +2858,6 @@ void VPWidenLoadEVLRecipe::print(raw_ostream &O, const Twine &Indent,
#endif
void VPWidenStoreRecipe::execute(VPTransformState &State) {
- auto *SI = cast<StoreInst>(&Ingredient);
-
VPValue *StoredVPValue = getStoredValue();
bool CreateScatter = !isConsecutive();
const Align Alignment = getLoadStoreAlignment(&Ingredient);
@@ -2897,7 +2889,6 @@ void VPWidenStoreRecipe::execute(VPTransformState &State) {
NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
else
NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
- State.addNewMetadata(NewSI, SI);
applyMetadata(NewSI);
}
@@ -2910,8 +2901,6 @@ void VPWidenStoreRecipe::print(raw_ostream &O, const Twine &Indent,
#endif
void VPWidenStoreEVLRecipe::execute(VPTransformState &State) {
- auto *SI = cast<StoreInst>(&Ingredient);
-
VPValue *StoredValue = getStoredValue();
bool CreateScatter = !isConsecutive();
const Align Alignment = getLoadStoreAlignment(&Ingredient);
@@ -2945,7 +2934,6 @@ void VPWidenStoreEVLRecipe::execute(VPTransformState &State) {
}
NewSI->addParamAttr(
1, Attribute::getWithAlignment(NewSI->getContext(), Alignment));
- State.addNewMetadata(NewSI, SI);
applyMetadata(NewSI);
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 445bcac5919fc..9b6d4f24f735d 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -70,7 +70,8 @@ bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
NewRecipe = new VPWidenIntOrFpInductionRecipe(
Phi, Start, Step, &Plan->getVF(), *II, Ingredient.getDebugLoc());
} else {
- auto Metadata = VPRecipeBuilder::getMetadataToPropagate(Inst);
+ SmallVector<std::pair<unsigned, MDNode *>> Metadata;
+ ::getMetadataToPropagate(Inst, Metadata);
assert(isa<VPInstruction>(&Ingredient) &&
"only VPInstructions expected here");
assert(!isa<PHINode>(Inst) && "phis should be handled above");
@@ -177,7 +178,9 @@ static bool sinkScalarOperands(VPlan &Plan) {
if (ScalarVFOnly)
continue;
Instruction *I = SinkCandidate->getUnderlyingInstr();
- auto *Clone = new VPReplicateRecipe(I, SinkCandidate->operands(), true);
+ auto *Clone = new VPReplicateRecipe(
+ I, SinkCandidate->operands(), true, /*Mask*/ nullptr,
+ cast<VPReplicateRecipe>(SinkCandidate)->getMetadata());
// TODO: add ".cloned" suffix to name of Clone's VPValue.
Clone->insertBefore(SinkCandidate);
@@ -336,7 +339,7 @@ static VPRegionBlock *createReplicateRegion(VPReplicateRecipe *PredRecipe,
auto *RecipeWithoutMask = new VPReplicateRecipe(
PredRecipe->getUnderlyingInstr(),
make_range(PredRecipe->op_begin(), std::prev(PredRecipe->op_end())),
- PredRecipe->isUniform());
+ PredRecipe->isUniform(), /*Mask*/ nullptr, PredRecipe->getMetadata());
auto *Pred =
Plan.createVPBasicBlock(Twine(RegionName) + ".if", RecipeWithoutMask);
@@ -2768,7 +2771,8 @@ void VPlanTransforms::narrowInterleaveGroups(VPlan &Plan, ElementCount VF,
// Narrow wide load to uniform scalar load, as transformed VPlan will only
// process one original iteration.
auto *N = new VPReplicateRecipe(&WideLoad->getIngredient(),
- WideLoad->operands(), /*IsUniform*/ true);
+ WideLoad->operands(), /*IsUniform*/ true,
+ /*Mask*/ nullptr, WideLoad->getMetadata());
N->insertBefore(WideLoad);
return N;
};
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
index 9b6a1686eee6e..15c5258b57cc9 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
@@ -188,17 +188,17 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 {
; DEFAULT-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
; DEFAULT: vec.epilog.vector.body:
; DEFAULT-NEXT: [[INDEX5:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT8:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ]
-; DEFAULT-NEXT: [[TMP16:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META11:![0-9]+]]
+; DEFAULT-NEXT: [[TMP16:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META5]]
; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <8 x i64> poison, i64 [[TMP16]], i64 0
; DEFAULT-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT7]], <8 x i64> poison, <8 x i32> zeroinitializer
; DEFAULT-NEXT: [[TMP18:%.*]] = trunc <8 x i64> [[BROADCAST_SPLAT8]] to <8 x i8>
; DEFAULT-NEXT: [[TMP14:%.*]] = and <8 x i8> [[TMP18]], [[TMP15]]
; DEFAULT-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[DST]], i64 [[INDEX5]]
; DEFAULT-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[TMP26]], i32 0
-; DEFAULT-NEXT: store <8 x i8> [[TMP14]], ptr [[TMP27]], align 1, !alias.scope [[META14:![0-9]+]], !noalias [[META11]]
+; DEFAULT-NEXT: store <8 x i8> [[TMP14]], ptr [[TMP27]], align 1, !alias.scope [[META8]], !noalias [[META5]]
; DEFAULT-NEXT: [[INDEX_NEXT8]] = add nuw i64 [[INDEX5]], 8
; DEFAULT-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT8]], 1000
-; DEFAULT-NEXT: br i1 [[TMP17]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[TMP17]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; DEFAULT: vec.epilog.middle.block:
; DEFAULT-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; DEFAULT: vec.epilog.scalar.ph:
@@ -214,7 +214,7 @@ define void @trunc_store(ptr %dst, ptr %src, i16 %x) #1 {
; DEFAULT-NEXT: store i8 [[TRUNC]], ptr [[GEP]], align 1
; DEFAULT-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; DEFAULT-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
-; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP17:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP12:![0-9]+]]
; DEFAULT: exit:
; DEFAULT-NEXT: ret void
;
@@ -313,13 +313,8 @@ attributes #1 = { vscale_range(1,16) "target-features"="+sve" }
; DEFAULT: [[META8]] = !{[[META9:![0-9]+]]}
; DEFAULT: [[META9]] = distinct !{[[META9]], [[META7]]}
; DEFAULT: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
-; DEFAULT: [[META11]] = !{[[META12:![0-9]+]]}
-; DEFAULT: [[META12]] = distinct !{[[META12]], [[META13:![0-9]+]]}
-; DEFAULT: [[META13]] = distinct !{[[META13]], !"LVerDomain"}
-; DEFAULT: [[META14]] = !{[[META15:![0-9]+]]}
-; DEFAULT: [[META15]] = distinct !{[[META15]], [[META13]]}
-; DEFAULT: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]}
-; DEFAULT: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]]}
+; DEFAULT: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]]}
+; DEFAULT: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]]}
;.
; PRED: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; PRED: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}