[llvm] 357237e - Recommit "[TTI] Unify FavorPostInc and FavorBackedgeIndex into getPreferredAddressingMode"
Sjoerd Meijer via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 15 03:33:21 PST 2021
Author: Sjoerd Meijer
Date: 2021-02-15T11:33:00Z
New Revision: 357237e93ec1c75302bf11975cfadc8fc2220a55
URL: https://github.com/llvm/llvm-project/commit/357237e93ec1c75302bf11975cfadc8fc2220a55
DIFF: https://github.com/llvm/llvm-project/commit/357237e93ec1c75302bf11975cfadc8fc2220a55.diff
LOG: Recommit "[TTI] Unify FavorPostInc and FavorBackedgeIndex into getPreferredAddressingMode"
This reverts commit effc3b079927a6dd3084b4ff712ec07f926366f0, with the build
problem fixed.
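For quick orientation before the diff: the two boolean hooks collapse into one
three-valued enum. Below is a minimal standalone sketch of the mapping, using a
made-up helper name unify() — it is not part of the commit, just an
illustration of how the old predicates correspond to the new enum values.

#include <cassert>

// Stand-in for the TTI::AddressingModeKind enum this patch introduces.
enum AddressingModeKind { AMK_PreIndexed, AMK_PostIndexed, AMK_None };

// Hypothetical migration helper: folds the two legacy predicates into the
// new enum. Post-inc takes precedence here; the in-tree targets touched by
// this patch never report both, so that ordering is an assumption.
AddressingModeKind unify(bool FavorPostInc, bool FavorBackedgeIndex) {
  if (FavorPostInc)
    return AMK_PostIndexed;  // old shouldFavorPostInc() == true
  if (FavorBackedgeIndex)
    return AMK_PreIndexed;   // old shouldFavorBackedgeIndex(L) == true
  return AMK_None;
}

int main() {
  assert(unify(true, false) == AMK_PostIndexed);
  assert(unify(false, true) == AMK_PreIndexed);
  assert(unify(false, false) == AMK_None);
  return 0;
}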
Added:
Modified:
llvm/include/llvm/Analysis/TargetTransformInfo.h
llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
llvm/lib/Analysis/TargetTransformInfo.cpp
llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
llvm/lib/Target/ARM/ARMTargetTransformInfo.h
llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index c3d7d2cc80a4..79303dab92a2 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -638,13 +638,15 @@ class TargetTransformInfo {
DominatorTree *DT, AssumptionCache *AC,
TargetLibraryInfo *LibInfo) const;
- /// \return True if LSR should make efforts to create/preserve post-inc
- /// addressing mode expressions.
- bool shouldFavorPostInc() const;
+ enum AddressingModeKind {
+ AMK_PreIndexed,
+ AMK_PostIndexed,
+ AMK_None
+ };
- /// Return true if LSR should make efforts to generate indexed addressing
- /// modes that operate across loop iterations.
- bool shouldFavorBackedgeIndex(const Loop *L) const;
+ /// Return the preferred addressing mode LSR should make efforts to generate.
+ AddressingModeKind getPreferredAddressingMode(const Loop *L,
+ ScalarEvolution *SE) const;
/// Return true if the target supports masked store.
bool isLegalMaskedStore(Type *DataType, Align Alignment) const;
@@ -1454,8 +1456,8 @@ class TargetTransformInfo::Concept {
virtual bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
TargetLibraryInfo *LibInfo) = 0;
- virtual bool shouldFavorPostInc() const = 0;
- virtual bool shouldFavorBackedgeIndex(const Loop *L) const = 0;
+ virtual AddressingModeKind
+ getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const = 0;
virtual bool isLegalMaskedStore(Type *DataType, Align Alignment) = 0;
virtual bool isLegalMaskedLoad(Type *DataType, Align Alignment) = 0;
virtual bool isLegalNTStore(Type *DataType, Align Alignment) = 0;
@@ -1796,9 +1798,10 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
TargetLibraryInfo *LibInfo) override {
return Impl.canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
}
- bool shouldFavorPostInc() const override { return Impl.shouldFavorPostInc(); }
- bool shouldFavorBackedgeIndex(const Loop *L) const override {
- return Impl.shouldFavorBackedgeIndex(L);
+ AddressingModeKind
+ getPreferredAddressingMode(const Loop *L,
+ ScalarEvolution *SE) const override {
+ return Impl.getPreferredAddressingMode(L, SE);
}
bool isLegalMaskedStore(Type *DataType, Align Alignment) override {
return Impl.isLegalMaskedStore(DataType, Alignment);
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 84de5038df42..a9c9d3cb9f4f 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -209,9 +209,10 @@ class TargetTransformInfoImplBase {
return false;
}
- bool shouldFavorPostInc() const { return false; }
-
- bool shouldFavorBackedgeIndex(const Loop *L) const { return false; }
+ TTI::AddressingModeKind
+ getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const {
+ return TTI::AMK_None;
+ }
bool isLegalMaskedStore(Type *DataType, Align Alignment) const {
return false;
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 16992d099e0a..3db4b0b0d553 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -409,12 +409,10 @@ bool TargetTransformInfo::canSaveCmp(Loop *L, BranchInst **BI,
return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
}
-bool TargetTransformInfo::shouldFavorPostInc() const {
- return TTIImpl->shouldFavorPostInc();
-}
-
-bool TargetTransformInfo::shouldFavorBackedgeIndex(const Loop *L) const {
- return TTIImpl->shouldFavorBackedgeIndex(L);
+TTI::AddressingModeKind
+TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
+ ScalarEvolution *SE) const {
+ return TTIImpl->getPreferredAddressingMode(L, SE);
}
bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 80f1f2a2a8f7..8c2a79efc674 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -100,18 +100,20 @@ bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
return MatchExact && MatchSubset;
}
-bool ARMTTIImpl::shouldFavorBackedgeIndex(const Loop *L) const {
- if (L->getHeader()->getParent()->hasOptSize())
- return false;
+TTI::AddressingModeKind
+ARMTTIImpl::getPreferredAddressingMode(const Loop *L,
+ ScalarEvolution *SE) const {
if (ST->hasMVEIntegerOps())
- return false;
- return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1;
-}
+ return TTI::AMK_PostIndexed;
-bool ARMTTIImpl::shouldFavorPostInc() const {
- if (ST->hasMVEIntegerOps())
- return true;
- return false;
+ if (L->getHeader()->getParent()->hasOptSize())
+ return TTI::AMK_None;
+
+ if (ST->isMClass() && ST->isThumb2() &&
+ L->getNumBlocks() == 1)
+ return TTI::AMK_PreIndexed;
+
+ return TTI::AMK_None;
}
Optional<Instruction *>
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index b8de27101a61..808128929000 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -103,8 +103,8 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
bool enableInterleavedAccessVectorization() { return true; }
- bool shouldFavorBackedgeIndex(const Loop *L) const;
- bool shouldFavorPostInc() const;
+ TTI::AddressingModeKind
+ getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const;
/// Floating-point computation using ARMv8 AArch32 Advanced
/// SIMD instructions remains unchanged from ARMv7. Only AArch64 SIMD
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
index af7bc4682249..c6ef63b2aa31 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
@@ -80,8 +80,10 @@ void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
}
}
-bool HexagonTTIImpl::shouldFavorPostInc() const {
- return true;
+TTI::AddressingModeKind
+HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
+ ScalarEvolution *SE) const {
+ return TTI::AMK_PostIndexed;
}
/// --- Vector TTI begin ---
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
index dc075d6147b6..10b31088e451 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
+++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
@@ -67,7 +67,8 @@ class HexagonTTIImpl : public BasicTTIImplBase<HexagonTTIImpl> {
TTI::PeelingPreferences &PP);
/// Bias LSR towards creating post-increment opportunities.
- bool shouldFavorPostInc() const;
+ TTI::AddressingModeKind
+ getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const;
// L1 cache prefetch.
unsigned getPrefetchDistance() const override;
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 5dec9b542076..2f90df70a3c3 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -1227,13 +1227,15 @@ static unsigned getSetupCost(const SCEV *Reg, unsigned Depth) {
/// Tally up interesting quantities from the given register.
void Cost::RateRegister(const Formula &F, const SCEV *Reg,
SmallPtrSetImpl<const SCEV *> &Regs) {
+ TTI::AddressingModeKind AMK = TTI->getPreferredAddressingMode(L, SE);
+
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
// If this is an addrec for another loop, it should be an invariant
// with respect to L since L is the innermost loop (at least
// for now LSR only handles innermost loops).
if (AR->getLoop() != L) {
// If the AddRec exists, consider its register free and leave it alone.
- if (isExistingPhi(AR, *SE) && !TTI->shouldFavorPostInc())
+ if (isExistingPhi(AR, *SE) && AMK != TTI::AMK_PostIndexed)
return;
// It is bad to allow LSR for current loop to add induction variables
@@ -1254,13 +1256,11 @@ void Cost::RateRegister(const Formula &F, const SCEV *Reg,
// If the step size matches the base offset, we could use pre-indexed
// addressing.
- if (TTI->shouldFavorBackedgeIndex(L)) {
+ if (AMK == TTI::AMK_PreIndexed) {
if (auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)))
if (Step->getAPInt() == F.BaseOffset)
LoopCost = 0;
- }
-
- if (TTI->shouldFavorPostInc()) {
+ } else if (AMK == TTI::AMK_PostIndexed) {
const SCEV *LoopStep = AR->getStepRecurrence(*SE);
if (isa<SCEVConstant>(LoopStep)) {
const SCEV *LoopStart = AR->getStart();
@@ -3575,7 +3575,8 @@ void LSRInstance::GenerateReassociationsImpl(LSRUse &LU, unsigned LUIdx,
// may generate a post-increment operator. The reason is that the
// reassociations cause extra base+register formulas to be created,
// and possibly chosen, but the post-increment is more efficient.
- if (TTI.shouldFavorPostInc() && mayUsePostIncMode(TTI, LU, BaseReg, L, SE))
+ TTI::AddressingModeKind AMK = TTI.getPreferredAddressingMode(L, &SE);
+ if (AMK == TTI::AMK_PostIndexed && mayUsePostIncMode(TTI, LU, BaseReg, L, SE))
return;
SmallVector<const SCEV *, 8> AddOps;
const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE);
@@ -4239,7 +4240,8 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
NewF.BaseOffset = (uint64_t)NewF.BaseOffset + Imm;
if (!isLegalUse(TTI, LU.MinOffset, LU.MaxOffset,
LU.Kind, LU.AccessTy, NewF)) {
- if (TTI.shouldFavorPostInc() &&
+ if (TTI.getPreferredAddressingMode(this->L, &SE) ==
+ TTI::AMK_PostIndexed &&
mayUsePostIncMode(TTI, LU, OrigReg, this->L, SE))
continue;
if (!TTI.isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
@@ -4679,7 +4681,7 @@ void LSRInstance::NarrowSearchSpaceByFilterFormulaWithSameScaledReg() {
/// If we are over the complexity limit, filter out any post-inc preferring
/// variables to only post-inc values.
void LSRInstance::NarrowSearchSpaceByFilterPostInc() {
- if (!TTI.shouldFavorPostInc())
+ if (TTI.getPreferredAddressingMode(L, &SE) != TTI::AMK_PostIndexed)
return;
if (EstimateSearchSpaceComplexity() < ComplexityLimit)
return;
@@ -4978,7 +4980,8 @@ void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
// This can sometimes (notably when trying to favour postinc) lead to
// sub-optimal decisions. There it is best left to the cost modelling to
// get correct.
- if (!TTI.shouldFavorPostInc() || LU.Kind != LSRUse::Address) {
+ if (TTI.getPreferredAddressingMode(L, &SE) != TTI::AMK_PostIndexed ||
+ LU.Kind != LSRUse::Address) {
int NumReqRegsToFind = std::min(F.getNumRegs(), ReqRegs.size());
for (const SCEV *Reg : ReqRegs) {
if ((F.ScaledReg && F.ScaledReg == Reg) ||
@@ -5560,7 +5563,8 @@ LSRInstance::LSRInstance(Loop *L, IVUsers &IU, ScalarEvolution &SE,
TargetLibraryInfo &TLI, MemorySSAUpdater *MSSAU)
: IU(IU), SE(SE), DT(DT), LI(LI), AC(AC), TLI(TLI), TTI(TTI), L(L),
MSSAU(MSSAU), FavorBackedgeIndex(EnableBackedgeIndexing &&
- TTI.shouldFavorBackedgeIndex(L)) {
+ TTI.getPreferredAddressingMode(L, &SE) ==
+ TTI::AMK_PreIndexed) {
// If LoopSimplify form is not available, stay out of trouble.
if (!L->isLoopSimplifyForm())
return;
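A closing usage note (illustrative, not from the commit): callers now switch on
the single returned enum instead of querying two predicates, as the
RateRegister hunk above does. Here is a toy model of that cost decision,
assuming plain integers and a boolean stand in for the SCEV step/start checks
the real code performs.

#include <cstdint>

enum AddressingModeKind { AMK_PreIndexed, AMK_PostIndexed, AMK_None };

// Simplified shape of the RateRegister logic after this patch: the two
// preferences are now mutually exclusive branches of one if/else chain.
unsigned rateAddRecLoopCost(AddressingModeKind AMK, int64_t Step,
                            int64_t BaseOffset, bool StepFoldsIntoAccess) {
  unsigned LoopCost = 1;
  if (AMK == AMK_PreIndexed) {
    // Pre-indexed: a step matching the base offset can fold into the access.
    if (Step == BaseOffset)
      LoopCost = 0;
  } else if (AMK == AMK_PostIndexed) {
    // Post-indexed: a foldable constant step makes the increment free.
    if (StepFoldsIntoAccess)
      LoopCost = 0;
  }
  return LoopCost;
}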