[llvm-branch-commits] [llvm] acaa6e4 - [NFC] Uniquify 'const' in TargetTransformInfoImpl.h
Simon Moll via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Wed Dec 23 05:31:58 PST 2020
Author: Simon Moll
Date: 2020-12-23T14:21:41+01:00
New Revision: acaa6e4260cb5b2aa88f465eafea320d5f3f249c
URL: https://github.com/llvm/llvm-project/commit/acaa6e4260cb5b2aa88f465eafea320d5f3f249c
DIFF: https://github.com/llvm/llvm-project/commit/acaa6e4260cb5b2aa88f465eafea320d5f3f249c.diff
LOG: [NFC] Uniquify 'const' in TargetTransformInfoImpl.h
Some member functions of class TargetTransformInfoImplBase in
TargetTransformInfoImpl.h are marked const while others are not. Yet all
of them should be marked const, since they only provide default TTI
values. This patch fixes the inconsistency.
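
For illustration, here is a minimal sketch (hypothetical code, not part
of the patch) of the convention this change enforces: the default TTI
hooks touch no mutable state, so all of them can be const, and a target
can still refine a default by shadowing it in a derived class:

    // Hypothetical analogue of TargetTransformInfoImplBase: every
    // default hook only returns a fixed answer, so 'const' is safe.
    class ExampleTTIImplBase {
    public:
      bool hasBranchDivergence() const { return false; }
      unsigned getMaxInterleaveFactor(unsigned VF) const { return 1; }
    };

    // A target overrides a default by shadowing it; the const
    // qualifier on the base version does not get in the way.
    class ExampleTargetTTI : public ExampleTTIImplBase {
    public:
      unsigned getMaxInterleaveFactor(unsigned VF) const { return 2; }
    };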
Authored-by: Jinzheng Tu <b1f6c1c4 at gmail.com>
Reviewed By: simoll
Differential Revision: https://reviews.llvm.org/D93573
Added:
Modified:
llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
Removed:
################################################################################
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 6415e7bfe7c3..620bfb885b54 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -46,7 +46,7 @@ class TargetTransformInfoImplBase {
int getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands,
- TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) {
+ TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) const {
// In the basic model, we just assume that all-constant GEPs will be folded
// into their uses via addressing modes.
for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
@@ -59,28 +59,30 @@ class TargetTransformInfoImplBase {
unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
unsigned &JTSize,
ProfileSummaryInfo *PSI,
- BlockFrequencyInfo *BFI) {
+ BlockFrequencyInfo *BFI) const {
(void)PSI;
(void)BFI;
JTSize = 0;
return SI.getNumCases();
}
- unsigned getInliningThresholdMultiplier() { return 1; }
+ unsigned getInliningThresholdMultiplier() const { return 1; }
- int getInlinerVectorBonusPercent() { return 150; }
+ int getInlinerVectorBonusPercent() const { return 150; }
- unsigned getMemcpyCost(const Instruction *I) { return TTI::TCC_Expensive; }
+ unsigned getMemcpyCost(const Instruction *I) const {
+ return TTI::TCC_Expensive;
+ }
- bool hasBranchDivergence() { return false; }
+ bool hasBranchDivergence() const { return false; }
- bool useGPUDivergenceAnalysis() { return false; }
+ bool useGPUDivergenceAnalysis() const { return false; }
- bool isSourceOfDivergence(const Value *V) { return false; }
+ bool isSourceOfDivergence(const Value *V) const { return false; }
- bool isAlwaysUniform(const Value *V) { return false; }
+ bool isAlwaysUniform(const Value *V) const { return false; }
- unsigned getFlatAddressSpace() { return -1; }
+ unsigned getFlatAddressSpace() const { return -1; }
bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
Intrinsic::ID IID) const {
@@ -96,7 +98,7 @@ class TargetTransformInfoImplBase {
return nullptr;
}
- bool isLoweredToCall(const Function *F) {
+ bool isLoweredToCall(const Function *F) const {
assert(F && "A concrete function must be provided to this routine.");
// FIXME: These should almost certainly not be handled here, and instead
@@ -134,7 +136,7 @@ class TargetTransformInfoImplBase {
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
AssumptionCache &AC, TargetLibraryInfo *LibInfo,
- HardwareLoopInfo &HWLoopInfo) {
+ HardwareLoopInfo &HWLoopInfo) const {
return false;
}
@@ -170,39 +172,39 @@ class TargetTransformInfoImplBase {
}
void getUnrollingPreferences(Loop *, ScalarEvolution &,
- TTI::UnrollingPreferences &) {}
+ TTI::UnrollingPreferences &) const {}
void getPeelingPreferences(Loop *, ScalarEvolution &,
- TTI::PeelingPreferences &) {}
+ TTI::PeelingPreferences &) const {}
- bool isLegalAddImmediate(int64_t Imm) { return false; }
+ bool isLegalAddImmediate(int64_t Imm) const { return false; }
- bool isLegalICmpImmediate(int64_t Imm) { return false; }
+ bool isLegalICmpImmediate(int64_t Imm) const { return false; }
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
- Instruction *I = nullptr) {
+ Instruction *I = nullptr) const {
// Guess that only reg and reg+reg addressing is allowed. This heuristic is
// taken from the implementation of LSR.
return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
}
- bool isLSRCostLess(TTI::LSRCost &C1, TTI::LSRCost &C2) {
+ bool isLSRCostLess(TTI::LSRCost &C1, TTI::LSRCost &C2) const {
return std::tie(C1.NumRegs, C1.AddRecCost, C1.NumIVMuls, C1.NumBaseAdds,
C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
std::tie(C2.NumRegs, C2.AddRecCost, C2.NumIVMuls, C2.NumBaseAdds,
C2.ScaleCost, C2.ImmCost, C2.SetupCost);
}
- bool isNumRegsMajorCostOfLSR() { return true; }
+ bool isNumRegsMajorCostOfLSR() const { return true; }
- bool isProfitableLSRChainElement(Instruction *I) { return false; }
+ bool isProfitableLSRChainElement(Instruction *I) const { return false; }
- bool canMacroFuseCmp() { return false; }
+ bool canMacroFuseCmp() const { return false; }
bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
DominatorTree *DT, AssumptionCache *AC,
- TargetLibraryInfo *LibInfo) {
+ TargetLibraryInfo *LibInfo) const {
return false;
}
@@ -210,40 +212,51 @@ class TargetTransformInfoImplBase {
bool shouldFavorBackedgeIndex(const Loop *L) const { return false; }
- bool isLegalMaskedStore(Type *DataType, Align Alignment) { return false; }
+ bool isLegalMaskedStore(Type *DataType, Align Alignment) const {
+ return false;
+ }
- bool isLegalMaskedLoad(Type *DataType, Align Alignment) { return false; }
+ bool isLegalMaskedLoad(Type *DataType, Align Alignment) const {
+ return false;
+ }
- bool isLegalNTStore(Type *DataType, Align Alignment) {
+ bool isLegalNTStore(Type *DataType, Align Alignment) const {
// By default, assume nontemporal memory stores are available for stores
// that are aligned and have a size that is a power of 2.
unsigned DataSize = DL.getTypeStoreSize(DataType);
return Alignment >= DataSize && isPowerOf2_32(DataSize);
}
- bool isLegalNTLoad(Type *DataType, Align Alignment) {
+ bool isLegalNTLoad(Type *DataType, Align Alignment) const {
// By default, assume nontemporal memory loads are available for loads that
// are aligned and have a size that is a power of 2.
unsigned DataSize = DL.getTypeStoreSize(DataType);
return Alignment >= DataSize && isPowerOf2_32(DataSize);
}
- bool isLegalMaskedScatter(Type *DataType, Align Alignment) { return false; }
+ bool isLegalMaskedScatter(Type *DataType, Align Alignment) const {
+ return false;
+ }
- bool isLegalMaskedGather(Type *DataType, Align Alignment) { return false; }
+ bool isLegalMaskedGather(Type *DataType, Align Alignment) const {
+ return false;
+ }
- bool isLegalMaskedCompressStore(Type *DataType) { return false; }
+ bool isLegalMaskedCompressStore(Type *DataType) const { return false; }
- bool isLegalMaskedExpandLoad(Type *DataType) { return false; }
+ bool isLegalMaskedExpandLoad(Type *DataType) const { return false; }
- bool hasDivRemOp(Type *DataType, bool IsSigned) { return false; }
+ bool hasDivRemOp(Type *DataType, bool IsSigned) const { return false; }
- bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) { return false; }
+ bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const {
+ return false;
+ }
- bool prefersVectorizedAddressing() { return true; }
+ bool prefersVectorizedAddressing() const { return true; }
int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
- bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
+ bool HasBaseReg, int64_t Scale,
+ unsigned AddrSpace) const {
// Guess that all legal addressing modes are free.
if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
AddrSpace))
@@ -251,83 +264,87 @@ class TargetTransformInfoImplBase {
return -1;
}
- bool LSRWithInstrQueries() { return false; }
+ bool LSRWithInstrQueries() const { return false; }
- bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }
+ bool isTruncateFree(Type *Ty1, Type *Ty2) const { return false; }
- bool isProfitableToHoist(Instruction *I) { return true; }
+ bool isProfitableToHoist(Instruction *I) const { return true; }
- bool useAA() { return false; }
+ bool useAA() const { return false; }
- bool isTypeLegal(Type *Ty) { return false; }
+ bool isTypeLegal(Type *Ty) const { return false; }
- unsigned getRegUsageForType(Type *Ty) { return 1; }
+ unsigned getRegUsageForType(Type *Ty) const { return 1; }
- bool shouldBuildLookupTables() { return true; }
- bool shouldBuildLookupTablesForConstant(Constant *C) { return true; }
+ bool shouldBuildLookupTables() const { return true; }
+ bool shouldBuildLookupTablesForConstant(Constant *C) const { return true; }
- bool useColdCCForColdCall(Function &F) { return false; }
+ bool useColdCCForColdCall(Function &F) const { return false; }
unsigned getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
- bool Insert, bool Extract) {
+ bool Insert, bool Extract) const {
return 0;
}
unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
- unsigned VF) {
+ unsigned VF) const {
return 0;
}
- bool supportsEfficientVectorElementLoadStore() { return false; }
+ bool supportsEfficientVectorElementLoadStore() const { return false; }
- bool enableAggressiveInterleaving(bool LoopHasReductions) { return false; }
+ bool enableAggressiveInterleaving(bool LoopHasReductions) const {
+ return false;
+ }
TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
bool IsZeroCmp) const {
return {};
}
- bool enableInterleavedAccessVectorization() { return false; }
+ bool enableInterleavedAccessVectorization() const { return false; }
- bool enableMaskedInterleavedAccessVectorization() { return false; }
+ bool enableMaskedInterleavedAccessVectorization() const { return false; }
- bool isFPVectorizationPotentiallyUnsafe() { return false; }
+ bool isFPVectorizationPotentiallyUnsafe() const { return false; }
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
unsigned AddressSpace, unsigned Alignment,
- bool *Fast) {
+ bool *Fast) const {
return false;
}
- TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) {
+ TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const {
return TTI::PSK_Software;
}
- bool haveFastSqrt(Type *Ty) { return false; }
+ bool haveFastSqrt(Type *Ty) const { return false; }
- bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) { return true; }
+ bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const { return true; }
- unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }
+ unsigned getFPOpCost(Type *Ty) const {
+ return TargetTransformInfo::TCC_Basic;
+ }
int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
- Type *Ty) {
+ Type *Ty) const {
return 0;
}
unsigned getIntImmCost(const APInt &Imm, Type *Ty,
- TTI::TargetCostKind CostKind) {
+ TTI::TargetCostKind CostKind) const {
return TTI::TCC_Basic;
}
unsigned getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm,
Type *Ty, TTI::TargetCostKind CostKind,
- Instruction *Inst = nullptr) {
+ Instruction *Inst = nullptr) const {
return TTI::TCC_Free;
}
unsigned getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty,
- TTI::TargetCostKind CostKind) {
+ TTI::TargetCostKind CostKind) const {
return TTI::TCC_Free;
}
@@ -350,7 +367,7 @@ class TargetTransformInfoImplBase {
unsigned getRegisterBitWidth(bool Vector) const { return 32; }
- unsigned getMinVectorRegisterBitWidth() { return 128; }
+ unsigned getMinVectorRegisterBitWidth() const { return 128; }
bool shouldMaximizeVectorBandwidth(bool OptSize) const { return false; }
@@ -358,9 +375,8 @@ class TargetTransformInfoImplBase {
unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const { return 0; }
- bool
- shouldConsiderAddressTypePromotion(const Instruction &I,
- bool &AllowPromotionWithoutCommonHeader) {
+ bool shouldConsiderAddressTypePromotion(
+ const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
AllowPromotionWithoutCommonHeader = false;
return false;
}
@@ -399,7 +415,7 @@ class TargetTransformInfoImplBase {
unsigned getMaxPrefetchIterationsAhead() const { return UINT_MAX; }
bool enableWritePrefetching() const { return false; }
- unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
+ unsigned getMaxInterleaveFactor(unsigned VF) const { return 1; }
unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
TTI::TargetCostKind CostKind,
@@ -408,7 +424,7 @@ class TargetTransformInfoImplBase {
TTI::OperandValueProperties Opd1PropInfo,
TTI::OperandValueProperties Opd2PropInfo,
ArrayRef<const Value *> Args,
- const Instruction *CxtI = nullptr) {
+ const Instruction *CxtI = nullptr) const {
// FIXME: A number of transformation tests seem to require these values
// which seems a little odd given how arbitrary they are.
switch (Opcode) {
@@ -427,14 +443,14 @@ class TargetTransformInfoImplBase {
}
unsigned getShuffleCost(TTI::ShuffleKind Kind, VectorType *Ty, int Index,
- VectorType *SubTp) {
+ VectorType *SubTp) const {
return 1;
}
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
TTI::CastContextHint CCH,
TTI::TargetCostKind CostKind,
- const Instruction *I) {
+ const Instruction *I) const {
switch (Opcode) {
default:
break;
@@ -470,12 +486,11 @@ class TargetTransformInfoImplBase {
}
unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
- VectorType *VecTy, unsigned Index) {
+ VectorType *VecTy, unsigned Index) const {
return 1;
}
- unsigned getCFInstrCost(unsigned Opcode,
- TTI::TargetCostKind CostKind) {
+ unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) const {
// A phi would be free, unless we're costing the throughput because it
// will require a register.
if (Opcode == Instruction::PHI && CostKind != TTI::TCK_RecipThroughput)
@@ -490,7 +505,8 @@ class TargetTransformInfoImplBase {
return 1;
}
- unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
+ unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) const {
return 1;
}
@@ -502,26 +518,26 @@ class TargetTransformInfoImplBase {
unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
unsigned AddressSpace,
- TTI::TargetCostKind CostKind) {
+ TTI::TargetCostKind CostKind) const {
return 1;
}
unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
const Value *Ptr, bool VariableMask,
Align Alignment, TTI::TargetCostKind CostKind,
- const Instruction *I = nullptr) {
+ const Instruction *I = nullptr) const {
return 1;
}
unsigned getInterleavedMemoryOpCost(
unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
- bool UseMaskForCond, bool UseMaskForGaps) {
+ bool UseMaskForCond, bool UseMaskForGaps) const {
return 1;
}
unsigned getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
- TTI::TargetCostKind CostKind) {
+ TTI::TargetCostKind CostKind) const {
switch (ICA.getID()) {
default:
break;
@@ -560,26 +576,32 @@ class TargetTransformInfoImplBase {
}
unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys,
- TTI::TargetCostKind CostKind) {
+ TTI::TargetCostKind CostKind) const {
return 1;
}
- unsigned getNumberOfParts(Type *Tp) { return 0; }
+ unsigned getNumberOfParts(Type *Tp) const { return 0; }
unsigned getAddressComputationCost(Type *Tp, ScalarEvolution *,
- const SCEV *) {
+ const SCEV *) const {
return 0;
}
unsigned getArithmeticReductionCost(unsigned, VectorType *, bool,
- TTI::TargetCostKind) { return 1; }
+ TTI::TargetCostKind) const {
+ return 1;
+ }
unsigned getMinMaxReductionCost(VectorType *, VectorType *, bool, bool,
- TTI::TargetCostKind) { return 1; }
+ TTI::TargetCostKind) const {
+ return 1;
+ }
- unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { return 0; }
+ unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
+ return 0;
+ }
- bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) {
+ bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const {
return false;
}
@@ -593,7 +615,7 @@ class TargetTransformInfoImplBase {
}
Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
- Type *ExpectedType) {
+ Type *ExpectedType) const {
return nullptr;
}
@@ -692,7 +714,7 @@ class TargetTransformInfoImplBase {
protected:
// Obtain the minimum required size to hold the value (without the sign)
// In case of a vector it returns the min required size for one element.
- unsigned minRequiredElementSize(const Value *Val, bool &isSigned) {
+ unsigned minRequiredElementSize(const Value *Val, bool &isSigned) const {
if (isa<ConstantDataVector>(Val) || isa<ConstantVector>(Val)) {
const auto *VectorValue = cast<Constant>(Val);
@@ -746,12 +768,12 @@ class TargetTransformInfoImplBase {
return Val->getType()->getScalarSizeInBits();
}
- bool isStridedAccess(const SCEV *Ptr) {
+ bool isStridedAccess(const SCEV *Ptr) const {
return Ptr && isa<SCEVAddRecExpr>(Ptr);
}
const SCEVConstant *getConstantStrideStep(ScalarEvolution *SE,
- const SCEV *Ptr) {
+ const SCEV *Ptr) const {
if (!isStridedAccess(Ptr))
return nullptr;
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ptr);
@@ -759,7 +781,7 @@ class TargetTransformInfoImplBase {
}
bool isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr,
- int64_t MergeDistance) {
+ int64_t MergeDistance) const {
const SCEVConstant *Step = getConstantStrideStep(SE, Ptr);
if (!Step)
return false;