[llvm] 0014b49 - [TTI] Make all interface methods const (NFCI) (#136598)

via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 21 20:27:33 PDT 2025


Author: Sergei Barannikov
Date: 2025-04-22T06:27:29+03:00
New Revision: 0014b49482c0862c140149c650d653b4e41fa9b4

URL: https://github.com/llvm/llvm-project/commit/0014b49482c0862c140149c650d653b4e41fa9b4
DIFF: https://github.com/llvm/llvm-project/commit/0014b49482c0862c140149c650d653b4e41fa9b4.diff

LOG: [TTI] Make all interface methods const (NFCI) (#136598)

Making `TargetTransformInfo::Model::Impl` `const` ensures that all
interface methods are `const` in `BasicTTIImpl`, its bases, and all
derived classes.
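
A minimal sketch of why this works (simplified, illustrative names; not
the actual LLVM headers): the type-erasing `Model` wrapper forwards every
interface call through its `const T Impl` member, so each forwarded call
compiles only if the corresponding method of the implementation type is
itself `const`.

  // Illustrative sketch only; the real declarations live in
  // llvm/include/llvm/Analysis/TargetTransformInfo.h.
  #include <utility>

  struct Concept {
    virtual ~Concept() = default;
    virtual unsigned getMaxInterleaveFactor() const = 0;
  };

  template <typename T> struct Model final : Concept {
    const T Impl; // 'const' forces every forwarded call below to resolve
                  // to a const member function of T.
    explicit Model(T Impl) : Impl(std::move(Impl)) {}
    unsigned getMaxInterleaveFactor() const override {
      // Ill-formed unless T::getMaxInterleaveFactor() is const.
      return Impl.getMaxInterleaveFactor();
    }
  };

  struct SomeTargetTTI {
    // Must be const, or Model<SomeTargetTTI> fails to compile.
    unsigned getMaxInterleaveFactor() const { return 1; }
  };

Any interface method still missing a `const` qualifier anywhere in the
CRTP chain thus becomes a compile error, which is how the change flushes
out the remaining non-const methods in the targets touched below.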

Pull Request: https://github.com/llvm/llvm-project/pull/136598

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/TargetTransformInfo.h
    llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
    llvm/include/llvm/CodeGen/BasicTTIImpl.h
    llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
    llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
    llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
    llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
    llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp
    llvm/lib/Target/AMDGPU/R600TargetTransformInfo.h
    llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
    llvm/lib/Target/ARM/ARMTargetTransformInfo.h
    llvm/lib/Target/BPF/BPFTargetTransformInfo.h
    llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
    llvm/lib/Target/DirectX/DirectXTargetTransformInfo.h
    llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
    llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
    llvm/lib/Target/Lanai/LanaiTargetTransformInfo.h
    llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
    llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h
    llvm/lib/Target/Mips/MipsTargetTransformInfo.cpp
    llvm/lib/Target/Mips/MipsTargetTransformInfo.h
    llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
    llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
    llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
    llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
    llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
    llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
    llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h
    llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
    llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h
    llvm/lib/Target/VE/VETargetTransformInfo.h
    llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
    llvm/lib/Target/X86/X86TargetTransformInfo.cpp
    llvm/lib/Target/X86/X86TargetTransformInfo.h

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 2efca0d1d754f..b5d766c34d09d 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -2367,7 +2367,7 @@ class TargetTransformInfo::Concept {
 
 template <typename T>
 class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
-  T Impl;
+  const T Impl;
 
 public:
   Model(T Impl) : Impl(std::move(Impl)) {}

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index ea481baddc5c3..d28803cf164a3 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -341,7 +341,7 @@ class TargetTransformInfoImplBase {
   }
 
   bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
-                                    Align Alignment, unsigned AddrSpace) {
+                                    Align Alignment, unsigned AddrSpace) const {
     return false;
   }
 
@@ -440,7 +440,7 @@ class TargetTransformInfoImplBase {
 
   bool enableSelectOptimize() const { return true; }
 
-  bool shouldTreatInstructionLikeSelect(const Instruction *I) {
+  bool shouldTreatInstructionLikeSelect(const Instruction *I) const {
     // A select with two constant operands will usually be better left as a
     // select.
     using namespace llvm::PatternMatch;
@@ -747,7 +747,7 @@ class TargetTransformInfoImplBase {
 
   unsigned getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
                                      const APInt &DemandedDstElts,
-                                     TTI::TargetCostKind CostKind) {
+                                     TTI::TargetCostKind CostKind) const {
     return 1;
   }
 
@@ -1170,7 +1170,7 @@ class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
 
   InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
                              ArrayRef<const Value *> Operands, Type *AccessType,
-                             TTI::TargetCostKind CostKind) {
+                             TTI::TargetCostKind CostKind) const {
     assert(PointeeType && Ptr && "can't get GEPCost of nullptr");
     auto *BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
     bool HasBaseReg = (BaseGV == nullptr);
@@ -1234,7 +1234,7 @@ class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
 
     // If the final address of the GEP is a legal addressing mode for the given
     // access type, then we can fold it into its users.
-    if (static_cast<T *>(this)->isLegalAddressingMode(
+    if (static_cast<const T *>(this)->isLegalAddressingMode(
             AccessType, const_cast<GlobalValue *>(BaseGV),
             BaseOffset.sextOrTrunc(64).getSExtValue(), HasBaseReg, Scale,
             Ptr->getType()->getPointerAddressSpace()))
@@ -1250,7 +1250,7 @@ class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
                                        const Value *Base,
                                        const TTI::PointersChainInfo &Info,
                                        Type *AccessTy,
-                                       TTI::TargetCostKind CostKind) {
+                                       TTI::TargetCostKind CostKind) const {
     InstructionCost Cost = TTI::TCC_Free;
     // In the basic model we take into account GEP instructions only
     // (although here can come alloca instruction, a value, constants and/or
@@ -1269,15 +1269,15 @@ class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
       if (Info.isSameBase() && V != Base) {
         if (GEP->hasAllConstantIndices())
           continue;
-        Cost += static_cast<T *>(this)->getArithmeticInstrCost(
+        Cost += static_cast<const T *>(this)->getArithmeticInstrCost(
             Instruction::Add, GEP->getType(), CostKind,
             {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None},
             {});
       } else {
         SmallVector<const Value *> Indices(GEP->indices());
-        Cost += static_cast<T *>(this)->getGEPCost(GEP->getSourceElementType(),
-                                                   GEP->getPointerOperand(),
-                                                   Indices, AccessTy, CostKind);
+        Cost += static_cast<const T *>(this)->getGEPCost(
+            GEP->getSourceElementType(), GEP->getPointerOperand(), Indices,
+            AccessTy, CostKind);
       }
     }
     return Cost;
@@ -1285,10 +1285,10 @@ class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
 
   InstructionCost getInstructionCost(const User *U,
                                      ArrayRef<const Value *> Operands,
-                                     TTI::TargetCostKind CostKind) {
+                                     TTI::TargetCostKind CostKind) const {
     using namespace llvm::PatternMatch;
 
-    auto *TargetTTI = static_cast<T *>(this);
+    auto *TargetTTI = static_cast<const T *>(this);
     // Handle non-intrinsic calls, invokes, and callbr.
     // FIXME: Unlikely to be true for anything but CodeSize.
     auto *CB = dyn_cast<CallBase>(U);
@@ -1585,8 +1585,8 @@ class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
     return CostKind == TTI::TCK_RecipThroughput ? -1 : TTI::TCC_Basic;
   }
 
-  bool isExpensiveToSpeculativelyExecute(const Instruction *I) {
-    auto *TargetTTI = static_cast<T *>(this);
+  bool isExpensiveToSpeculativelyExecute(const Instruction *I) const {
+    auto *TargetTTI = static_cast<const T *>(this);
     SmallVector<const Value *, 4> Ops(I->operand_values());
     InstructionCost Cost = TargetTTI->getInstructionCost(
         I, Ops, TargetTransformInfo::TCK_SizeAndLatency);

diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index b42223eda9922..ca32d36297beb 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -379,11 +379,11 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     return (CallerBits & CalleeBits) == CalleeBits;
   }
 
-  bool hasBranchDivergence(const Function *F = nullptr) { return false; }
+  bool hasBranchDivergence(const Function *F = nullptr) const { return false; }
 
-  bool isSourceOfDivergence(const Value *V) { return false; }
+  bool isSourceOfDivergence(const Value *V) const { return false; }
 
-  bool isAlwaysUniform(const Value *V) { return false; }
+  bool isAlwaysUniform(const Value *V) const { return false; }
 
   bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
     return false;
@@ -393,7 +393,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     return true;
   }
 
-  unsigned getFlatAddressSpace() {
+  unsigned getFlatAddressSpace() const {
     // Return an invalid address space.
     return -1;
   }
@@ -426,22 +426,22 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     return nullptr;
   }
 
-  bool isLegalAddImmediate(int64_t imm) {
+  bool isLegalAddImmediate(int64_t imm) const {
     return getTLI()->isLegalAddImmediate(imm);
   }
 
-  bool isLegalAddScalableImmediate(int64_t Imm) {
+  bool isLegalAddScalableImmediate(int64_t Imm) const {
     return getTLI()->isLegalAddScalableImmediate(Imm);
   }
 
-  bool isLegalICmpImmediate(int64_t imm) {
+  bool isLegalICmpImmediate(int64_t imm) const {
     return getTLI()->isLegalICmpImmediate(imm);
   }
 
   bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                              bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
                              Instruction *I = nullptr,
-                             int64_t ScalableOffset = 0) {
+                             int64_t ScalableOffset = 0) const {
     TargetLoweringBase::AddrMode AM;
     AM.BaseGV = BaseGV;
     AM.BaseOffs = BaseOffset;
@@ -487,11 +487,11 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
   }
 
-  bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) {
+  bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) const {
     return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
   }
 
-  bool isNumRegsMajorCostOfLSR() {
+  bool isNumRegsMajorCostOfLSR() const {
     return TargetTransformInfoImplBase::isNumRegsMajorCostOfLSR();
   }
 
@@ -499,13 +499,14 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     return TargetTransformInfoImplBase::shouldDropLSRSolutionIfLessProfitable();
   }
 
-  bool isProfitableLSRChainElement(Instruction *I) {
+  bool isProfitableLSRChainElement(Instruction *I) const {
     return TargetTransformInfoImplBase::isProfitableLSRChainElement(I);
   }
 
   InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                        StackOffset BaseOffset, bool HasBaseReg,
-                                       int64_t Scale, unsigned AddrSpace) {
+                                       int64_t Scale,
+                                       unsigned AddrSpace) const {
     TargetLoweringBase::AddrMode AM;
     AM.BaseGV = BaseGV;
     AM.BaseOffs = BaseOffset.getFixed();
@@ -517,11 +518,11 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     return InstructionCost::getInvalid();
   }
 
-  bool isTruncateFree(Type *Ty1, Type *Ty2) {
+  bool isTruncateFree(Type *Ty1, Type *Ty2) const {
     return getTLI()->isTruncateFree(Ty1, Ty2);
   }
 
-  bool isProfitableToHoist(Instruction *I) {
+  bool isProfitableToHoist(Instruction *I) const {
     return getTLI()->isProfitableToHoist(I);
   }
 
@@ -539,14 +540,14 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
 
   InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
                              ArrayRef<const Value *> Operands, Type *AccessType,
-                             TTI::TargetCostKind CostKind) {
+                             TTI::TargetCostKind CostKind) const {
     return BaseT::getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
   }
 
   unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                             unsigned &JumpTableSize,
                                             ProfileSummaryInfo *PSI,
-                                            BlockFrequencyInfo *BFI) {
+                                            BlockFrequencyInfo *BFI) const {
     /// Try to find the estimated number of clusters. Note that the number of
    /// clusters identified in this function could be different from the actual
     /// numbers found in lowering. This function ignore switches that are
@@ -602,7 +603,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     return N;
   }
 
-  bool shouldBuildLookupTables() {
+  bool shouldBuildLookupTables() const {
     const TargetLoweringBase *TLI = getTLI();
     return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
            TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
@@ -633,18 +634,16 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     return true;
   }
 
-  bool haveFastSqrt(Type *Ty) {
+  bool haveFastSqrt(Type *Ty) const {
     const TargetLoweringBase *TLI = getTLI();
     EVT VT = TLI->getValueType(DL, Ty);
     return TLI->isTypeLegal(VT) &&
            TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
   }
 
-  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
-    return true;
-  }
+  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const { return true; }
 
-  InstructionCost getFPOpCost(Type *Ty) {
+  InstructionCost getFPOpCost(Type *Ty) const {
     // Check whether FADD is available, as a proxy for floating-point in
     // general.
     const TargetLoweringBase *TLI = getTLI();
@@ -674,7 +673,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   }
 
   unsigned getInliningThresholdMultiplier() const { return 1; }
-  unsigned adjustInliningThreshold(const CallBase *CB) { return 0; }
+  unsigned adjustInliningThreshold(const CallBase *CB) const { return 0; }
   unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const {
     return 0;
   }
@@ -683,7 +682,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
 
   void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                TTI::UnrollingPreferences &UP,
-                               OptimizationRemarkEmitter *ORE) {
+                               OptimizationRemarkEmitter *ORE) const {
     // This unrolling functionality is target independent, but to provide some
     // motivation for its intended use, for x86:
 
@@ -754,7 +753,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   }
 
   void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                             TTI::PeelingPreferences &PP) {
+                             TTI::PeelingPreferences &PP) const {
     PP.PeelCount = 0;
     PP.AllowPeeling = true;
     PP.AllowLoopNestsPeeling = false;
@@ -762,34 +761,33 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   }
 
   bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
-                                AssumptionCache &AC,
-                                TargetLibraryInfo *LibInfo,
-                                HardwareLoopInfo &HWLoopInfo) {
+                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
+                                HardwareLoopInfo &HWLoopInfo) const {
     return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
   }
 
-  unsigned getEpilogueVectorizationMinVF() {
+  unsigned getEpilogueVectorizationMinVF() const {
     return BaseT::getEpilogueVectorizationMinVF();
   }
 
-  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) {
+  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const {
     return BaseT::preferPredicateOverEpilogue(TFI);
   }
 
   TailFoldingStyle
-  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) {
+  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) const {
     return BaseT::getPreferredTailFoldingStyle(IVUpdateMayOverflow);
   }
 
   std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
-                                               IntrinsicInst &II) {
+                                                    IntrinsicInst &II) const {
     return BaseT::instCombineIntrinsic(IC, II);
   }
 
   std::optional<Value *>
   simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                    APInt DemandedMask, KnownBits &Known,
-                                   bool &KnownBitsComputed) {
+                                   bool &KnownBitsComputed) const {
     return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                    KnownBitsComputed);
   }
@@ -798,7 +796,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
       InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
       APInt &UndefElts2, APInt &UndefElts3,
       std::function<void(Instruction *, unsigned, APInt, APInt &)>
-          SimplifyAndSetOp) {
+          SimplifyAndSetOp) const {
     return BaseT::simplifyDemandedVectorEltsIntrinsic(
         IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
         SimplifyAndSetOp);
@@ -1015,7 +1013,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     }
   }
 
-  unsigned getMaxInterleaveFactor(ElementCount VF) { return 1; }
+  unsigned getMaxInterleaveFactor(ElementCount VF) const { return 1; }
 
   InstructionCost getArithmeticInstrCost(
       unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
@@ -1337,7 +1335,8 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   }
 
   InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
-                                           VectorType *VecTy, unsigned Index) {
+                                           VectorType *VecTy,
+                                           unsigned Index) const {
     TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
     return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                        CostKind, Index, nullptr, nullptr) +
@@ -1417,14 +1416,14 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   InstructionCost getVectorInstrCost(
       unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
       Value *Scalar,
-      ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) {
+      ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) const {
     return thisT()->getVectorInstrCost(Opcode, Val, CostKind, Index, nullptr,
                                        nullptr);
   }
 
   InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
                                      TTI::TargetCostKind CostKind,
-                                     unsigned Index) {
+                                     unsigned Index) const {
     Value *Op0 = nullptr;
     Value *Op1 = nullptr;
     if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
@@ -1554,7 +1553,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   InstructionCost getInterleavedMemoryOpCost(
       unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
       Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
-      bool UseMaskForCond = false, bool UseMaskForGaps = false) {
+      bool UseMaskForCond = false, bool UseMaskForGaps = false) const {
 
     // We cannot scalarize scalable vectors, so return Invalid.
     if (isa<ScalableVectorType>(VecTy))
@@ -2886,7 +2885,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   }
 
   InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *,
-                                            const SCEV *) {
+                                            const SCEV *) const {
     return 0;
   }
 
@@ -3067,7 +3066,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
                                            Type *ResTy, VectorType *Ty,
                                            std::optional<FastMathFlags> FMF,
-                                           TTI::TargetCostKind CostKind) {
+                                           TTI::TargetCostKind CostKind) const {
     if (auto *FTy = dyn_cast<FixedVectorType>(Ty);
         FTy && IsUnsigned && Opcode == Instruction::Add &&
         FTy->getElementType() == IntegerType::getInt1Ty(Ty->getContext())) {
@@ -3095,7 +3094,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
 
   InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
                                          VectorType *Ty,
-                                         TTI::TargetCostKind CostKind) {
+                                         TTI::TargetCostKind CostKind) const {
     // Without any native support, this is equivalent to the cost of
     // vecreduce.add(mul(ext(Ty A), ext(Ty B))) or
     // vecreduce.add(mul(A, B)).

diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 51fa5237fcc50..720daa384968c 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -415,7 +415,7 @@ AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
 InstructionCost AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                                   const APInt &Imm, Type *Ty,
                                                   TTI::TargetCostKind CostKind,
-                                                  Instruction *Inst) {
+                                                  Instruction *Inst) const {
   assert(Ty->isIntegerTy());
 
   unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -483,7 +483,7 @@ InstructionCost AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
 InstructionCost
 AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                     const APInt &Imm, Type *Ty,
-                                    TTI::TargetCostKind CostKind) {
+                                    TTI::TargetCostKind CostKind) const {
   assert(Ty->isIntegerTy());
 
   unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -533,7 +533,7 @@ AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
 }
 
 TargetTransformInfo::PopcntSupportKind
-AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
+AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) const {
   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
   if (TyWidth == 32 || TyWidth == 64)
     return TTI::PSK_FastHardware;
@@ -3560,7 +3560,7 @@ InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
 InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode,
                                                          Type *Dst,
                                                          VectorType *VecTy,
-                                                         unsigned Index) {
+                                                         unsigned Index) const {
 
   // Make sure we were given a valid extend opcode.
   assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
@@ -3812,7 +3812,7 @@ InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
 InstructionCost AArch64TTIImpl::getVectorInstrCost(
     unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
     Value *Scalar,
-    ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) {
+    ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) const {
   return getVectorInstrCostHelper(Opcode, Val, CostKind, Index, false, nullptr,
                                   Scalar, ScalarUserAndIdx);
 }
@@ -3820,7 +3820,7 @@ InstructionCost AArch64TTIImpl::getVectorInstrCost(
 InstructionCost AArch64TTIImpl::getVectorInstrCost(const Instruction &I,
                                                    Type *Val,
                                                    TTI::TargetCostKind CostKind,
-                                                   unsigned Index) {
+                                                   unsigned Index) const {
   return getVectorInstrCostHelper(I.getOpcode(), Val, CostKind, Index,
                                   true /* HasRealUse */, &I);
 }
@@ -4149,9 +4149,9 @@ InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
   }
 }
 
-InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty,
-                                                          ScalarEvolution *SE,
-                                                          const SCEV *Ptr) {
+InstructionCost
+AArch64TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
+                                          const SCEV *Ptr) const {
   // Address computations in vectorized code with non-consecutive addresses will
   // likely result in more instructions compared to scalar code where the
   // computation can more often be merged into the index mode. The resulting
@@ -4466,7 +4466,7 @@ InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
 InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost(
     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
-    bool UseMaskForCond, bool UseMaskForGaps) {
+    bool UseMaskForCond, bool UseMaskForGaps) const {
   assert(Factor >= 2 && "Invalid interleave factor");
   auto *VecVTy = cast<VectorType>(VecTy);
 
@@ -4499,7 +4499,7 @@ InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost(
 }
 
 InstructionCost
-AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
+AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
   InstructionCost Cost = 0;
   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
   for (auto *I : Tys) {
@@ -4513,7 +4513,7 @@ AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
   return Cost;
 }
 
-unsigned AArch64TTIImpl::getMaxInterleaveFactor(ElementCount VF) {
+unsigned AArch64TTIImpl::getMaxInterleaveFactor(ElementCount VF) const {
   return ST->getMaxInterleaveFactor();
 }
 
@@ -4573,7 +4573,7 @@ getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
 //  1. Has a valid cost, and
 //  2. Has a cost within the supplied budget.
 // Otherwise it returns false.
-static bool isLoopSizeWithinBudget(Loop *L, AArch64TTIImpl &TTI,
+static bool isLoopSizeWithinBudget(Loop *L, const AArch64TTIImpl &TTI,
                                    InstructionCost Budget,
                                    unsigned *FinalSize) {
   // Estimate the size of the loop.
@@ -4601,7 +4601,7 @@ static bool isLoopSizeWithinBudget(Loop *L, AArch64TTIImpl &TTI,
 }
 
 static bool shouldUnrollMultiExitLoop(Loop *L, ScalarEvolution &SE,
-                                      AArch64TTIImpl &TTI) {
+                                      const AArch64TTIImpl &TTI) {
   // Only consider loops with unknown trip counts for which we can determine
   // a symbolic expression. Multi-exit loops with small known trip counts will
   // likely be unrolled anyway.
@@ -4639,7 +4639,7 @@ static bool shouldUnrollMultiExitLoop(Loop *L, ScalarEvolution &SE,
 static void
 getAppleRuntimeUnrollPreferences(Loop *L, ScalarEvolution &SE,
                                  TargetTransformInfo::UnrollingPreferences &UP,
-                                 AArch64TTIImpl &TTI) {
+                                 const AArch64TTIImpl &TTI) {
   // Limit loops with structure that is highly likely to benefit from runtime
   // unrolling; that is we exclude outer loops and loops with many blocks (i.e.
   // likely with complex control flow). Note that the heuristics here may be
@@ -4753,9 +4753,9 @@ getAppleRuntimeUnrollPreferences(Loop *L, ScalarEvolution &SE,
   }
 }
 
-void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
-                                             TTI::UnrollingPreferences &UP,
-                                             OptimizationRemarkEmitter *ORE) {
+void AArch64TTIImpl::getUnrollingPreferences(
+    Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP,
+    OptimizationRemarkEmitter *ORE) const {
   // Enable partial unrolling and runtime unrolling.
   BaseT::getUnrollingPreferences(L, SE, UP, ORE);
 
@@ -4835,12 +4835,13 @@ void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
 }
 
 void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                                           TTI::PeelingPreferences &PP) {
+                                           TTI::PeelingPreferences &PP) const {
   BaseT::getPeelingPreferences(L, SE, PP);
 }
 
-Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
-                                                         Type *ExpectedType) {
+Value *
+AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
+                                                  Type *ExpectedType) const {
   switch (Inst->getIntrinsicID()) {
   default:
     return nullptr;
@@ -4876,7 +4877,7 @@ Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
 }
 
 bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
-                                        MemIntrinsicInfo &Info) {
+                                        MemIntrinsicInfo &Info) const {
   switch (Inst->getIntrinsicID()) {
   default:
     break;
@@ -4921,7 +4922,7 @@ bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
 /// sext instructions that sign extended the same initial value. A getelementptr
 /// is considered as "complex" if it has more than 2 operands.
 bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
-    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
+    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
   bool Considerable = false;
   AllowPromotionWithoutCommonHeader = false;
   if (!isa<SExtInst>(&I))
@@ -5161,7 +5162,7 @@ AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
 
 InstructionCost AArch64TTIImpl::getExtendedReductionCost(
     unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *VecTy,
-    std::optional<FastMathFlags> FMF, TTI::TargetCostKind CostKind) {
+    std::optional<FastMathFlags> FMF, TTI::TargetCostKind CostKind) const {
   EVT VecVT = TLI->getValueType(DL, VecTy);
   EVT ResVT = TLI->getValueType(DL, ResTy);
 
@@ -5189,7 +5190,7 @@ InstructionCost AArch64TTIImpl::getExtendedReductionCost(
 InstructionCost
 AArch64TTIImpl::getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
                                        VectorType *VecTy,
-                                       TTI::TargetCostKind CostKind) {
+                                       TTI::TargetCostKind CostKind) const {
   EVT VecVT = TLI->getValueType(DL, VecTy);
   EVT ResVT = TLI->getValueType(DL, ResTy);
 
@@ -5669,7 +5670,7 @@ unsigned AArch64TTIImpl::getEpilogueVectorizationMinVF() const {
   return ST->getEpilogueVectorizationMinVF();
 }
 
-bool AArch64TTIImpl::preferPredicateOverEpilogue(TailFoldingInfo *TFI) {
+bool AArch64TTIImpl::preferPredicateOverEpilogue(TailFoldingInfo *TFI) const {
   if (!ST->hasSVE())
     return false;
 
@@ -5733,7 +5734,8 @@ AArch64TTIImpl::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
   return InstructionCost::getInvalid();
 }
 
-bool AArch64TTIImpl::shouldTreatInstructionLikeSelect(const Instruction *I) {
+bool AArch64TTIImpl::shouldTreatInstructionLikeSelect(
+    const Instruction *I) const {
   if (EnableOrLikeSelectOpt) {
     // For the binary operators (e.g. or) we need to be more careful than
     // selects, here we only transform them if they are already at a natural
@@ -5751,8 +5753,9 @@ bool AArch64TTIImpl::shouldTreatInstructionLikeSelect(const Instruction *I) {
   return BaseT::shouldTreatInstructionLikeSelect(I);
 }
 
-bool AArch64TTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
-                                   const TargetTransformInfo::LSRCost &C2) {
+bool AArch64TTIImpl::isLSRCostLess(
+    const TargetTransformInfo::LSRCost &C1,
+    const TargetTransformInfo::LSRCost &C2) const {
   // AArch64 specific here is adding the number of instructions to the
   // comparison (though not as the first consideration, as some targets do)
   // along with changing the priority of the base additions.

diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index 46d2dc57b012d..44b076b9a1c75 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -105,20 +105,22 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
   InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                     const APInt &Imm, Type *Ty,
                                     TTI::TargetCostKind CostKind,
-                                    Instruction *Inst = nullptr);
+                                    Instruction *Inst = nullptr) const;
   InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                       const APInt &Imm, Type *Ty,
-                                      TTI::TargetCostKind CostKind);
-  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
+                                      TTI::TargetCostKind CostKind) const;
+  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const;
 
   /// @}
 
   /// \name Vector TTI Implementations
   /// @{
 
-  bool enableInterleavedAccessVectorization() { return true; }
+  bool enableInterleavedAccessVectorization() const { return true; }
 
-  bool enableMaskedInterleavedAccessVectorization() { return ST->hasSVE(); }
+  bool enableMaskedInterleavedAccessVectorization() const {
+    return ST->hasSVE();
+  }
 
   unsigned getNumberOfRegisters(unsigned ClassID) const {
     bool Vector = (ClassID == 1);
@@ -167,7 +169,7 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
     return VF.getKnownMinValue() * ST->getVScaleForTuning();
   }
 
-  unsigned getMaxInterleaveFactor(ElementCount VF);
+  unsigned getMaxInterleaveFactor(ElementCount VF) const;
 
   bool prefersVectorizedAddressing() const;
 
@@ -190,7 +192,8 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
                                    const Instruction *I = nullptr) const;
 
   InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
-                                           VectorType *VecTy, unsigned Index);
+                                           VectorType *VecTy,
+                                           unsigned Index) const;
 
   InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                  const Instruction *I = nullptr) const;
@@ -207,11 +210,11 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
   InstructionCost getVectorInstrCost(
       unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
       Value *Scalar,
-      ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx);
+      ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) const;
 
   InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
                                      TTI::TargetCostKind CostKind,
-                                     unsigned Index);
+                                     unsigned Index) const;
 
   InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                          FastMathFlags FMF,
@@ -232,7 +235,7 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
       const Instruction *CxtI = nullptr) const;
 
   InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
-                                            const SCEV *Ptr);
+                                            const SCEV *Ptr) const;
 
   InstructionCost getCmpSelInstrCost(
       unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
@@ -251,19 +254,19 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
       TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
       const Instruction *I = nullptr) const;
 
-  InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys);
+  InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
 
   void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                TTI::UnrollingPreferences &UP,
-                               OptimizationRemarkEmitter *ORE);
+                               OptimizationRemarkEmitter *ORE) const;
 
   void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                             TTI::PeelingPreferences &PP);
+                             TTI::PeelingPreferences &PP) const;
 
   Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
-                                           Type *ExpectedType);
+                                           Type *ExpectedType) const;
 
-  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);
+  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;
 
   bool isElementTypeLegalForScalableVector(Type *Ty) const {
     if (Ty->isPointerTy())
@@ -282,7 +285,7 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
     return false;
   }
 
-  bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) {
+  bool isLegalMaskedLoadStore(Type *DataType, Align Alignment) const {
     if (!ST->hasSVE())
       return false;
 
@@ -295,12 +298,12 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
   }
 
   bool isLegalMaskedLoad(Type *DataType, Align Alignment,
-                         unsigned /*AddressSpace*/) {
+                         unsigned /*AddressSpace*/) const {
     return isLegalMaskedLoadStore(DataType, Alignment);
   }
 
   bool isLegalMaskedStore(Type *DataType, Align Alignment,
-                          unsigned /*AddressSpace*/) {
+                          unsigned /*AddressSpace*/) const {
     return isLegalMaskedLoadStore(DataType, Alignment);
   }
 
@@ -342,7 +345,7 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
     return false;
   }
 
-  bool isLegalNTStoreLoad(Type *DataType, Align Alignment) {
+  bool isLegalNTStoreLoad(Type *DataType, Align Alignment) const {
     // NOTE: The logic below is mostly geared towards LV, which calls it with
     //       vectors with 2 elements. We might want to improve that, if other
     //       users show up.
@@ -359,11 +362,11 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
     return BaseT::isLegalNTStore(DataType, Alignment);
   }
 
-  bool isLegalNTStore(Type *DataType, Align Alignment) {
+  bool isLegalNTStore(Type *DataType, Align Alignment) const {
     return isLegalNTStoreLoad(DataType, Alignment);
   }
 
-  bool isLegalNTLoad(Type *DataType, Align Alignment) {
+  bool isLegalNTLoad(Type *DataType, Align Alignment) const {
     // Only supports little-endian targets.
     if (ST->isLittleEndian())
       return isLegalNTStoreLoad(DataType, Alignment);
@@ -382,11 +385,10 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
   InstructionCost getInterleavedMemoryOpCost(
       unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
       Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
-      bool UseMaskForCond = false, bool UseMaskForGaps = false);
+      bool UseMaskForCond = false, bool UseMaskForGaps = false) const;
 
-  bool
-  shouldConsiderAddressTypePromotion(const Instruction &I,
-                                     bool &AllowPromotionWithoutCommonHeader);
+  bool shouldConsiderAddressTypePromotion(
+      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const;
 
   bool shouldExpandReduction(const IntrinsicInst *II) const { return false; }
 
@@ -411,7 +413,7 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
 
   unsigned getEpilogueVectorizationMinVF() const;
 
-  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI);
+  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const;
 
   bool supportsScalableVectors() const {
     return ST->isSVEorStreamingSVEAvailable();
@@ -434,11 +436,11 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
   InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
                                            Type *ResTy, VectorType *ValTy,
                                            std::optional<FastMathFlags> FMF,
-                                           TTI::TargetCostKind CostKind);
+                                           TTI::TargetCostKind CostKind) const;
 
   InstructionCost getMulAccReductionCost(
       bool IsUnsigned, Type *ResTy, VectorType *Ty,
-      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput);
+      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
 
   InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                  ArrayRef<int> Mask,
@@ -462,9 +464,9 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
                                        StackOffset BaseOffset, bool HasBaseReg,
                                        int64_t Scale, unsigned AddrSpace) const;
 
-  bool enableSelectOptimize() { return ST->enableSelectOptimize(); }
+  bool enableSelectOptimize() const { return ST->enableSelectOptimize(); }
 
-  bool shouldTreatInstructionLikeSelect(const Instruction *I);
+  bool shouldTreatInstructionLikeSelect(const Instruction *I) const;
 
   unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                              Type *ScalarValTy) const {
@@ -478,7 +480,7 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
   std::optional<unsigned> getMinPageSize() const { return 4096; }
 
   bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
-                     const TargetTransformInfo::LSRCost &C2);
+                     const TargetTransformInfo::LSRCost &C2) const;
 
   bool isProfitableToSinkOperands(Instruction *I,
                                   SmallVectorImpl<Use *> &Ops) const;

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 3f2e13f8cfc5b..919e73a2c534d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -107,9 +107,9 @@ AMDGPUTTIImpl::AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
       ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
       TLI(ST->getTargetLowering()) {}
 
-void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
-                                            TTI::UnrollingPreferences &UP,
-                                            OptimizationRemarkEmitter *ORE) {
+void AMDGPUTTIImpl::getUnrollingPreferences(
+    Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP,
+    OptimizationRemarkEmitter *ORE) const {
   const Function &F = *L->getHeader()->getParent();
   UP.Threshold =
       F.getFnAttributeAsParsedInteger("amdgpu-unroll-threshold", 300);
@@ -270,7 +270,7 @@ void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
 }
 
 void AMDGPUTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                                          TTI::PeelingPreferences &PP) {
+                                          TTI::PeelingPreferences &PP) const {
   BaseT::getPeelingPreferences(L, SE, PP);
 }
 
@@ -485,7 +485,7 @@ void GCNTTIImpl::getMemcpyLoopResidualLoweringType(
   }
 }
 
-unsigned GCNTTIImpl::getMaxInterleaveFactor(ElementCount VF) {
+unsigned GCNTTIImpl::getMaxInterleaveFactor(ElementCount VF) const {
   // Disable unrolling if the loop is not vectorized.
   // TODO: Enable this again.
   if (VF.isScalar())
@@ -1369,12 +1369,12 @@ unsigned GCNTTIImpl::getCallerAllocaCost(const CallBase *CB,
 
 void GCNTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::UnrollingPreferences &UP,
-                                         OptimizationRemarkEmitter *ORE) {
+                                         OptimizationRemarkEmitter *ORE) const {
   CommonTTI.getUnrollingPreferences(L, SE, UP, ORE);
 }
 
 void GCNTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                                       TTI::PeelingPreferences &PP) {
+                                       TTI::PeelingPreferences &PP) const {
   CommonTTI.getPeelingPreferences(L, SE, PP);
 }
 

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
index 165ed3d59c35d..1a575c92cce61 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -52,10 +52,10 @@ class AMDGPUTTIImpl final : public BasicTTIImplBase<AMDGPUTTIImpl> {
 
   void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                TTI::UnrollingPreferences &UP,
-                               OptimizationRemarkEmitter *ORE);
+                               OptimizationRemarkEmitter *ORE) const;
 
   void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                             TTI::PeelingPreferences &PP);
+                             TTI::PeelingPreferences &PP) const;
 
   int64_t getMaxMemIntrinsicInlineSizeThreshold() const;
 };
@@ -108,12 +108,12 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
 
   void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                TTI::UnrollingPreferences &UP,
-                               OptimizationRemarkEmitter *ORE);
+                               OptimizationRemarkEmitter *ORE) const;
 
   void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                             TTI::PeelingPreferences &PP);
+                             TTI::PeelingPreferences &PP) const;
 
-  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) {
+  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const {
     assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
     return TTI::PSK_FastHardware;
   }
@@ -149,7 +149,7 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
       unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
       Align SrcAlign, Align DestAlign,
       std::optional<uint32_t> AtomicCpySize) const;
-  unsigned getMaxInterleaveFactor(ElementCount VF);
+  unsigned getMaxInterleaveFactor(ElementCount VF) const;
 
   bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;
 

diff --git a/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp
index 3f7274bad374c..6cc2611a8d1a8 100644
--- a/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp
@@ -82,7 +82,7 @@ bool R600TTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
   return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
 }
 
-unsigned R600TTIImpl::getMaxInterleaveFactor(ElementCount VF) {
+unsigned R600TTIImpl::getMaxInterleaveFactor(ElementCount VF) const {
   // Disable unrolling if the loop is not vectorized.
   // TODO: Enable this again.
   if (VF.isScalar())
@@ -133,13 +133,13 @@ InstructionCost R600TTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
   }
 }
 
-void R600TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
-                                          TTI::UnrollingPreferences &UP,
-                                          OptimizationRemarkEmitter *ORE) {
+void R600TTIImpl::getUnrollingPreferences(
+    Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP,
+    OptimizationRemarkEmitter *ORE) const {
   CommonTTI.getUnrollingPreferences(L, SE, UP, ORE);
 }
 
 void R600TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                                        TTI::PeelingPreferences &PP) {
+                                        TTI::PeelingPreferences &PP) const {
   CommonTTI.getPeelingPreferences(L, SE, PP);
 }

diff --git a/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.h b/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.h
index 4adece65a2863..527b225c4eed3 100644
--- a/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.h
+++ b/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.h
@@ -43,9 +43,9 @@ class R600TTIImpl final : public BasicTTIImplBase<R600TTIImpl> {
 
   void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                TTI::UnrollingPreferences &UP,
-                               OptimizationRemarkEmitter *ORE);
+                               OptimizationRemarkEmitter *ORE) const;
   void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                             TTI::PeelingPreferences &PP);
+                             TTI::PeelingPreferences &PP) const;
   unsigned getHardwareNumberOfRegisters(bool Vec) const;
   unsigned getNumberOfRegisters(bool Vec) const;
   TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind Vector) const;
@@ -57,7 +57,7 @@ class R600TTIImpl final : public BasicTTIImplBase<R600TTIImpl> {
                                    unsigned AddrSpace) const;
   bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                     unsigned AddrSpace) const;
-  unsigned getMaxInterleaveFactor(ElementCount VF);
+  unsigned getMaxInterleaveFactor(ElementCount VF) const;
   InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                  const Instruction *I = nullptr) const;
   using BaseT::getVectorInstrCost;

diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 9c12e4bf62b67..89740a3d7f477 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -353,7 +353,8 @@ InstructionCost ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
 // Constants smaller than 256 fit in the immediate field of
 // Thumb1 instructions so we return a zero cost and 1 otherwise.
 InstructionCost ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
-                                                  const APInt &Imm, Type *Ty) {
+                                                  const APInt &Imm,
+                                                  Type *Ty) const {
   if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
     return 0;
 
@@ -412,7 +413,7 @@ static bool isFPSatMinMaxPattern(Instruction *Inst, const APInt &Imm) {
 InstructionCost ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                               const APInt &Imm, Type *Ty,
                                               TTI::TargetCostKind CostKind,
-                                              Instruction *Inst) {
+                                              Instruction *Inst) const {
   // Division by a constant can be turned into multiplication, but only if we
   // know it's constant. So it's not so much that the immediate is cheap (it's
   // not), but that the alternative is worse.
@@ -1085,7 +1086,7 @@ InstructionCost ARMTTIImpl::getCmpSelInstrCost(
 
 InstructionCost ARMTTIImpl::getAddressComputationCost(Type *Ty,
                                                       ScalarEvolution *SE,
-                                                      const SCEV *Ptr) {
+                                                      const SCEV *Ptr) const {
   // Address computations in vectorized code with non-consecutive addresses will
   // likely result in more instructions compared to scalar code where the
   // computation can more often be merged into the index mode. The resulting
@@ -1105,7 +1106,7 @@ InstructionCost ARMTTIImpl::getAddressComputationCost(Type *Ty,
   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
 }
 
-bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
+bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) const {
   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
     // If a VCTP is part of a chain, it's already profitable and shouldn't be
     // optimized, else LSR may block tail-predication.
@@ -1143,7 +1144,7 @@ bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment,
          (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
 }
 
-bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
+bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) const {
   if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
     return false;
 
@@ -1613,7 +1614,7 @@ ARMTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
 InstructionCost ARMTTIImpl::getInterleavedMemoryOpCost(
     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
-    bool UseMaskForCond, bool UseMaskForGaps) {
+    bool UseMaskForCond, bool UseMaskForGaps) const {
   assert(Factor >= 2 && "Invalid interleave factor");
   assert(isa<VectorType>(VecTy) && "Expect a vector type");
 
@@ -1843,7 +1844,7 @@ ARMTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
 
 InstructionCost ARMTTIImpl::getExtendedReductionCost(
     unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy,
-    std::optional<FastMathFlags> FMF, TTI::TargetCostKind CostKind) {
+    std::optional<FastMathFlags> FMF, TTI::TargetCostKind CostKind) const {
   EVT ValVT = TLI->getValueType(DL, ValTy);
   EVT ResVT = TLI->getValueType(DL, ResTy);
 
@@ -1878,7 +1879,7 @@ InstructionCost ARMTTIImpl::getExtendedReductionCost(
 InstructionCost
 ARMTTIImpl::getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
                                    VectorType *ValTy,
-                                   TTI::TargetCostKind CostKind) {
+                                   TTI::TargetCostKind CostKind) const {
   EVT ValVT = TLI->getValueType(DL, ValTy);
   EVT ResVT = TLI->getValueType(DL, ResTy);
 
@@ -2170,7 +2171,7 @@ bool ARMTTIImpl::isLoweredToCall(const Function *F) const {
   return BaseT::isLoweredToCall(F);
 }
 
-bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) {
+bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) const {
   unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
   EVT VT = TLI->getValueType(DL, I.getType(), true);
   if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
@@ -2260,7 +2261,7 @@ bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) {
 bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                           AssumptionCache &AC,
                                           TargetLibraryInfo *LibInfo,
-                                          HardwareLoopInfo &HWLoopInfo) {
+                                          HardwareLoopInfo &HWLoopInfo) const {
   // Low-overhead branches are only supported in the 'low-overhead branch'
   // extension of v8.1-m.
   if (!ST->hasLOB() || DisableLowOverheadLoops) {
@@ -2493,7 +2494,7 @@ static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
   return true;
 }
 
-bool ARMTTIImpl::preferPredicateOverEpilogue(TailFoldingInfo *TFI) {
+bool ARMTTIImpl::preferPredicateOverEpilogue(TailFoldingInfo *TFI) const {
   if (!EnableTailPredication) {
     LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n");
     return false;
@@ -2559,7 +2560,7 @@ ARMTTIImpl::getPreferredTailFoldingStyle(bool IVUpdateMayOverflow) const {
 }
 void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::UnrollingPreferences &UP,
-                                         OptimizationRemarkEmitter *ORE) {
+                                         OptimizationRemarkEmitter *ORE) const {
   // Enable Upper bound unrolling universally, providing that we do not see an
   // active lane mask, which will be better kept as a loop to become tail
   // predicated than to be conditionally unrolled.
@@ -2688,7 +2689,7 @@ void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
 }
 
 void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                                       TTI::PeelingPreferences &PP) {
+                                       TTI::PeelingPreferences &PP) const {
   BaseT::getPeelingPreferences(L, SE, PP);
 }
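
The pattern driving every hunk above, reduced to a minimal standalone sketch
(hypothetical names, not the patch itself): once the type-erased member is
declared const, only const methods of the wrapped type can be called through
it, so constness is forced outward onto every forwarded interface method.

#include <utility>

struct Concept {
  virtual ~Concept() = default;
  virtual unsigned maxInterleaveFactor() const = 0;
};

template <typename T> struct Model final : Concept {
  const T Impl; // const member: only const methods of T are callable on it
  Model(T I) : Impl(std::move(I)) {}
  unsigned maxInterleaveFactor() const override {
    return Impl.maxInterleaveFactor(); // ill-formed unless T's method is const
  }
};

struct TargetImpl {
  unsigned maxInterleaveFactor() const { return 2; } // must be const to compile
};

int main() { return Model<TargetImpl>(TargetImpl{}).maxInterleaveFactor() == 2 ? 0 : 1; }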
 

diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index 307a0270fc0a9..b594f07e6a5a9 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -107,7 +107,7 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
   bool areInlineCompatible(const Function *Caller,
                            const Function *Callee) const;
 
-  bool enableInterleavedAccessVectorization() { return true; }
+  bool enableInterleavedAccessVectorization() const { return true; }
 
   TTI::AddressingModeKind
     getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const;
@@ -115,7 +115,7 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
   /// Floating-point computation using ARMv8 AArch32 Advanced
   /// SIMD instructions remains unchanged from ARMv7. Only AArch64 SIMD
   /// and Arm MVE are IEEE-754 compliant.
-  bool isFPVectorizationPotentiallyUnsafe() {
+  bool isFPVectorizationPotentiallyUnsafe() const {
     return !ST->isTargetDarwin() && !ST->hasMVEFloatOps();
   }
 
@@ -131,7 +131,7 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
   /// @{
 
   InstructionCost getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
-                                        const APInt &Imm, Type *Ty);
+                                        const APInt &Imm, Type *Ty) const;
 
   using BaseT::getIntImmCost;
   InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
@@ -140,7 +140,7 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
   InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                     const APInt &Imm, Type *Ty,
                                     TTI::TargetCostKind CostKind,
-                                    Instruction *Inst = nullptr);
+                                    Instruction *Inst = nullptr) const;
 
   /// @}
 
@@ -178,11 +178,11 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
     llvm_unreachable("Unsupported register kind");
   }
 
-  unsigned getMaxInterleaveFactor(ElementCount VF) {
+  unsigned getMaxInterleaveFactor(ElementCount VF) const {
     return ST->getMaxInterleaveFactor();
   }
 
-  bool isProfitableLSRChainElement(Instruction *I);
+  bool isProfitableLSRChainElement(Instruction *I) const;
 
   bool isLegalMaskedLoad(Type *DataTy, Align Alignment,
                          unsigned AddressSpace) const;
@@ -192,7 +192,7 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
     return isLegalMaskedLoad(DataTy, Alignment, AddressSpace);
   }
 
-  bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) {
+  bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) const {
     // For MVE, we have a custom lowering pass that will already have custom
     // legalised any gathers that we can lower to MVE intrinsics, and want to
     // expand all the rest. The pass runs before the masked intrinsic lowering
@@ -200,13 +200,13 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
     return true;
   }
 
-  bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) {
+  bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) const {
     return forceScalarizeMaskedGather(VTy, Alignment);
   }
 
-  bool isLegalMaskedGather(Type *Ty, Align Alignment);
+  bool isLegalMaskedGather(Type *Ty, Align Alignment) const;
 
-  bool isLegalMaskedScatter(Type *Ty, Align Alignment) {
+  bool isLegalMaskedScatter(Type *Ty, Align Alignment) const {
     return isLegalMaskedGather(Ty, Alignment);
   }
 
@@ -253,7 +253,7 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
                                      Value *Op1) const;
 
   InstructionCost getAddressComputationCost(Type *Val, ScalarEvolution *SE,
-                                            const SCEV *Ptr);
+                                            const SCEV *Ptr) const;
 
   InstructionCost getArithmeticInstrCost(
       unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
@@ -275,7 +275,7 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
   InstructionCost getInterleavedMemoryOpCost(
       unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
       Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
-      bool UseMaskForCond = false, bool UseMaskForGaps = false);
+      bool UseMaskForCond = false, bool UseMaskForGaps = false) const;
 
   InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                          const Value *Ptr, bool VariableMask,
@@ -290,10 +290,10 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
   InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
                                            Type *ResTy, VectorType *ValTy,
                                            std::optional<FastMathFlags> FMF,
-                                           TTI::TargetCostKind CostKind);
+                                           TTI::TargetCostKind CostKind) const;
   InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
                                          VectorType *ValTy,
-                                         TTI::TargetCostKind CostKind);
+                                         TTI::TargetCostKind CostKind) const;
 
   InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                          FastMathFlags FMF,
@@ -310,22 +310,21 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
                                        StackOffset BaseOffset, bool HasBaseReg,
                                        int64_t Scale, unsigned AddrSpace) const;
 
-  bool maybeLoweredToCall(Instruction &I);
+  bool maybeLoweredToCall(Instruction &I) const;
   bool isLoweredToCall(const Function *F) const;
   bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
-                                AssumptionCache &AC,
-                                TargetLibraryInfo *LibInfo,
-                                HardwareLoopInfo &HWLoopInfo);
-  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI);
+                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
+                                HardwareLoopInfo &HWLoopInfo) const;
+  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const;
   void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                TTI::UnrollingPreferences &UP,
-                               OptimizationRemarkEmitter *ORE);
+                               OptimizationRemarkEmitter *ORE) const;
 
   TailFoldingStyle
   getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) const;
 
   void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                             TTI::PeelingPreferences &PP);
+                             TTI::PeelingPreferences &PP) const;
   bool shouldBuildLookupTablesForConstant(Constant *C) const {
     // In the ROPI and RWPI relocation models we can't have pointers to global
     // variables or functions in constant data, so don't convert switches to

diff --git a/llvm/lib/Target/BPF/BPFTargetTransformInfo.h b/llvm/lib/Target/BPF/BPFTargetTransformInfo.h
index 47d11bf6805c9..05a53c8bab824 100644
--- a/llvm/lib/Target/BPF/BPFTargetTransformInfo.h
+++ b/llvm/lib/Target/BPF/BPFTargetTransformInfo.h
@@ -37,7 +37,8 @@ class BPFTTIImpl : public BasicTTIImplBase<BPFTTIImpl> {
       : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
         TLI(ST->getTargetLowering()) {}
 
-  int getIntImmCost(const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) {
+  int getIntImmCost(const APInt &Imm, Type *Ty,
+                    TTI::TargetCostKind CostKind) const {
     if (Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
       return TTI::TCC_Free;
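
A note on the check just quoted: isInt<32>(Imm.getSExtValue()) asks whether
the immediate fits a signed 32-bit field, which a BPF instruction's imm slot
carries directly; anything wider needs a 64-bit materialization. A standalone
restatement (illustration only, hypothetical helper name):

#include <cassert>
#include <cstdint>

// Equivalent of llvm::isInt<32> for a 64-bit value.
static bool fitsInSigned32(int64_t V) {
  return V >= INT32_MIN && V <= INT32_MAX;
}

int main() {
  assert(fitsInSigned32(-1) && fitsInSigned32(0x7fffffffLL));
  assert(!fitsInSigned32(0x80000000LL)); // would not be TCC_Free
  return 0;
}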
 

diff --git a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
index 4cf3282b108f1..94ebe07d3783b 100644
--- a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.cpp
@@ -15,8 +15,8 @@
 
 using namespace llvm;
 
-bool DirectXTTIImpl::isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
-                                                        unsigned ScalarOpdIdx) {
+bool DirectXTTIImpl::isTargetIntrinsicWithScalarOpAtArg(
+    Intrinsic::ID ID, unsigned ScalarOpdIdx) const {
   switch (ID) {
   case Intrinsic::dx_wave_readlane:
     return ScalarOpdIdx == 1;
@@ -26,7 +26,7 @@ bool DirectXTTIImpl::isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
 }
 
 bool DirectXTTIImpl::isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
-                                                            int OpdIdx) {
+                                                            int OpdIdx) const {
   switch (ID) {
   case Intrinsic::dx_asdouble:
     return OpdIdx == 0;

diff --git a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.h b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.h
index 992d0483de93e..170274c7aa32d 100644
--- a/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.h
+++ b/llvm/lib/Target/DirectX/DirectXTargetTransformInfo.h
@@ -36,8 +36,9 @@ class DirectXTTIImpl : public BasicTTIImplBase<DirectXTTIImpl> {
   unsigned getMinVectorRegisterBitWidth() const { return 32; }
   bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const;
   bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
-                                          unsigned ScalarOpdIdx);
-  bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx);
+                                          unsigned ScalarOpdIdx) const;
+  bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
+                                              int OpdIdx) const;
 };
 } // namespace llvm
 

diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
index 0c2deec0a0dab..7825024f9ac0f 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
@@ -78,14 +78,14 @@ HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
 }
 
 // The Hexagon target can unroll loops with run-time trip counts.
-void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
-                                             TTI::UnrollingPreferences &UP,
-                                             OptimizationRemarkEmitter *ORE) {
+void HexagonTTIImpl::getUnrollingPreferences(
+    Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP,
+    OptimizationRemarkEmitter *ORE) const {
   UP.Runtime = UP.Partial = true;
 }
 
 void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                                           TTI::PeelingPreferences &PP) {
+                                           TTI::PeelingPreferences &PP) const {
   BaseT::getPeelingPreferences(L, SE, PP);
   // Only try to peel innermost loops with small runtime trip counts.
   if (L && L->isInnermost() && canPeel(L) &&
@@ -110,7 +110,7 @@ unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
   return 32;
 }
 
-unsigned HexagonTTIImpl::getMaxInterleaveFactor(ElementCount VF) {
+unsigned HexagonTTIImpl::getMaxInterleaveFactor(ElementCount VF) const {
   return useHVX() ? 2 : 1;
 }
 
@@ -157,7 +157,7 @@ HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
 
 InstructionCost HexagonTTIImpl::getAddressComputationCost(Type *Tp,
                                                           ScalarEvolution *SE,
-                                                          const SCEV *S) {
+                                                          const SCEV *S) const {
   return 0;
 }
 
@@ -245,7 +245,7 @@ InstructionCost HexagonTTIImpl::getGatherScatterOpCost(
 InstructionCost HexagonTTIImpl::getInterleavedMemoryOpCost(
     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
-    bool UseMaskForCond, bool UseMaskForGaps) {
+    bool UseMaskForCond, bool UseMaskForGaps) const {
   if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                              Alignment, AddressSpace,
@@ -340,14 +340,14 @@ InstructionCost HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
 }
 
 bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/,
-                                        unsigned /*AddressSpace*/) {
+                                        unsigned /*AddressSpace*/) const {
   // This function is called from scalarize-masked-mem-intrin, which runs
   // in pre-isel. Use ST directly instead of calling isHVXVectorType.
   return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
 }
 
 bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/,
-                                       unsigned /*AddressSpace*/) {
+                                       unsigned /*AddressSpace*/) const {
   // This function is called from scalarize-masked-mem-intrin, which runs
   // in pre-isel. Use ST directly instead of calling isHVXVectorType.
   return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
@@ -366,7 +366,7 @@ unsigned HexagonTTIImpl::getCacheLineSize() const {
 InstructionCost
 HexagonTTIImpl::getInstructionCost(const User *U,
                                    ArrayRef<const Value *> Operands,
-                                   TTI::TargetCostKind CostKind) {
+                                   TTI::TargetCostKind CostKind) const {
   auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
     if (!CI->isIntegerCast())
       return false;

diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
index a3820b71b1e4c..9d0c3843e2a7a 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
+++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
@@ -63,10 +63,10 @@ class HexagonTTIImpl : public BasicTTIImplBase<HexagonTTIImpl> {
   // The Hexagon target can unroll loops with run-time trip counts.
   void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                TTI::UnrollingPreferences &UP,
-                               OptimizationRemarkEmitter *ORE);
+                               OptimizationRemarkEmitter *ORE) const;
 
   void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                             TTI::PeelingPreferences &PP);
+                             TTI::PeelingPreferences &PP) const;
 
   /// Bias LSR towards creating post-increment opportunities.
   TTI::AddressingModeKind
@@ -82,7 +82,7 @@ class HexagonTTIImpl : public BasicTTIImplBase<HexagonTTIImpl> {
   /// @{
 
   unsigned getNumberOfRegisters(bool vector) const;
-  unsigned getMaxInterleaveFactor(ElementCount VF);
+  unsigned getMaxInterleaveFactor(ElementCount VF) const;
   TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
   unsigned getMinVectorRegisterBitWidth() const;
   ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const;
@@ -91,17 +91,13 @@ class HexagonTTIImpl : public BasicTTIImplBase<HexagonTTIImpl> {
   shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const {
     return true;
   }
-  bool supportsEfficientVectorElementLoadStore() { return false; }
-  bool hasBranchDivergence(const Function *F = nullptr) { return false; }
-  bool enableAggressiveInterleaving(bool LoopHasReductions) {
+  bool supportsEfficientVectorElementLoadStore() const { return false; }
+  bool hasBranchDivergence(const Function *F = nullptr) const { return false; }
+  bool enableAggressiveInterleaving(bool LoopHasReductions) const {
     return false;
   }
-  bool prefersVectorizedAddressing() {
-    return false;
-  }
-  bool enableInterleavedAccessVectorization() {
-    return true;
-  }
+  bool prefersVectorizedAddressing() const { return false; }
+  bool enableInterleavedAccessVectorization() const { return true; }
 
   InstructionCost getCallInstrCost(Function *F, Type *RetTy,
                                    ArrayRef<Type *> Tys,
@@ -109,7 +105,7 @@ class HexagonTTIImpl : public BasicTTIImplBase<HexagonTTIImpl> {
   InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                         TTI::TargetCostKind CostKind) const;
   InstructionCost getAddressComputationCost(Type *Tp, ScalarEvolution *SE,
-                                            const SCEV *S);
+                                            const SCEV *S) const;
   InstructionCost getMemoryOpCost(
       unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
       TTI::TargetCostKind CostKind,
@@ -131,7 +127,7 @@ class HexagonTTIImpl : public BasicTTIImplBase<HexagonTTIImpl> {
   InstructionCost getInterleavedMemoryOpCost(
       unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
       Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
-      bool UseMaskForCond = false, bool UseMaskForGaps = false);
+      bool UseMaskForCond = false, bool UseMaskForGaps = false) const;
   InstructionCost getCmpSelInstrCost(
       unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
       TTI::TargetCostKind CostKind,
@@ -160,15 +156,15 @@ class HexagonTTIImpl : public BasicTTIImplBase<HexagonTTIImpl> {
   }
 
   bool isLegalMaskedStore(Type *DataType, Align Alignment,
-                          unsigned AddressSpace);
+                          unsigned AddressSpace) const;
   bool isLegalMaskedLoad(Type *DataType, Align Alignment,
-                         unsigned AddressSpace);
+                         unsigned AddressSpace) const;
 
   /// @}
 
   InstructionCost getInstructionCost(const User *U,
                                      ArrayRef<const Value *> Operands,
-                                     TTI::TargetCostKind CostKind);
+                                     TTI::TargetCostKind CostKind) const;
 
   // Hexagon specific decision to generate a lookup table.
   bool shouldBuildLookupTables() const;

diff --git a/llvm/lib/Target/Lanai/LanaiTargetTransformInfo.h b/llvm/lib/Target/Lanai/LanaiTargetTransformInfo.h
index 14759e7ca8f4a..ad7e561426395 100644
--- a/llvm/lib/Target/Lanai/LanaiTargetTransformInfo.h
+++ b/llvm/lib/Target/Lanai/LanaiTargetTransformInfo.h
@@ -43,14 +43,15 @@ class LanaiTTIImpl : public BasicTTIImplBase<LanaiTTIImpl> {
 
   bool shouldBuildLookupTables() const { return false; }
 
-  TargetTransformInfo::PopcntSupportKind getPopcntSupport(unsigned TyWidth) {
+  TargetTransformInfo::PopcntSupportKind
+  getPopcntSupport(unsigned TyWidth) const {
     if (TyWidth == 32)
       return TTI::PSK_FastHardware;
     return TTI::PSK_Software;
   }
 
   InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
-                                TTI::TargetCostKind CostKind) {
+                                TTI::TargetCostKind CostKind) const {
     assert(Ty->isIntegerTy());
     unsigned BitSize = Ty->getPrimitiveSizeInBits();
     // There is no cost model for constants with a bit size of 0. Return
@@ -80,13 +81,13 @@ class LanaiTTIImpl : public BasicTTIImplBase<LanaiTTIImpl> {
   InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
                                     const APInt &Imm, Type *Ty,
                                     TTI::TargetCostKind CostKind,
-                                    Instruction *Inst = nullptr) {
+                                    Instruction *Inst = nullptr) const {
     return getIntImmCost(Imm, Ty, CostKind);
   }
 
   InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                       const APInt &Imm, Type *Ty,
-                                      TTI::TargetCostKind CostKind) {
+                                      TTI::TargetCostKind CostKind) const {
     return getIntImmCost(Imm, Ty, CostKind);
   }
 

diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
index cbc9c3f3beca0..ede5477f04bda 100644
--- a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
@@ -67,7 +67,7 @@ unsigned LoongArchTTIImpl::getRegisterClassForType(bool Vector,
   return LoongArchRegisterClass::GPRRC;
 }
 
-unsigned LoongArchTTIImpl::getMaxInterleaveFactor(ElementCount VF) {
+unsigned LoongArchTTIImpl::getMaxInterleaveFactor(ElementCount VF) const {
   return ST->getMaxInterleaveFactor();
 }
 
@@ -84,7 +84,7 @@ const char *LoongArchTTIImpl::getRegisterClassName(unsigned ClassID) const {
 }
 
 TargetTransformInfo::PopcntSupportKind
-LoongArchTTIImpl::getPopcntSupport(unsigned TyWidth) {
+LoongArchTTIImpl::getPopcntSupport(unsigned TyWidth) const {
   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
   return ST->hasExtLSX() ? TTI::PSK_FastHardware : TTI::PSK_Software;
 }

diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h
index b3edf131c584c..26d59c3e53a3e 100644
--- a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h
@@ -43,9 +43,9 @@ class LoongArchTTIImpl : public BasicTTIImplBase<LoongArchTTIImpl> {
   TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
   unsigned getNumberOfRegisters(unsigned ClassID) const;
   unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const;
-  unsigned getMaxInterleaveFactor(ElementCount VF);
+  unsigned getMaxInterleaveFactor(ElementCount VF) const;
   const char *getRegisterClassName(unsigned ClassID) const;
-  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
+  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const;
 
   unsigned getCacheLineSize() const override;
   unsigned getPrefetchDistance() const override;

diff --git a/llvm/lib/Target/Mips/MipsTargetTransformInfo.cpp b/llvm/lib/Target/Mips/MipsTargetTransformInfo.cpp
index 00dee75e70663..e861e43c56c75 100644
--- a/llvm/lib/Target/Mips/MipsTargetTransformInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsTargetTransformInfo.cpp
@@ -10,14 +10,14 @@
 
 using namespace llvm;
 
-bool MipsTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
+bool MipsTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) const {
   EVT VT = TLI->getValueType(DL, DataType);
   return TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIVREM : ISD::UDIVREM,
                                        VT);
 }
 
 bool MipsTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
-                                const TargetTransformInfo::LSRCost &C2) {
+                                const TargetTransformInfo::LSRCost &C2) const {
   // MIPS specific here are "instruction number 1st priority".
   // If we need to emit adds inside the loop to add up base registers, then
   // we need at least one extra temporary register.

diff --git a/llvm/lib/Target/Mips/MipsTargetTransformInfo.h b/llvm/lib/Target/Mips/MipsTargetTransformInfo.h
index f817a386e600e..cf3f873fd0cb0 100644
--- a/llvm/lib/Target/Mips/MipsTargetTransformInfo.h
+++ b/llvm/lib/Target/Mips/MipsTargetTransformInfo.h
@@ -32,10 +32,10 @@ class MipsTTIImpl : public BasicTTIImplBase<MipsTTIImpl> {
       : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
         TLI(ST->getTargetLowering()) {}
 
-  bool hasDivRemOp(Type *DataType, bool IsSigned);
+  bool hasDivRemOp(Type *DataType, bool IsSigned) const;
 
   bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
-                     const TargetTransformInfo::LSRCost &C2);
+                     const TargetTransformInfo::LSRCost &C2) const;
 };
 
 } // end namespace llvm
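
The "instruction number 1st priority" policy named in the MIPS comment above
is a lexicographic comparison: instruction count is compared first, and the
remaining cost components only break ties. A hedged sketch with a simplified
cost record (illustration only; not the in-tree TTI::LSRCost definition):

#include <tuple>

struct Cost { unsigned Insns, NumRegs, SetupCost; };

// Instructions dominate; registers and setup cost only break ties.
static bool lessInsnsFirst(const Cost &A, const Cost &B) {
  return std::tie(A.Insns, A.NumRegs, A.SetupCost) <
         std::tie(B.Insns, B.NumRegs, B.SetupCost);
}

int main() {
  // Fewer instructions wins even when more registers are used.
  return lessInsnsFirst({3, 9, 1}, {4, 2, 0}) ? 0 : 1;
}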

diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
index b5f7d90cd29d9..5e5362b0461d0 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
@@ -74,7 +74,7 @@ static bool isNVVMAtomic(const IntrinsicInst *II) {
   }
 }
 
-bool NVPTXTTIImpl::isSourceOfDivergence(const Value *V) {
+bool NVPTXTTIImpl::isSourceOfDivergence(const Value *V) const {
   // Without inter-procedural analysis, we conservatively assume that arguments
   // to __device__ functions are divergent.
   if (const Argument *Arg = dyn_cast<Argument>(V))
@@ -483,7 +483,7 @@ NVPTXTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
 InstructionCost
 NVPTXTTIImpl::getInstructionCost(const User *U,
                                  ArrayRef<const Value *> Operands,
-                                 TTI::TargetCostKind CostKind) {
+                                 TTI::TargetCostKind CostKind) const {
   if (const auto *CI = dyn_cast<CallInst>(U))
     if (const auto *IA = dyn_cast<InlineAsm>(CI->getCalledOperand())) {
       // Without this implementation getCallCost() would return the number
@@ -538,9 +538,9 @@ InstructionCost NVPTXTTIImpl::getArithmeticInstrCost(
   }
 }
 
-void NVPTXTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
-                                           TTI::UnrollingPreferences &UP,
-                                           OptimizationRemarkEmitter *ORE) {
+void NVPTXTTIImpl::getUnrollingPreferences(
+    Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP,
+    OptimizationRemarkEmitter *ORE) const {
   BaseT::getUnrollingPreferences(L, SE, UP, ORE);
 
   // Enable partial unrolling and runtime unrolling, but reduce the
@@ -552,7 +552,7 @@ void NVPTXTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
 }
 
 void NVPTXTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                                         TTI::PeelingPreferences &PP) {
+                                         TTI::PeelingPreferences &PP) const {
   BaseT::getPeelingPreferences(L, SE, PP);
 }
 

diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
index 9fd5c17f58959..ecd45a01ffbf2 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
@@ -42,9 +42,9 @@ class NVPTXTTIImpl : public BasicTTIImplBase<NVPTXTTIImpl> {
       : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl()),
         TLI(ST->getTargetLowering()) {}
 
-  bool hasBranchDivergence(const Function *F = nullptr) { return true; }
+  bool hasBranchDivergence(const Function *F = nullptr) const { return true; }
 
-  bool isSourceOfDivergence(const Value *V);
+  bool isSourceOfDivergence(const Value *V) const;
 
   unsigned getFlatAddressSpace() const {
     return AddressSpace::ADDRESS_SPACE_GENERIC;
@@ -97,7 +97,7 @@ class NVPTXTTIImpl : public BasicTTIImplBase<NVPTXTTIImpl> {
 
   InstructionCost getInstructionCost(const User *U,
                                      ArrayRef<const Value *> Operands,
-                                     TTI::TargetCostKind CostKind);
+                                     TTI::TargetCostKind CostKind) const;
 
   InstructionCost getArithmeticInstrCost(
       unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
@@ -144,12 +144,12 @@ class NVPTXTTIImpl : public BasicTTIImplBase<NVPTXTTIImpl> {
 
   void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                TTI::UnrollingPreferences &UP,
-                               OptimizationRemarkEmitter *ORE);
+                               OptimizationRemarkEmitter *ORE) const;
 
   void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                             TTI::PeelingPreferences &PP);
+                             TTI::PeelingPreferences &PP) const;
 
-  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) {
+  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const {
     // Volatile loads/stores are only supported for shared and global address
     // spaces, or for generic AS that maps to them.
     if (!(AddrSpace == llvm::ADDRESS_SPACE_GENERIC ||

diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index 77149045d6b72..762e2d4d28d27 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -53,7 +53,7 @@ SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
 //===----------------------------------------------------------------------===//
 
 TargetTransformInfo::PopcntSupportKind
-PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
+PPCTTIImpl::getPopcntSupport(unsigned TyWidth) const {
   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
   if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
     return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
@@ -190,9 +190,10 @@ InstructionCost PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
   return 4 * TTI::TCC_Basic;
 }
 
-InstructionCost PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
-                                                const APInt &Imm, Type *Ty,
-                                                TTI::TargetCostKind CostKind) {
+InstructionCost
+PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
+                                const APInt &Imm, Type *Ty,
+                                TTI::TargetCostKind CostKind) const {
   if (DisablePPCConstHoist)
     return BaseT::getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
 
@@ -228,7 +229,7 @@ InstructionCost PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
 InstructionCost PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                               const APInt &Imm, Type *Ty,
                                               TTI::TargetCostKind CostKind,
-                                              Instruction *Inst) {
+                                              Instruction *Inst) const {
   if (DisablePPCConstHoist)
     return BaseT::getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
 
@@ -318,9 +319,9 @@ static bool isMMAType(Type *Ty) {
          (Ty->getPrimitiveSizeInBits() > 128);
 }
 
-InstructionCost PPCTTIImpl::getInstructionCost(const User *U,
-                                               ArrayRef<const Value *> Operands,
-                                               TTI::TargetCostKind CostKind) {
+InstructionCost
+PPCTTIImpl::getInstructionCost(const User *U, ArrayRef<const Value *> Operands,
+                               TTI::TargetCostKind CostKind) const {
   // We already implement getCastInstrCost and getMemoryOpCost where we perform
   // the vector adjustment there.
   if (isa<CastInst>(U) || isa<LoadInst>(U) || isa<StoreInst>(U))
@@ -338,7 +339,7 @@ InstructionCost PPCTTIImpl::getInstructionCost(const User *U,
 bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                           AssumptionCache &AC,
                                           TargetLibraryInfo *LibInfo,
-                                          HardwareLoopInfo &HWLoopInfo) {
+                                          HardwareLoopInfo &HWLoopInfo) const {
   const PPCTargetMachine &TM = ST->getTargetMachine();
   TargetSchedModel SchedModel;
   SchedModel.init(ST);
@@ -397,7 +398,7 @@ bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
 
 void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::UnrollingPreferences &UP,
-                                         OptimizationRemarkEmitter *ORE) {
+                                         OptimizationRemarkEmitter *ORE) const {
   if (ST->getCPUDirective() == PPC::DIR_A2) {
     // The A2 is in-order with a deep pipeline, and concatenation unrolling
     // helps expose latency-hiding opportunities to the instruction scheduler.
@@ -412,18 +413,18 @@ void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
 }
 
 void PPCTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                                       TTI::PeelingPreferences &PP) {
+                                       TTI::PeelingPreferences &PP) const {
   BaseT::getPeelingPreferences(L, SE, PP);
 }
 // This function returns true to allow using coldcc calling convention.
 // Returning true results in coldcc being used for functions which are cold at
 // all call sites when the callers of the functions are not calling any other
 // non coldcc functions.
-bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
+bool PPCTTIImpl::useColdCCForColdCall(Function &F) const {
   return EnablePPCColdCC;
 }
 
-bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
+bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) const {
   // On the A2, always unroll aggressively.
   if (ST->getCPUDirective() == PPC::DIR_A2)
     return true;
@@ -439,9 +440,7 @@ PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
   return Options;
 }
 
-bool PPCTTIImpl::enableInterleavedAccessVectorization() {
-  return true;
-}
+bool PPCTTIImpl::enableInterleavedAccessVectorization() const { return true; }
 
 unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
   assert(ClassID == GPRRC || ClassID == FPRRC ||
@@ -512,7 +511,7 @@ unsigned PPCTTIImpl::getPrefetchDistance() const {
   return 300;
 }
 
-unsigned PPCTTIImpl::getMaxInterleaveFactor(ElementCount VF) {
+unsigned PPCTTIImpl::getMaxInterleaveFactor(ElementCount VF) const {
   unsigned Directive = ST->getCPUDirective();
   // The 440 has no SIMD support, but floating-point instructions
   // have a 5-cycle latency, so unroll by 5x for latency hiding.
@@ -857,7 +856,7 @@ InstructionCost PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
 InstructionCost PPCTTIImpl::getInterleavedMemoryOpCost(
     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
-    bool UseMaskForCond, bool UseMaskForGaps) {
+    bool UseMaskForCond, bool UseMaskForGaps) const {
   InstructionCost CostFactor =
       vectorCostAdjustmentFactor(Opcode, VecTy, nullptr);
   if (!CostFactor.isValid())
@@ -929,7 +928,8 @@ bool PPCTTIImpl::areTypesABICompatible(const Function *Caller,
 
 bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                             LoopInfo *LI, DominatorTree *DT,
-                            AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
+                            AssumptionCache *AC,
+                            TargetLibraryInfo *LibInfo) const {
   // Process nested loops first.
   for (Loop *I : *L)
     if (canSaveCmp(I, BI, SE, LI, DT, AC, LibInfo))
@@ -951,7 +951,7 @@ bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
 }
 
 bool PPCTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
-                               const TargetTransformInfo::LSRCost &C2) {
+                               const TargetTransformInfo::LSRCost &C2) const {
   // PowerPC default behaviour here is "instruction number 1st priority".
   // If LsrNoInsnsCost is set, call default implementation.
   if (!LsrNoInsnsCost)
@@ -962,9 +962,7 @@ bool PPCTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
   return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
 }
 
-bool PPCTTIImpl::isNumRegsMajorCostOfLSR() {
-  return false;
-}
+bool PPCTTIImpl::isNumRegsMajorCostOfLSR() const { return false; }
 
 bool PPCTTIImpl::shouldBuildRelLookupTables() const {
   const PPCTargetMachine &TM = ST->getTargetMachine();
@@ -975,7 +973,7 @@ bool PPCTTIImpl::shouldBuildRelLookupTables() const {
 }
 
 bool PPCTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
-                                    MemIntrinsicInfo &Info) {
+                                    MemIntrinsicInfo &Info) const {
   switch (Inst->getIntrinsicID()) {
   case Intrinsic::ppc_altivec_lvx:
   case Intrinsic::ppc_altivec_lvxl:
@@ -1059,7 +1057,7 @@ InstructionCost PPCTTIImpl::getVPMemoryOpCost(unsigned Opcode, Type *Src,
                                               Align Alignment,
                                               unsigned AddressSpace,
                                               TTI::TargetCostKind CostKind,
-                                              const Instruction *I) {
+                                              const Instruction *I) const {
   InstructionCost Cost = BaseT::getVPMemoryOpCost(Opcode, Src, Alignment,
                                                   AddressSpace, CostKind, I);
   if (TLI->getValueType(DL, Src, true) == MVT::Other)

diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
index f81082d2d7029..9e4c40bb39295 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
@@ -53,42 +53,41 @@ class PPCTTIImpl : public BasicTTIImplBase<PPCTTIImpl> {
   InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                     const APInt &Imm, Type *Ty,
                                     TTI::TargetCostKind CostKind,
-                                    Instruction *Inst = nullptr);
+                                    Instruction *Inst = nullptr) const;
   InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                       const APInt &Imm, Type *Ty,
-                                      TTI::TargetCostKind CostKind);
+                                      TTI::TargetCostKind CostKind) const;
 
   InstructionCost getInstructionCost(const User *U,
                                      ArrayRef<const Value *> Operands,
-                                     TTI::TargetCostKind CostKind);
+                                     TTI::TargetCostKind CostKind) const;
 
-  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
+  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const;
   bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
-                                AssumptionCache &AC,
-                                TargetLibraryInfo *LibInfo,
-                                HardwareLoopInfo &HWLoopInfo);
+                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
+                                HardwareLoopInfo &HWLoopInfo) const;
   bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
                   DominatorTree *DT, AssumptionCache *AC,
-                  TargetLibraryInfo *LibInfo);
-  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);
+                  TargetLibraryInfo *LibInfo) const;
+  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;
   void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                TTI::UnrollingPreferences &UP,
-                               OptimizationRemarkEmitter *ORE);
+                               OptimizationRemarkEmitter *ORE) const;
   void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                             TTI::PeelingPreferences &PP);
+                             TTI::PeelingPreferences &PP) const;
   bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
-                     const TargetTransformInfo::LSRCost &C2);
-  bool isNumRegsMajorCostOfLSR();
+                     const TargetTransformInfo::LSRCost &C2) const;
+  bool isNumRegsMajorCostOfLSR() const;
   bool shouldBuildRelLookupTables() const;
   /// @}
 
   /// \name Vector TTI Implementations
   /// @{
-  bool useColdCCForColdCall(Function &F);
-  bool enableAggressiveInterleaving(bool LoopHasReductions);
+  bool useColdCCForColdCall(Function &F) const;
+  bool enableAggressiveInterleaving(bool LoopHasReductions) const;
   TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                     bool IsZeroCmp) const;
-  bool enableInterleavedAccessVectorization();
+  bool enableInterleavedAccessVectorization() const;
 
   enum PPCRegisterClass {
     GPRRC, FPRRC, VRRC, VSXRC
@@ -99,7 +98,7 @@ class PPCTTIImpl : public BasicTTIImplBase<PPCTTIImpl> {
   TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
   unsigned getCacheLineSize() const override;
   unsigned getPrefetchDistance() const override;
-  unsigned getMaxInterleaveFactor(ElementCount VF);
+  unsigned getMaxInterleaveFactor(ElementCount VF) const;
   InstructionCost vectorCostAdjustmentFactor(unsigned Opcode, Type *Ty1,
                                              Type *Ty2) const;
   InstructionCost getArithmeticInstrCost(
@@ -138,7 +137,7 @@ class PPCTTIImpl : public BasicTTIImplBase<PPCTTIImpl> {
   InstructionCost getInterleavedMemoryOpCost(
       unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
       Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
-      bool UseMaskForCond = false, bool UseMaskForGaps = false);
+      bool UseMaskForCond = false, bool UseMaskForGaps = false) const;
   InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                         TTI::TargetCostKind CostKind) const;
   bool areInlineCompatible(const Function *Caller,
@@ -150,7 +149,7 @@ class PPCTTIImpl : public BasicTTIImplBase<PPCTTIImpl> {
   InstructionCost getVPMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                                     unsigned AddressSpace,
                                     TTI::TargetCostKind CostKind,
-                                    const Instruction *I = nullptr);
+                                    const Instruction *I = nullptr) const;
   bool supportsTailCallFor(const CallBase *CB) const;
 
 private:

diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 6bedc43c92c0f..a6a14837bf473 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -133,8 +133,9 @@ static InstructionCost getIntImmCostImpl(const DataLayout &DL,
                                     /*CompressionCost=*/false, FreeZeroes);
 }
 
-InstructionCost RISCVTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
-                                            TTI::TargetCostKind CostKind) {
+InstructionCost
+RISCVTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
+                            TTI::TargetCostKind CostKind) const {
   return getIntImmCostImpl(getDataLayout(), getST(), Imm, Ty, CostKind, false);
 }
 
@@ -168,7 +169,7 @@ static bool canUseShiftPair(Instruction *Inst, const APInt &Imm) {
 InstructionCost RISCVTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                                 const APInt &Imm, Type *Ty,
                                                 TTI::TargetCostKind CostKind,
-                                                Instruction *Inst) {
+                                                Instruction *Inst) const {
   assert(Ty->isIntegerTy() &&
          "getIntImmCost can only estimate cost of materialising integers");
 
@@ -276,7 +277,7 @@ InstructionCost RISCVTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
 InstructionCost
 RISCVTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                   const APInt &Imm, Type *Ty,
-                                  TTI::TargetCostKind CostKind) {
+                                  TTI::TargetCostKind CostKind) const {
   // Prevent hoisting in unknown cases.
   return TTI::TCC_Free;
 }
@@ -286,7 +287,7 @@ bool RISCVTTIImpl::hasActiveVectorLength(unsigned, Type *DataTy, Align) const {
 }
 
 TargetTransformInfo::PopcntSupportKind
-RISCVTTIImpl::getPopcntSupport(unsigned TyWidth) {
+RISCVTTIImpl::getPopcntSupport(unsigned TyWidth) const {
   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
   return ST->hasStdExtZbb() || (ST->hasVendorXCVbitmanip() && !ST->is64Bit())
              ? TTI::PSK_FastHardware
@@ -888,7 +889,7 @@ RISCVTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
 InstructionCost RISCVTTIImpl::getInterleavedMemoryOpCost(
     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
-    bool UseMaskForCond, bool UseMaskForGaps) {
+    bool UseMaskForCond, bool UseMaskForGaps) const {
 
   // The interleaved memory access pass will lower interleaved memory ops (i.e
   // a load and store followed by a specific shuffle) to vlseg/vsseg
@@ -1069,7 +1070,7 @@ InstructionCost RISCVTTIImpl::getStridedMemoryOpCost(
 }
 
 InstructionCost
-RISCVTTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
+RISCVTTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
   // FIXME: This is a property of the default vector convention, not
   // all possible calling conventions.  Fixing that will require
   // some TTI API and SLP rework.
@@ -1899,7 +1900,7 @@ RISCVTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
 
 InstructionCost RISCVTTIImpl::getExtendedReductionCost(
     unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy,
-    std::optional<FastMathFlags> FMF, TTI::TargetCostKind CostKind) {
+    std::optional<FastMathFlags> FMF, TTI::TargetCostKind CostKind) const {
   if (isa<FixedVectorType>(ValTy) && !ST->useRVVForFixedLengthVectors())
     return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, ValTy,
                                            FMF, CostKind);
@@ -2444,7 +2445,7 @@ InstructionCost RISCVTTIImpl::getArithmeticInstrCost(
 InstructionCost RISCVTTIImpl::getPointersChainCost(
     ArrayRef<const Value *> Ptrs, const Value *Base,
     const TTI::PointersChainInfo &Info, Type *AccessTy,
-    TTI::TargetCostKind CostKind) {
+    TTI::TargetCostKind CostKind) const {
   InstructionCost Cost = TTI::TCC_Free;
   // In the basic model we take into account GEP instructions only
   // (although here can come alloca instruction, a value, constants and/or
@@ -2488,9 +2489,9 @@ InstructionCost RISCVTTIImpl::getPointersChainCost(
   return Cost;
 }
 
-void RISCVTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
-                                           TTI::UnrollingPreferences &UP,
-                                           OptimizationRemarkEmitter *ORE) {
+void RISCVTTIImpl::getUnrollingPreferences(
+    Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP,
+    OptimizationRemarkEmitter *ORE) const {
   // TODO: More tuning on benchmarks and metrics with changes as needed
   //       would apply to all settings below to enable performance.
 
@@ -2566,7 +2567,7 @@ void RISCVTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
 }
 
 void RISCVTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                                         TTI::PeelingPreferences &PP) {
+                                         TTI::PeelingPreferences &PP) const {
   BaseT::getPeelingPreferences(L, SE, PP);
 }
 
@@ -2620,7 +2621,7 @@ RISCVTTIImpl::getPreferredAddressingMode(const Loop *L,
 }
 
 bool RISCVTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
-                                 const TargetTransformInfo::LSRCost &C2) {
+                                 const TargetTransformInfo::LSRCost &C2) const {
   // RISC-V specific here are "instruction number 1st priority".
   // If we need to emit adds inside the loop to add up base registers, then
   // we need at least one extra temporary register.
@@ -2669,7 +2670,7 @@ bool RISCVTTIImpl::isLegalMaskedCompressStore(Type *DataTy,
 /// sext instructions that sign extended the same initial value. A getelementptr
 /// is considered as "complex" if it has more than 2 operands.
 bool RISCVTTIImpl::shouldConsiderAddressTypePromotion(
-    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
+    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
   bool Considerable = false;
   AllowPromotionWithoutCommonHeader = false;
   if (!isa<SExtInst>(&I))

diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index b72ffc2448313..21f00e80608b1 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -79,14 +79,14 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
                                   TTI::TargetCostKind CostKind) const;
 
   InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
-                                TTI::TargetCostKind CostKind);
+                                TTI::TargetCostKind CostKind) const;
   InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                     const APInt &Imm, Type *Ty,
                                     TTI::TargetCostKind CostKind,
-                                    Instruction *Inst = nullptr);
+                                    Instruction *Inst = nullptr) const;
   InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                       const APInt &Imm, Type *Ty,
-                                      TTI::TargetCostKind CostKind);
+                                      TTI::TargetCostKind CostKind) const;
 
   /// \name EVL Support for predicated vectorization.
   /// Whether the target supports the %evl parameter of VP intrinsic efficiently
@@ -104,7 +104,8 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
   bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
                              Align Alignment) const;
 
-  TargetTransformInfo::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
+  TargetTransformInfo::PopcntSupportKind
+  getPopcntSupport(unsigned TyWidth) const;
 
   bool shouldExpandReduction(const IntrinsicInst *II) const;
   bool supportsScalableVectors() const { return ST->hasVInstructions(); }
@@ -141,14 +142,14 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
                                        const Value *Base,
                                        const TTI::PointersChainInfo &Info,
                                        Type *AccessTy,
-                                       TTI::TargetCostKind CostKind);
+                                       TTI::TargetCostKind CostKind) const;
 
   void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                TTI::UnrollingPreferences &UP,
-                               OptimizationRemarkEmitter *ORE);
+                               OptimizationRemarkEmitter *ORE) const;
 
   void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                             TTI::PeelingPreferences &PP);
+                             TTI::PeelingPreferences &PP) const;
 
   unsigned getMinVectorRegisterBitWidth() const {
     return ST->useRVVForFixedLengthVectors() ? 16 : 0;
@@ -173,7 +174,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
   InstructionCost getInterleavedMemoryOpCost(
       unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
       Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
-      bool UseMaskForCond = false, bool UseMaskForGaps = false);
+      bool UseMaskForCond = false, bool UseMaskForGaps = false) const;
 
   InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                          const Value *Ptr, bool VariableMask,
@@ -192,7 +193,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
                                          TTI::TargetCostKind CostKind,
                                          const Instruction *I) const;
 
-  InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys);
+  InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
 
   InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                    TTI::CastContextHint CCH,
@@ -211,7 +212,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
   InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
                                            Type *ResTy, VectorType *ValTy,
                                            std::optional<FastMathFlags> FMF,
-                                           TTI::TargetCostKind CostKind);
+                                           TTI::TargetCostKind CostKind) const;
 
   InstructionCost getMemoryOpCost(
       unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
@@ -264,11 +265,11 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
   }
 
   bool isLegalMaskedLoad(Type *DataType, Align Alignment,
-                         unsigned /*AddressSpace*/) {
+                         unsigned /*AddressSpace*/) const {
     return isLegalMaskedLoadStore(DataType, Alignment);
   }
   bool isLegalMaskedStore(Type *DataType, Align Alignment,
-                          unsigned /*AddressSpace*/) {
+                          unsigned /*AddressSpace*/) const {
     return isLegalMaskedLoadStore(DataType, Alignment);
   }
 
@@ -302,12 +303,12 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
     return isLegalMaskedGatherScatter(DataType, Alignment);
   }
 
-  bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) {
+  bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) const {
     // Scalarize masked gather for RV64 if EEW=64 indices aren't supported.
     return ST->is64Bit() && !ST->hasVInstructionsI64();
   }
 
-  bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) {
+  bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) const {
     // Scalarize masked scatter for RV64 if EEW=64 indices aren't supported.
     return ST->is64Bit() && !ST->hasVInstructionsI64();
   }
@@ -318,7 +319,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
   }
 
   bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
-                                    Align Alignment, unsigned AddrSpace) {
+                                    Align Alignment, unsigned AddrSpace) const {
     return TLI->isLegalInterleavedAccessType(VTy, Factor, Alignment, AddrSpace,
                                              DL);
   }
@@ -380,7 +381,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
     }
   }
 
-  unsigned getMaxInterleaveFactor(ElementCount VF) {
+  unsigned getMaxInterleaveFactor(ElementCount VF) const {
     // Don't interleave if the loop has been vectorized with scalable vectors.
     if (VF.isScalable())
       return 1;
@@ -389,7 +390,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
     return VF.isScalar() ? 1 : ST->getMaxInterleaveFactor();
   }
 
-  bool enableInterleavedAccessVectorization() { return true; }
+  bool enableInterleavedAccessVectorization() const { return true; }
 
   unsigned getMinTripCountTailFoldingThreshold() const;
 
@@ -446,11 +447,10 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
   }
 
   bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
-                     const TargetTransformInfo::LSRCost &C2);
+                     const TargetTransformInfo::LSRCost &C2) const;
 
-  bool
-  shouldConsiderAddressTypePromotion(const Instruction &I,
-                                     bool &AllowPromotionWithoutCommonHeader);
+  bool shouldConsiderAddressTypePromotion(
+      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const;
   std::optional<unsigned> getMinPageSize() const { return 4096; }
   /// Return true if the (vector) instruction I will be lowered to an
   /// instruction with a scalar splat operand for the given Operand number.

diff --git a/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h b/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h
index 24047f31fab29..5c919c2dd4024 100644
--- a/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h
+++ b/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h
@@ -39,7 +39,7 @@ class SPIRVTTIImpl : public BasicTTIImplBase<SPIRVTTIImpl> {
       : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
         TLI(ST->getTargetLowering()) {}
 
-  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) {
+  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const {
     // SPIR-V natively supports OpBitcount, per 3.53.14 in the spec, as such it
     // is reasonable to assume the Op is fast / preferable to the expanded loop.
     // Furthermore, this prevents information being lost if transforms are

diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
index 3811657aed83e..bfc50a02558cf 100644
--- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
@@ -136,8 +136,9 @@ unsigned SystemZTTIImpl::adjustInliningThreshold(const CallBase *CB) const {
   return Bonus;
 }
 
-InstructionCost SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
-                                              TTI::TargetCostKind CostKind) {
+InstructionCost
+SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
+                              TTI::TargetCostKind CostKind) const {
   assert(Ty->isIntegerTy());
 
   unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -173,7 +174,7 @@ InstructionCost SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
 InstructionCost SystemZTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                                   const APInt &Imm, Type *Ty,
                                                   TTI::TargetCostKind CostKind,
-                                                  Instruction *Inst) {
+                                                  Instruction *Inst) const {
   assert(Ty->isIntegerTy());
 
   unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -293,7 +294,7 @@ InstructionCost SystemZTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
 InstructionCost
 SystemZTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                     const APInt &Imm, Type *Ty,
-                                    TTI::TargetCostKind CostKind) {
+                                    TTI::TargetCostKind CostKind) const {
   assert(Ty->isIntegerTy());
 
   unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -342,16 +343,16 @@ SystemZTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
 }
 
 TargetTransformInfo::PopcntSupportKind
-SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) {
+SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) const {
   assert(isPowerOf2_32(TyWidth) && "Type width must be power of 2");
   if (ST->hasPopulationCount() && TyWidth <= 64)
     return TTI::PSK_FastHardware;
   return TTI::PSK_Software;
 }
 
-void SystemZTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
-                                             TTI::UnrollingPreferences &UP,
-                                             OptimizationRemarkEmitter *ORE) {
+void SystemZTTIImpl::getUnrollingPreferences(
+    Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP,
+    OptimizationRemarkEmitter *ORE) const {
   // Find out if L contains a call, what the machine instruction count
   // estimate is, and how many stores there are.
   bool HasCall = false;
@@ -406,12 +407,13 @@ void SystemZTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
 }
 
 void SystemZTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                                           TTI::PeelingPreferences &PP) {
+                                           TTI::PeelingPreferences &PP) const {
   BaseT::getPeelingPreferences(L, SE, PP);
 }
 
-bool SystemZTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
-                                   const TargetTransformInfo::LSRCost &C2) {
+bool SystemZTTIImpl::isLSRCostLess(
+    const TargetTransformInfo::LSRCost &C1,
+    const TargetTransformInfo::LSRCost &C2) const {
   // SystemZ specific: check instruction count (first), and don't care about
   // ImmCost, since offsets are checked explicitly.
   return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
@@ -478,7 +480,7 @@ unsigned SystemZTTIImpl::getMinPrefetchStride(unsigned NumMemAccesses,
   return ST->hasMiscellaneousExtensions3() ? 8192 : 2048;
 }
 
-bool SystemZTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
+bool SystemZTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) const {
   EVT VT = TLI->getValueType(DL, DataType);
   return (VT.isScalarInteger() && TLI->isTypeLegal(VT));
 }
@@ -1375,7 +1377,7 @@ InstructionCost SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
 InstructionCost SystemZTTIImpl::getInterleavedMemoryOpCost(
     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
-    bool UseMaskForCond, bool UseMaskForGaps) {
+    bool UseMaskForCond, bool UseMaskForGaps) const {
   if (UseMaskForCond || UseMaskForGaps)
     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                              Alignment, AddressSpace, CostKind,

diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h
index 16a750d561f56..99faab7194892 100644
--- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h
@@ -43,27 +43,27 @@ class SystemZTTIImpl : public BasicTTIImplBase<SystemZTTIImpl> {
   unsigned adjustInliningThreshold(const CallBase *CB) const;
 
   InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
-                                TTI::TargetCostKind CostKind);
+                                TTI::TargetCostKind CostKind) const;
 
   InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                     const APInt &Imm, Type *Ty,
                                     TTI::TargetCostKind CostKind,
-                                    Instruction *Inst = nullptr);
+                                    Instruction *Inst = nullptr) const;
   InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                       const APInt &Imm, Type *Ty,
-                                      TTI::TargetCostKind CostKind);
+                                      TTI::TargetCostKind CostKind) const;
 
-  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
+  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const;
 
   void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                TTI::UnrollingPreferences &UP,
-                               OptimizationRemarkEmitter *ORE);
+                               OptimizationRemarkEmitter *ORE) const;
 
   void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
-                             TTI::PeelingPreferences &PP);
+                             TTI::PeelingPreferences &PP) const;
 
   bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
-                     const TargetTransformInfo::LSRCost &C2);
+                     const TargetTransformInfo::LSRCost &C2) const;
 
   bool areInlineCompatible(const Function *Caller,
                            const Function *Callee) const;
@@ -84,16 +84,16 @@ class SystemZTTIImpl : public BasicTTIImplBase<SystemZTTIImpl> {
                                 bool HasCall) const override;
   bool enableWritePrefetching() const override { return true; }
 
-  bool hasDivRemOp(Type *DataType, bool IsSigned);
-  bool prefersVectorizedAddressing() { return false; }
-  bool LSRWithInstrQueries() { return true; }
+  bool hasDivRemOp(Type *DataType, bool IsSigned) const;
+  bool prefersVectorizedAddressing() const { return false; }
+  bool LSRWithInstrQueries() const { return true; }
   InstructionCost getScalarizationOverhead(VectorType *Ty,
                                            const APInt &DemandedElts,
                                            bool Insert, bool Extract,
                                            TTI::TargetCostKind CostKind,
                                            ArrayRef<Value *> VL = {}) const;
-  bool supportsEfficientVectorElementLoadStore() { return true; }
-  bool enableInterleavedAccessVectorization() { return true; }
+  bool supportsEfficientVectorElementLoadStore() const { return true; }
+  bool enableInterleavedAccessVectorization() const { return true; }
 
   InstructionCost getArithmeticInstrCost(
       unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
@@ -137,7 +137,7 @@ class SystemZTTIImpl : public BasicTTIImplBase<SystemZTTIImpl> {
   InstructionCost getInterleavedMemoryOpCost(
       unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
       Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
-      bool UseMaskForCond = false, bool UseMaskForGaps = false);
+      bool UseMaskForCond = false, bool UseMaskForGaps = false) const;
 
   InstructionCost
   getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,

diff --git a/llvm/lib/Target/VE/VETargetTransformInfo.h b/llvm/lib/Target/VE/VETargetTransformInfo.h
index f0fa01ef22912..88b46ff128383 100644
--- a/llvm/lib/Target/VE/VETargetTransformInfo.h
+++ b/llvm/lib/Target/VE/VETargetTransformInfo.h
@@ -134,17 +134,17 @@ class VETTIImpl : public BasicTTIImplBase<VETTIImpl> {
 
   // Load & Store {
   bool isLegalMaskedLoad(Type *DataType, MaybeAlign Alignment,
-                         unsigned /*AddressSpace*/) {
+                         unsigned /*AddressSpace*/) const {
     return isVectorLaneType(*getLaneType(DataType));
   }
   bool isLegalMaskedStore(Type *DataType, MaybeAlign Alignment,
-                          unsigned /*AddressSpace*/) {
+                          unsigned /*AddressSpace*/) const {
     return isVectorLaneType(*getLaneType(DataType));
   }
-  bool isLegalMaskedGather(Type *DataType, MaybeAlign Alignment) {
+  bool isLegalMaskedGather(Type *DataType, MaybeAlign Alignment) const {
     return isVectorLaneType(*getLaneType(DataType));
   };
-  bool isLegalMaskedScatter(Type *DataType, MaybeAlign Alignment) {
+  bool isLegalMaskedScatter(Type *DataType, MaybeAlign Alignment) const {
     return isVectorLaneType(*getLaneType(DataType));
   }
   // } Load & Store

diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
index dd0e672a2a21c..b8b6051ec5f28 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
@@ -57,7 +57,7 @@ class WebAssemblyTTIImpl final : public BasicTTIImplBase<WebAssemblyTTIImpl> {
   /// \name Vector TTI Implementations
   /// @{
 
-  bool enableInterleavedAccessVectorization() { return true; }
+  bool enableInterleavedAccessVectorization() const { return true; }
 
   unsigned getNumberOfRegisters(unsigned ClassID) const;
   TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;

diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index 59bcd3b921799..363019986d76c 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -102,7 +102,7 @@ using CostKindTblEntry = CostTblEntryT<CostKindCosts>;
 using TypeConversionCostKindTblEntry = TypeConversionCostTblEntryT<CostKindCosts>;
 
 TargetTransformInfo::PopcntSupportKind
-X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
+X86TTIImpl::getPopcntSupport(unsigned TyWidth) const {
   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
   // TODO: Currently the __builtin_popcount() implementation using SSE3
   //   instructions is inefficient. Once the problem is fixed, we should
@@ -225,7 +225,7 @@ unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
       .getFixedValue();
 }
 
-unsigned X86TTIImpl::getMaxInterleaveFactor(ElementCount VF) {
+unsigned X86TTIImpl::getMaxInterleaveFactor(ElementCount VF) const {
   // If the loop will not be vectorized, don't interleave the loop.
   // Let regular unroll to unroll the loop, which saves the overflow
   // check and memory check cost.
@@ -5420,11 +5420,10 @@ X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
   return Cost + LT.first;
 }
 
-InstructionCost
-X86TTIImpl::getPointersChainCost(ArrayRef<const Value *> Ptrs,
-                                 const Value *Base,
-                                 const TTI::PointersChainInfo &Info,
-                                 Type *AccessTy, TTI::TargetCostKind CostKind) {
+InstructionCost X86TTIImpl::getPointersChainCost(
+    ArrayRef<const Value *> Ptrs, const Value *Base,
+    const TTI::PointersChainInfo &Info, Type *AccessTy,
+    TTI::TargetCostKind CostKind) const {
   if (Info.isSameBase() && Info.isKnownStride()) {
     // If all the pointers have known stride all the differences are translated
     // into constants. X86 memory addressing allows encoding it into
@@ -5442,7 +5441,7 @@ X86TTIImpl::getPointersChainCost(ArrayRef<const Value *> Ptrs,
 
 InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
                                                       ScalarEvolution *SE,
-                                                      const SCEV *Ptr) {
+                                                      const SCEV *Ptr) const {
   // Address computations in vectorized code with non-consecutive addresses will
   // likely result in more instructions compared to scalar code where the
   // computation can more often be merged into the index mode. The resulting
@@ -5900,7 +5899,7 @@ X86TTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *ValTy,
 /// Calculate the cost of materializing a 64-bit value. This helper
 /// method might only calculate a fraction of a larger immediate. Therefore it
 /// is valid to return a cost of ZERO.
-InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
+InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) const {
   if (Val == 0)
     return TTI::TCC_Free;
 
@@ -5911,7 +5910,7 @@ InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
 }
 
 InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
-                                          TTI::TargetCostKind CostKind) {
+                                          TTI::TargetCostKind CostKind) const {
   assert(Ty->isIntegerTy());
 
   unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -5948,7 +5947,7 @@ InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
 InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                               const APInt &Imm, Type *Ty,
                                               TTI::TargetCostKind CostKind,
-                                              Instruction *Inst) {
+                                              Instruction *Inst) const {
   assert(Ty->isIntegerTy());
 
   unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -6050,9 +6049,10 @@ InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
   return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
 }
 
-InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
-                                                const APInt &Imm, Type *Ty,
-                                                TTI::TargetCostKind CostKind) {
+InstructionCost
+X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
+                                const APInt &Imm, Type *Ty,
+                                TTI::TargetCostKind CostKind) const {
   assert(Ty->isIntegerTy());
 
   unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -6215,17 +6215,15 @@ InstructionCost X86TTIImpl::getGatherScatterOpCost(
 }
 
 bool X86TTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
-                               const TargetTransformInfo::LSRCost &C2) {
-    // X86 specific here are "instruction number 1st priority".
-    return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
-                    C1.NumIVMuls, C1.NumBaseAdds,
-                    C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
-           std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
-                    C2.NumIVMuls, C2.NumBaseAdds,
-                    C2.ScaleCost, C2.ImmCost, C2.SetupCost);
+                               const TargetTransformInfo::LSRCost &C2) const {
+  // X86 specific here are "instruction number 1st priority".
+  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, C1.NumIVMuls,
+                  C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
+         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, C2.NumIVMuls,
+                  C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
 }
 
-bool X86TTIImpl::canMacroFuseCmp() {
+bool X86TTIImpl::canMacroFuseCmp() const {
   return ST->hasMacroFusion() || ST->hasBranchFusion();
 }
 
@@ -6279,7 +6277,7 @@ bool X86TTIImpl::isLegalMaskedStore(Type *DataTy, Align Alignment,
   return isLegalMaskedLoadStore(ScalarTy, ST);
 }
 
-bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
+bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) const {
   unsigned DataSize = DL.getTypeStoreSize(DataType);
   // The only supported nontemporal loads are for aligned vectors of 16 or 32
   // bytes.  Note that 32-byte nontemporal vector loads are supported by AVX2
@@ -6290,7 +6288,7 @@ bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
   return false;
 }
 
-bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
+bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) const {
   unsigned DataSize = DL.getTypeStoreSize(DataType);
 
   // SSE4A supports nontemporal stores of float and double at arbitrary
@@ -6322,7 +6320,7 @@ bool X86TTIImpl::isLegalBroadcastLoad(Type *ElementTy,
          ElementTy == Type::getDoubleTy(ElementTy->getContext());
 }
 
-bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy, Align Alignment) {
+bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy, Align Alignment) const {
   if (!isa<VectorType>(DataTy))
     return false;
 
@@ -6346,7 +6344,8 @@ bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy, Align Alignment) {
          ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
 }
 
-bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy, Align Alignment) {
+bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy,
+                                            Align Alignment) const {
   return isLegalMaskedExpandLoad(DataTy, Alignment);
 }
 
@@ -6432,12 +6431,12 @@ bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) const {
   return isLegalMaskedGatherScatter(DataType, Alignment);
 }
 
-bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
+bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) const {
   EVT VT = TLI->getValueType(DL, DataType);
   return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
 }
 
-bool X86TTIImpl::isExpensiveToSpeculativelyExecute(const Instruction* I) {
+bool X86TTIImpl::isExpensiveToSpeculativelyExecute(const Instruction *I) const {
   // FDIV is always expensive, even if it has a very low uop count.
   // TODO: Still necessary for recent CPUs with low latency/throughput fdiv?
   if (I->getOpcode() == Instruction::FDiv)
@@ -6446,9 +6445,7 @@ bool X86TTIImpl::isExpensiveToSpeculativelyExecute(const Instruction* I) {
   return BaseT::isExpensiveToSpeculativelyExecute(I);
 }
 
-bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
-  return false;
-}
+bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const { return false; }
 
 bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                      const Function *Callee) const {
@@ -6563,7 +6560,7 @@ bool X86TTIImpl::supportsEfficientVectorElementLoadStore() const {
   return false;
 }
 
-bool X86TTIImpl::enableInterleavedAccessVectorization() {
+bool X86TTIImpl::enableInterleavedAccessVectorization() const {
   // TODO: We expect this to be beneficial regardless of arch,
   // but there are currently some unexplained performance artifacts on Atom.
   // As a temporary solution, disable on Atom.
@@ -6577,7 +6574,8 @@ bool X86TTIImpl::enableInterleavedAccessVectorization() {
 InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
     unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
     ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
-    TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
+    TTI::TargetCostKind CostKind, bool UseMaskForCond,
+    bool UseMaskForGaps) const {
   // VecTy for interleave memop is <VF*Factor x Elt>.
   // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
   // VecTy = <12 x i32>.
@@ -6731,7 +6729,7 @@ InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
 InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
     unsigned Opcode, Type *BaseTy, unsigned Factor, ArrayRef<unsigned> Indices,
     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
-    bool UseMaskForCond, bool UseMaskForGaps) {
+    bool UseMaskForCond, bool UseMaskForGaps) const {
   auto *VecTy = cast<FixedVectorType>(BaseTy);
 
   auto isSupportedOnAVX512 = [&](Type *VecTy) {

diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h
index f93b97e94550f..1534301667ff1 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.h
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h
@@ -116,7 +116,7 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
 
   /// \name Scalar TTI Implementations
   /// @{
-  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);
+  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const;
 
   /// @}
 
@@ -135,7 +135,7 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
   bool hasConditionalLoadStoreForType(Type *Ty, bool IsStore) const;
   TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
   unsigned getLoadStoreVecRegBitWidth(unsigned AS) const;
-  unsigned getMaxInterleaveFactor(ElementCount VF);
+  unsigned getMaxInterleaveFactor(ElementCount VF) const;
   InstructionCost getArithmeticInstrCost(
       unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
       TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
@@ -194,9 +194,9 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
                                        const Value *Base,
                                        const TTI::PointersChainInfo &Info,
                                        Type *AccessTy,
-                                       TTI::TargetCostKind CostKind);
+                                       TTI::TargetCostKind CostKind) const;
   InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
-                                            const SCEV *Ptr);
+                                            const SCEV *Ptr) const;
 
   std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                                     IntrinsicInst &II) const;
@@ -231,17 +231,17 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
   InstructionCost getInterleavedMemoryOpCost(
       unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
       Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
-      bool UseMaskForCond = false, bool UseMaskForGaps = false);
+      bool UseMaskForCond = false, bool UseMaskForGaps = false) const;
   InstructionCost getInterleavedMemoryOpCostAVX512(
       unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
       ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
       TTI::TargetCostKind CostKind, bool UseMaskForCond = false,
-      bool UseMaskForGaps = false);
+      bool UseMaskForGaps = false) const;
 
-  InstructionCost getIntImmCost(int64_t);
+  InstructionCost getIntImmCost(int64_t) const;
 
   InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
-                                TTI::TargetCostKind CostKind);
+                                TTI::TargetCostKind CostKind) const;
 
   InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                  const Instruction *I = nullptr) const;
@@ -249,10 +249,10 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
   InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                     const APInt &Imm, Type *Ty,
                                     TTI::TargetCostKind CostKind,
-                                    Instruction *Inst = nullptr);
+                                    Instruction *Inst = nullptr) const;
   InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                       const APInt &Imm, Type *Ty,
-                                      TTI::TargetCostKind CostKind);
+                                      TTI::TargetCostKind CostKind) const;
   /// Return the cost of the scaling factor used in the addressing
   /// mode represented by AM for this target, for a load/store
   /// of the specified type.
@@ -263,14 +263,14 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
                                        int64_t Scale, unsigned AddrSpace) const;
 
   bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
-                     const TargetTransformInfo::LSRCost &C2);
-  bool canMacroFuseCmp();
+                     const TargetTransformInfo::LSRCost &C2) const;
+  bool canMacroFuseCmp() const;
   bool isLegalMaskedLoad(Type *DataType, Align Alignment,
                          unsigned AddressSpace) const;
   bool isLegalMaskedStore(Type *DataType, Align Alignment,
                           unsigned AddressSpace) const;
-  bool isLegalNTLoad(Type *DataType, Align Alignment);
-  bool isLegalNTStore(Type *DataType, Align Alignment);
+  bool isLegalNTLoad(Type *DataType, Align Alignment) const;
+  bool isLegalNTStore(Type *DataType, Align Alignment) const;
   bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const;
   bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) const;
   bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) const {
@@ -279,13 +279,13 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
   bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment) const;
   bool isLegalMaskedGather(Type *DataType, Align Alignment) const;
   bool isLegalMaskedScatter(Type *DataType, Align Alignment) const;
-  bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment);
-  bool isLegalMaskedCompressStore(Type *DataType, Align Alignment);
+  bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const;
+  bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) const;
   bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                        const SmallBitVector &OpcodeMask) const;
-  bool hasDivRemOp(Type *DataType, bool IsSigned);
-  bool isExpensiveToSpeculativelyExecute(const Instruction *I);
-  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty);
+  bool hasDivRemOp(Type *DataType, bool IsSigned) const;
+  bool isExpensiveToSpeculativelyExecute(const Instruction *I) const;
+  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const;
   bool areInlineCompatible(const Function *Caller,
                            const Function *Callee) const;
   bool areTypesABICompatible(const Function *Caller, const Function *Callee,
@@ -300,7 +300,7 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
   bool preferAlternateOpcodeVectorization() const { return false; }
   bool prefersVectorizedAddressing() const;
   bool supportsEfficientVectorElementLoadStore() const;
-  bool enableInterleavedAccessVectorization();
+  bool enableInterleavedAccessVectorization() const;
 
   InstructionCost getBranchMispredictPenalty() const;
 

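A note on the mechanism, for readers skimming the diff: storing an implementation object as a const member means every method forwarded through it must itself be const-qualified, which is why the qualifier has to be added to each target's overrides in one sweep. The following minimal standalone sketch illustrates the effect; the Wrapper/Impl names are hypothetical stand-ins, not the actual TTI classes:

#include <utility>

struct ConstImpl {
  // const-qualified, as the TTI interface methods in this patch now are.
  unsigned getMaxInterleaveFactor() const { return 2; }
};

struct NonConstImpl {
  // Missing the const qualifier.
  unsigned getMaxInterleaveFactor() { return 2; }
};

// Simplified wrapper in the spirit of the Concept/Model pattern: because the
// implementation object is stored as a const member, only const member
// functions of T are callable through it.
template <typename T> class Wrapper {
  const T Impl;

public:
  Wrapper(T I) : Impl(std::move(I)) {}
  unsigned getMaxInterleaveFactor() const {
    // OK for Wrapper<ConstImpl>. For Wrapper<NonConstImpl> this call is
    // rejected: "passing 'const NonConstImpl' as 'this' argument discards
    // qualifiers".
    return Impl.getMaxInterleaveFactor();
  }
};

int main() {
  Wrapper<ConstImpl> W(ConstImpl{});
  return W.getMaxInterleaveFactor() == 2 ? 0 : 1;
}

Since templates instantiate member functions lazily, a missing const only surfaces when the forwarding call is actually compiled, so qualifying every override up front avoids deferred breakage in later call sites.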