[llvm] dd56e1c - [NFC] Only expose getXXXSize functions in TypeSize

Guillaume Chatelet via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 6 07:25:09 PST 2023


Author: Guillaume Chatelet
Date: 2023-01-06T15:24:52Z
New Revision: dd56e1c92b0e6e6be249f2d2dd40894e0417223f

URL: https://github.com/llvm/llvm-project/commit/dd56e1c92b0e6e6be249f2d2dd40894e0417223f
DIFF: https://github.com/llvm/llvm-project/commit/dd56e1c92b0e6e6be249f2d2dd40894e0417223f.diff

LOG: [NFC] Only expose getXXXSize functions in TypeSize

Currently 'TypeSize' exposes two functions that serve the same purpose:
 - getFixedSize / getFixedValue
 - getKnownMinSize / getKnownMinValue

source : https://github.com/llvm/llvm-project/blob/bf82070ea465969e9ae86a31dfcbf94c2a7b4c4c/llvm/include/llvm/Support/TypeSize.h#L337-L338

This patch proposes to remove one of the two and stick to a single function across the code base.

Differential Revision: https://reviews.llvm.org/D141134

Added: 
    

Modified: 
    llvm/include/llvm/Support/TypeSize.h
    llvm/lib/Analysis/ConstantFolding.cpp
    llvm/lib/Analysis/Loads.cpp
    llvm/lib/CodeGen/Analysis.cpp
    llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
    llvm/lib/CodeGen/StackProtector.cpp
    llvm/lib/IR/DataLayout.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.h
    llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
    llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
    llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
    llvm/utils/TableGen/CodeGenDAGPatterns.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/Support/TypeSize.h b/llvm/include/llvm/Support/TypeSize.h
index 53c888e0a2e9b..7aae6f3569b46 100644
--- a/llvm/include/llvm/Support/TypeSize.h
+++ b/llvm/include/llvm/Support/TypeSize.h
@@ -311,9 +311,16 @@ class ElementCount
 // the exact size. If the type is a scalable vector, it will represent the known
 // minimum size.
 class TypeSize : public details::FixedOrScalableQuantity<TypeSize, uint64_t> {
+  using UP = details::FixedOrScalableQuantity<TypeSize, uint64_t>;
+
   TypeSize(const FixedOrScalableQuantity<TypeSize, uint64_t> &V)
       : FixedOrScalableQuantity(V) {}
 
+  // Make 'getFixedValue' private, it is exposed as 'getFixedSize' below.
+  using UP::getFixedValue;
+  // Make 'getKnownMinValue' private, it is exposed as 'getKnownMinSize' below.
+  using UP::getKnownMinValue;
+
 public:
   constexpr TypeSize(ScalarTy Quantity, bool Scalable)
       : FixedOrScalableQuantity(Quantity, Scalable) {}
@@ -399,7 +406,7 @@ class TypeSize : public details::FixedOrScalableQuantity<TypeSize, uint64_t> {
 /// Similar to the alignTo functions in MathExtras.h
 inline constexpr TypeSize alignTo(TypeSize Size, uint64_t Align) {
   assert(Align != 0u && "Align must be non-zero");
-  return {(Size.getKnownMinValue() + Align - 1) / Align * Align,
+  return {(Size.getKnownMinSize() + Align - 1) / Align * Align,
           Size.isScalable()};
 }
 

diff  --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 7a601ad904168..6ad83af464781 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -599,7 +599,7 @@ Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
     return nullptr;
 
   // If we're not accessing anything in this constant, the result is undefined.
-  if (Offset >= (int64_t)InitializerSize.getFixedValue())
+  if (Offset >= (int64_t)InitializerSize.getFixedSize())
     return PoisonValue::get(IntType);
 
   unsigned char RawBytes[32] = {0};

diff  --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index e108df8473210..325c8b2e32dff 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -408,7 +408,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
   TypeSize TySize = DL.getTypeStoreSize(Ty);
   if (TySize.isScalable())
     return false;
-  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
+  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedSize());
   return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                      TLI);
 }

diff  --git a/llvm/lib/CodeGen/Analysis.cpp b/llvm/lib/CodeGen/Analysis.cpp
index f5dbaccfcad50..54806297c3e70 100644
--- a/llvm/lib/CodeGen/Analysis.cpp
+++ b/llvm/lib/CodeGen/Analysis.cpp
@@ -101,7 +101,7 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
   // Given an array type, recursively traverse the elements.
   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
+    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedSize();
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                       StartingOffset + i * EltSize);
@@ -146,7 +146,7 @@ void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
   // Given an array type, recursively traverse the elements.
   if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
     Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
+    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedSize();
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                        StartingOffset + i * EltSize);

diff  --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 4d44e962c8cfb..0a0acd3e76ff4 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4062,11 +4062,11 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
     AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
                             DAG.getVScale(dl, IntPtr,
                                           APInt(IntPtr.getScalarSizeInBits(),
-                                                TySize.getKnownMinValue())));
+                                                TySize.getKnownMinSize())));
   else
     AllocSize =
         DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
-                    DAG.getConstant(TySize.getFixedValue(), dl, IntPtr));
+                    DAG.getConstant(TySize.getFixedSize(), dl, IntPtr));
 
   // Handle alignment.  If the requested alignment is less than or equal to
   // the stack alignment, ignore it.  If the size is greater than or equal to

diff  --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index f76877facc196..0b16f4f41db65 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -218,7 +218,7 @@ bool StackProtector::HasAddressTaken(const Instruction *AI,
       // We can't subtract a fixed size from a scalable one, so in that case
       // assume the scalable value is of minimum size.
       TypeSize NewAllocSize =
-          TypeSize::Fixed(AllocSize.getKnownMinValue()) - OffsetSize;
+          TypeSize::Fixed(AllocSize.getKnownMinSize()) - OffsetSize;
       if (HasAddressTaken(I, NewAllocSize))
         return true;
       break;

diff  --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
index fb331eec602d8..6fc3f66e617eb 100644
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -67,7 +67,7 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
 
     getMemberOffsets()[i] = StructSize;
     // Consume space for this data item
-    StructSize += DL.getTypeAllocSize(Ty).getFixedValue();
+    StructSize += DL.getTypeAllocSize(Ty).getFixedSize();
   }
 
   // Add padding to the end of the struct so that it could be put in an array

diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index b417ce002655e..51538bb661d42 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10468,7 +10468,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
     unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits();
     EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
 
-    if (SrcVTSize.getFixedValue() < VTSize) {
+    if (SrcVTSize.getFixedSize() < VTSize) {
       assert(2 * SrcVTSize == VTSize);
       // We can pad out the smaller vector for free, so if it's part of a
       // shuffle...
@@ -10478,7 +10478,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
       continue;
     }
 
-    if (SrcVTSize.getFixedValue() != 2 * VTSize) {
+    if (SrcVTSize.getFixedSize() != 2 * VTSize) {
       LLVM_DEBUG(
           dbgs() << "Reshuffle failed: result vector too small to extract\n");
       return SDValue();

diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index e62a1bf2ec0b0..d542fe88a1ba4 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -791,7 +791,7 @@ class AArch64TargetLowering : public TargetLowering {
 
     TypeSize TS = VT.getSizeInBits();
     // TODO: We should be able to use bic/bif too for SVE.
-    return !TS.isScalable() && TS.getFixedValue() >= 64; // vector 'bic'
+    return !TS.isScalable() && TS.getFixedSize() >= 64; // vector 'bic'
   }
 
   bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(

diff  --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
index 11ba5c91dae9b..d2ea11f285bc6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -650,7 +650,7 @@ class AMDGPULowerModuleLDS : public ModulePass {
         continue;
       }
       CandidateTy Candidate(GV, K.second.size(),
-                      DL.getTypeAllocSize(GV->getValueType()).getFixedValue());
+                      DL.getTypeAllocSize(GV->getValueType()).getFixedSize());
       if (MostUsed < Candidate)
         MostUsed = Candidate;
     }

diff  --git a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
index 3ff6e9b469398..c2eb11a2ca66d 100644
--- a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
@@ -1816,9 +1816,9 @@ auto HexagonVectorCombine::getSizeOf(const Type *Ty, SizeKind Kind) const
   auto *NcTy = const_cast<Type *>(Ty);
   switch (Kind) {
   case Store:
-    return DL.getTypeStoreSize(NcTy).getFixedValue();
+    return DL.getTypeStoreSize(NcTy).getFixedSize();
   case Alloc:
-    return DL.getTypeAllocSize(NcTy).getFixedValue();
+    return DL.getTypeAllocSize(NcTy).getFixedSize();
   }
   llvm_unreachable("Unhandled SizeKind enum");
 }

diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 22eebe165259c..753242a24e185 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -138,7 +138,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       if (VT.getVectorMinNumElements() < MinElts)
         return;
 
-      unsigned Size = VT.getSizeInBits().getKnownMinValue();
+      unsigned Size = VT.getSizeInBits().getKnownMinSize();
       const TargetRegisterClass *RC;
       if (Size <= RISCV::RVVBitsPerBlock)
         RC = &RISCV::VRRegClass;
@@ -1589,7 +1589,7 @@ static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
 
 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
   assert(VT.isScalableVector() && "Expecting a scalable vector type");
-  unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
+  unsigned KnownSize = VT.getSizeInBits().getKnownMinSize();
   if (VT.getVectorElementType() == MVT::i1)
     KnownSize *= 8;
 
@@ -5443,7 +5443,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
     // Optimize for constant AVL
     if (isa<ConstantSDNode>(AVL)) {
       unsigned EltSize = VT.getScalarSizeInBits();
-      unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
+      unsigned MinSize = VT.getSizeInBits().getKnownMinSize();
 
       unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
       unsigned MaxVLMAX =
@@ -6419,7 +6419,7 @@ SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Op2);
   }
   unsigned EltSize = VecVT.getScalarSizeInBits();
-  unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
+  unsigned MinSize = VecVT.getSizeInBits().getKnownMinSize();
   unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
   unsigned MaxVLMAX =
     RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);

diff  --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 02ce1b135f7f2..54dfe3f9b3df2 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -1008,7 +1008,7 @@ InstructionCost RISCVTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
 unsigned RISCVTTIImpl::getEstimatedVLFor(VectorType *Ty) {
   if (isa<ScalableVectorType>(Ty)) {
     const unsigned EltSize = DL.getTypeSizeInBits(Ty->getElementType());
-    const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinValue();
+    const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinSize();
     const unsigned VectorBits = *getVScaleForTuning() * RISCV::RVVBitsPerBlock;
     return RISCVTargetLowering::computeVLMAX(VectorBits, EltSize, MinSize);
   }
@@ -1472,7 +1472,7 @@ unsigned RISCVTTIImpl::getRegUsageForType(Type *Ty) {
   TypeSize Size = DL.getTypeSizeInBits(Ty);
   if (Ty->isVectorTy()) {
     if (Size.isScalable() && ST->hasVInstructions())
-      return divideCeil(Size.getKnownMinValue(), RISCV::RVVBitsPerBlock);
+      return divideCeil(Size.getKnownMinSize(), RISCV::RVVBitsPerBlock);
 
     if (ST->useRVVForFixedLengthVectors())
       return divideCeil(Size, ST->getRealMinVLen());

diff  --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index 7dace08a8bf33..da9abdc2f1f7f 100644
--- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -543,7 +543,7 @@ static bool findArgParts(Argument *Arg, const DataLayout &DL, AAResults &AAR,
       if (!isAligned(I->getAlign(), Off))
         return false;
 
-      NeededDerefBytes = std::max(NeededDerefBytes, Off + Size.getFixedValue());
+      NeededDerefBytes = std::max(NeededDerefBytes, Off + Size.getFixedSize());
       NeededAlign = std::max(NeededAlign, I->getAlign());
     }
 

diff  --git a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
index bbe04fc769f3d..79f85773ed327 100644
--- a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -764,8 +764,8 @@ bool TypeInfer::EnforceSameNumElts(TypeSetByHwMode &V, TypeSetByHwMode &W) {
 namespace {
 struct TypeSizeComparator {
   bool operator()(const TypeSize &LHS, const TypeSize &RHS) const {
-    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
-           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
+    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinSize()) <
+           std::make_tuple(RHS.isScalable(), RHS.getKnownMinSize());
   }
 };
 } // end anonymous namespace


        


More information about the llvm-commits mailing list