[llvm] [NFCI] replace getValueType with new getGlobalSize query (PR #177186)

Jameson Nash via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 21 07:43:23 PST 2026


https://github.com/vtjnash created https://github.com/llvm/llvm-project/pull/177186

The new query returns uint64_t to simplify callers, since scalable globals are not permitted. The goal is to later split this into several queries (such as getMinObjectSize and isKnownSize), where getMinObjectSize is provided instead of a Type at creation time, and isKnownSize is true if the initializer is defined for linking or if the flag is set explicitly.
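
For illustration only (not part of the patch; the helper names below are hypothetical), a typical caller changes roughly like this, matching the hunks in the diff:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/GlobalVariable.h"

    // Old pattern: derive the size from the value type; callers must cope
    // with TypeSize and assert that it is fixed.
    static uint64_t globalSizeBefore(const llvm::GlobalVariable *GV,
                                     const llvm::DataLayout &DL) {
      return DL.getTypeAllocSize(GV->getValueType()).getFixedValue();
    }

    // New pattern: ask the GlobalVariable directly; scalable globals are
    // not permitted, so the result is a plain uint64_t.
    static uint64_t globalSizeAfter(const llvm::GlobalVariable *GV,
                                    const llvm::DataLayout &DL) {
      return GV->getGlobalSize(DL);
    }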

From 4525c2b1695279c51e2945396f934e32e484acaa Mon Sep 17 00:00:00 2001
From: Jameson Nash <vtjnash at gmail.com>
Date: Tue, 20 Jan 2026 19:29:23 +0000
Subject: [PATCH] [NFCI] replace getValueType with new getGlobalSize query

Returns uint64_t to simplify callers, since scalable globals are not
permitted. The goal is to later split this into several queries
(knownMinSize / knownMaxSize), where knownMinSize is provided instead
of a Type at creation time (or zero if not provided, such as for a
Function), and knownMaxSize is the size of the initializer if present,
or knownMinSize if the variable is only initialized externally.
---
 llvm/include/llvm/IR/GlobalVariable.h               |  4 ++++
 llvm/lib/Analysis/MemoryBuiltins.cpp                |  4 ++--
 llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp          |  2 +-
 llvm/lib/CodeGen/CodeGenPrepare.cpp                 |  2 +-
 llvm/lib/CodeGen/GlobalMerge.cpp                    |  3 +--
 llvm/lib/IR/Globals.cpp                             |  6 ++++++
 llvm/lib/LTO/LTO.cpp                                |  2 +-
 llvm/lib/Linker/LinkModules.cpp                     | 10 ++++++----
 llvm/lib/Object/IRSymtab.cpp                        |  3 +--
 llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp         |  2 +-
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp       |  2 +-
 llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp      |  9 ++++-----
 llvm/lib/Target/AMDGPU/AMDGPULowerExecSync.cpp      |  4 ++--
 llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp | 13 +++++--------
 llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp    | 11 +++++------
 llvm/lib/Target/AMDGPU/AMDGPUMemoryUtils.cpp        |  2 +-
 llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp      |  2 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp           |  6 +++---
 llvm/lib/Target/BPF/BTFDebug.cpp                    |  2 +-
 llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp           |  6 +++---
 llvm/lib/Target/TargetMachine.cpp                   |  2 +-
 .../Target/WebAssembly/WebAssemblyAsmPrinter.cpp    |  2 +-
 llvm/lib/Transforms/IPO/GlobalOpt.cpp               | 12 ++++++------
 llvm/lib/Transforms/IPO/LowerTypeTests.cpp          |  2 +-
 .../InstCombine/InstCombineLoadStoreAlloca.cpp      |  2 +-
 .../Instrumentation/IndirectCallPromotion.cpp       |  3 +--
 .../Transforms/Instrumentation/InstrProfiling.cpp   |  3 +--
 llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp    |  2 +-
 .../Transforms/Utils/RelLookupTableConverter.cpp    |  2 +-
 29 files changed, 64 insertions(+), 61 deletions(-)

diff --git a/llvm/include/llvm/IR/GlobalVariable.h b/llvm/include/llvm/IR/GlobalVariable.h
index 8e6af8ae6e203..79618bb6750ab 100644
--- a/llvm/include/llvm/IR/GlobalVariable.h
+++ b/llvm/include/llvm/IR/GlobalVariable.h
@@ -32,6 +32,7 @@
 namespace llvm {
 
 class Constant;
+class DataLayout;
 class Module;
 
 template <typename ValueSubClass, typename... Args> class SymbolTableListTraits;
@@ -170,6 +171,9 @@ class GlobalVariable : public GlobalObject, public ilist_node<GlobalVariable> {
   /// it isn't explicitly set.
   LLVM_ABI void replaceInitializer(Constant *InitVal);
 
+  /// Get the size of this global variable in bytes.
+  LLVM_ABI uint64_t getGlobalSize(const DataLayout &DL) const;
+
   /// If the value is a global constant, its value is immutable throughout the
   /// runtime execution of the program.  Assigning a value into the constant
   /// leads to undefined behavior.
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 8de8df8f1769a..a329f9ae24c13 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -615,7 +615,7 @@ std::optional<TypeSize> llvm::getBaseObjectSize(const Value *Ptr,
     if (!GV->getValueType()->isSized() || GV->hasExternalWeakLinkage() ||
         !GV->hasInitializer() || GV->isInterposable())
       return std::nullopt;
-    return Align(DL.getTypeAllocSize(GV->getValueType()), GV->getAlign());
+    return Align(TypeSize::getFixed(GV->getGlobalSize(DL)), GV->getAlign());
   }
 
   if (auto *A = dyn_cast<Argument>(Ptr)) {
@@ -1048,7 +1048,7 @@ OffsetSpan ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV) {
        Options.EvalMode != ObjectSizeOpts::Mode::Min))
     return ObjectSizeOffsetVisitor::unknown();
 
-  APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getValueType()));
+  APInt Size(IntTyBits, GV.getGlobalSize(DL));
   return OffsetSpan(Zero, align(Size, GV.getAlign()));
 }
 
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 38c966eb95450..420f09c514d63 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -811,7 +811,7 @@ void AsmPrinter::emitGlobalVariable(const GlobalVariable *GV) {
   SectionKind GVKind = TargetLoweringObjectFile::getKindForGlobal(GV, TM);
 
   const DataLayout &DL = GV->getDataLayout();
-  uint64_t Size = DL.getTypeAllocSize(GV->getValueType());
+  uint64_t Size = GV->getGlobalSize(DL);
 
   // If the alignment is specified, we *must* obey it.  Overaligning a global
   // with a specified alignment is a prompt way to break globals emitted to
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index a3347df45e4e0..db103b8664fca 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2674,7 +2674,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
       GlobalVariable *GV;
       if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
           GV->getPointerAlignment(*DL) < PrefAlign &&
-          DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2)
+          GV->getGlobalSize(*DL) >= MinSize + Offset2)
         GV->setAlignment(PrefAlign);
     }
   }
diff --git a/llvm/lib/CodeGen/GlobalMerge.cpp b/llvm/lib/CodeGen/GlobalMerge.cpp
index bdc43992448d3..f7f5641306f9a 100644
--- a/llvm/lib/CodeGen/GlobalMerge.cpp
+++ b/llvm/lib/CodeGen/GlobalMerge.cpp
@@ -267,8 +267,7 @@ bool GlobalMergeImpl::doMerge(SmallVectorImpl<GlobalVariable *> &Globals,
   llvm::stable_sort(
       Globals, [&DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
         // We don't support scalable global variables.
-        return DL.getTypeAllocSize(GV1->getValueType()).getFixedValue() <
-               DL.getTypeAllocSize(GV2->getValueType()).getFixedValue();
+        return GV1->getGlobalSize(DL) < GV2->getGlobalSize(DL);
       });
 
   // If we want to just blindly group all globals together, do so.
diff --git a/llvm/lib/IR/Globals.cpp b/llvm/lib/IR/Globals.cpp
index ae81e45202517..844266c72d773 100644
--- a/llvm/lib/IR/Globals.cpp
+++ b/llvm/lib/IR/Globals.cpp
@@ -14,6 +14,7 @@
 #include "LLVMContextImpl.h"
 #include "llvm/IR/ConstantRange.h"
 #include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/GlobalAlias.h"
 #include "llvm/IR/GlobalValue.h"
@@ -557,6 +558,11 @@ void GlobalVariable::replaceInitializer(Constant *InitVal) {
   setInitializer(InitVal);
 }
 
+uint64_t GlobalVariable::getGlobalSize(const DataLayout &DL) const {
+  // We don't support scalable global variables.
+  return DL.getTypeAllocSize(getValueType()).getFixedValue();
+}
+
 /// Copy all additional attributes (those not needed to create a GlobalVariable)
 /// from the GlobalVariable Src to this one.
 void GlobalVariable::copyAttributesFrom(const GlobalVariable *Src) {
diff --git a/llvm/lib/LTO/LTO.cpp b/llvm/lib/LTO/LTO.cpp
index 955a19db48607..c337da27921f0 100644
--- a/llvm/lib/LTO/LTO.cpp
+++ b/llvm/lib/LTO/LTO.cpp
@@ -1315,7 +1315,7 @@ Error LTO::runRegularLTO(AddStreamFn AddStream) {
       // Don't do anything if no instance of this common was prevailing.
       continue;
     GlobalVariable *OldGV = RegularLTO.CombinedModule->getNamedGlobal(I.first);
-    if (OldGV && DL.getTypeAllocSize(OldGV->getValueType()) == I.second.Size) {
+    if (OldGV && OldGV->getGlobalSize(DL) == I.second.Size) {
       // Don't create a new global if the type is already correct, just make
       // sure the alignment is correct.
       OldGV->setAlignment(I.second.Alignment);
diff --git a/llvm/lib/Linker/LinkModules.cpp b/llvm/lib/Linker/LinkModules.cpp
index 485ac106d4ebb..fa4ea6a8c8054 100644
--- a/llvm/lib/Linker/LinkModules.cpp
+++ b/llvm/lib/Linker/LinkModules.cpp
@@ -192,8 +192,8 @@ bool ModuleLinker::computeResultingSelectionKind(StringRef ComdatName,
 
     const DataLayout &DstDL = DstM.getDataLayout();
     const DataLayout &SrcDL = SrcM->getDataLayout();
-    uint64_t DstSize = DstDL.getTypeAllocSize(DstGV->getValueType());
-    uint64_t SrcSize = SrcDL.getTypeAllocSize(SrcGV->getValueType());
+    uint64_t DstSize = DstGV->getGlobalSize(DstDL);
+    uint64_t SrcSize = SrcGV->getGlobalSize(SrcDL);
     if (Result == Comdat::SelectionKind::ExactMatch) {
       if (SrcGV->getInitializer() != DstGV->getInitializer())
         return emitError("Linking COMDATs named '" + ComdatName +
@@ -292,8 +292,10 @@ bool ModuleLinker::shouldLinkFromSource(bool &LinkFromSrc,
     }
 
     const DataLayout &DL = Dest.getDataLayout();
-    uint64_t DestSize = DL.getTypeAllocSize(Dest.getValueType());
-    uint64_t SrcSize = DL.getTypeAllocSize(Src.getValueType());
+    // Functions and aliases may not have common linkage, so both must be
+    // GlobalVariables here
+    uint64_t DestSize = cast<GlobalVariable>(Dest).getGlobalSize(DL);
+    uint64_t SrcSize = cast<GlobalVariable>(Src).getGlobalSize(DL);
     LinkFromSrc = SrcSize > DestSize;
     return false;
   }
diff --git a/llvm/lib/Object/IRSymtab.cpp b/llvm/lib/Object/IRSymtab.cpp
index 20610262f6a1c..60dea4cf17f36 100644
--- a/llvm/lib/Object/IRSymtab.cpp
+++ b/llvm/lib/Object/IRSymtab.cpp
@@ -284,8 +284,7 @@ Error Builder::addSymbol(const ModuleSymbolTable &Msymtab,
     if (!GVar)
       return make_error<StringError>("Only variables can have common linkage!",
                                      inconvertibleErrorCode());
-    Uncommon().CommonSize =
-        GV->getDataLayout().getTypeAllocSize(GV->getValueType());
+    Uncommon().CommonSize = GVar->getGlobalSize(GV->getDataLayout());
     Uncommon().CommonAlign = GVar->getAlign() ? GVar->getAlign()->value() : 0;
   }
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index d0c86a785cc3d..dca561b8ebb23 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -323,7 +323,7 @@ void AMDGPUAsmPrinter::emitGlobalVariable(const GlobalVariable *GV) {
                          "' is already defined");
 
     const DataLayout &DL = GV->getDataLayout();
-    uint64_t Size = DL.getTypeAllocSize(GV->getValueType());
+    uint64_t Size = GV->getGlobalSize(DL);
     Align Alignment = GV->getAlign().value_or(Align(4));
 
     emitVisibility(GVSym, GV->getVisibility(), !GV->isDeclaration());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index c5e720ce26bc0..8cc9c64b1c580 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1535,7 +1535,7 @@ SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
     if (std::optional<uint32_t> Address =
             AMDGPUMachineFunction::getLDSAbsoluteAddress(*GV)) {
       if (IsNamedBarrier) {
-        unsigned BarCnt = DL.getTypeAllocSize(GV->getValueType()) / 16;
+        unsigned BarCnt = cast<GlobalVariable>(GV)->getGlobalSize(DL) / 16;
         MFI->recordNumNamedBarriers(Address.value(), BarCnt);
       }
       return DAG.getConstant(*Address, SDLoc(Op), Op.getValueType());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 3698b0062b8d0..c4202abf4b082 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -3169,16 +3169,16 @@ bool AMDGPULegalizerInfo::legalizeGlobalValue(
       return true; // Leave in place;
     }
 
+    const GlobalVariable &GVar = *cast<GlobalVariable>(GV);
     if (AS == AMDGPUAS::LOCAL_ADDRESS && GV->hasExternalLinkage()) {
-      Type *Ty = GV->getValueType();
       // HIP uses an unsized array `extern __shared__ T s[]` or similar
       // zero-sized type in other languages to declare the dynamic shared
       // memory which size is not known at the compile time. They will be
       // allocated by the runtime and placed directly after the static
       // allocated ones. They all share the same offset.
-      if (B.getDataLayout().getTypeAllocSize(Ty).isZero()) {
+      if (GVar.getGlobalSize(GVar.getDataLayout()) == 0) {
         // Adjust alignment for that dynamic shared memory array.
-        MFI->setDynLDSAlign(MF.getFunction(), *cast<GlobalVariable>(GV));
+        MFI->setDynLDSAlign(MF.getFunction(), GVar);
         LLT S32 = LLT::scalar(32);
         auto Sz = B.buildIntrinsic(Intrinsic::amdgcn_groupstaticsize, {S32});
         B.buildIntToPtr(DstReg, Sz);
@@ -3187,8 +3187,7 @@ bool AMDGPULegalizerInfo::legalizeGlobalValue(
       }
     }
 
-    B.buildConstant(DstReg, MFI->allocateLDSGlobal(B.getDataLayout(),
-                                                   *cast<GlobalVariable>(GV)));
+    B.buildConstant(DstReg, MFI->allocateLDSGlobal(B.getDataLayout(), GVar));
     MI.eraseFromParent();
     return true;
   }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerExecSync.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerExecSync.cpp
index 38b01dc1a551c..c26e97360efef 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerExecSync.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerExecSync.cpp
@@ -117,7 +117,7 @@ static bool lowerExecSyncGlobalVariables(
   for (GlobalVariable *GV : OrderedGVs) {
     unsigned BarrierScope = AMDGPU::Barrier::BARRIER_SCOPE_WORKGROUP;
     unsigned BarId = NumAbsolutes + 1;
-    unsigned BarCnt = DL.getTypeAllocSize(GV->getValueType()) / 16;
+    unsigned BarCnt = GV->getGlobalSize(DL) / 16;
     NumAbsolutes += BarCnt;
 
     // 4 bits for alignment, 5 bits for the barrier num,
@@ -160,7 +160,7 @@ static bool lowerExecSyncGlobalVariables(
       unsigned BarrierScope = AMDGPU::Barrier::BARRIER_SCOPE_WORKGROUP;
       unsigned BarId = Kernel2BarId[F];
       BarId += NumAbsolutes + 1;
-      unsigned BarCnt = DL.getTypeAllocSize(GV->getValueType()) / 16;
+      unsigned BarCnt = GV->getGlobalSize(DL) / 16;
       Kernel2BarId[F] += BarCnt;
       unsigned Offset = 0x802000u | BarrierScope << 9 | BarId << 4;
       recordLDSAbsoluteAddress(&M, NewGV, Offset);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
index be30128655176..588eee036a4e2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -501,9 +501,7 @@ class AMDGPULowerModuleLDS {
         // strategy
         continue;
       }
-      CandidateTy Candidate(
-          GV, K.second.size(),
-          DL.getTypeAllocSize(GV->getValueType()).getFixedValue());
+      CandidateTy Candidate(GV, K.second.size(), GV->getGlobalSize(DL));
       if (MostUsed < Candidate)
         MostUsed = Candidate;
     }
@@ -1061,14 +1059,14 @@ class AMDGPULowerModuleLDS {
         if (AllocateModuleScopeStruct) {
           // Allocated at zero, recorded once on construction, not once per
           // kernel
-          Offset += DL.getTypeAllocSize(MaybeModuleScopeStruct->getValueType());
+          Offset += MaybeModuleScopeStruct->getGlobalSize(DL);
         }
 
         if (AllocateKernelScopeStruct) {
           GlobalVariable *KernelStruct = Replacement->second.SGV;
           Offset = alignTo(Offset, AMDGPU::getAlign(DL, KernelStruct));
           recordLDSAbsoluteAddress(&M, KernelStruct, Offset);
-          Offset += DL.getTypeAllocSize(KernelStruct->getValueType());
+          Offset += KernelStruct->getGlobalSize(DL);
         }
 
         // If there is dynamic allocation, the alignment needed is included in
@@ -1138,7 +1136,7 @@ class AMDGPULowerModuleLDS {
       }
 
       Align Alignment = AMDGPU::getAlign(DL, &GV);
-      TypeSize GVSize = DL.getTypeAllocSize(GV.getValueType());
+      uint64_t GVSize = GV.getGlobalSize(DL);
 
       if (GVSize > 8) {
         // We might want to use a b96 or b128 load/store
@@ -1184,8 +1182,7 @@ class AMDGPULowerModuleLDS {
           LDSVarsToTransform.begin(), LDSVarsToTransform.end()));
 
       for (GlobalVariable *GV : Sorted) {
-        OptimizedStructLayoutField F(GV,
-                                     DL.getTypeAllocSize(GV->getValueType()),
+        OptimizedStructLayoutField F(GV, GV->getGlobalSize(DL),
                                      AMDGPU::getAlign(DL, GV));
         LayoutFields.emplace_back(F);
       }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
index 664a15ca55f53..6606c31dd248a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
@@ -107,7 +107,7 @@ unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL,
       if (!BarAddr)
         llvm_unreachable("named barrier should have an assigned address");
       Entry.first->second = BarAddr.value();
-      unsigned BarCnt = DL.getTypeAllocSize(GV.getValueType()) / 16;
+      unsigned BarCnt = GV.getGlobalSize(DL) / 16;
       recordNumNamedBarriers(BarAddr.value(), BarCnt);
       return BarAddr.value();
     }
@@ -135,8 +135,7 @@ unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL,
         // section, and not within some other non-absolute-address object
         // allocated here, but the extra error detection is minimal and we would
         // have to pass the Function around or cache the attribute value.
-        uint32_t ObjectEnd =
-            ObjectStart + DL.getTypeAllocSize(GV.getValueType());
+        uint32_t ObjectEnd = ObjectStart + GV.getGlobalSize(DL);
         if (ObjectEnd > StaticLDSSize) {
           report_fatal_error(
               "Absolute address LDS variable outside of static frame");
@@ -152,7 +151,7 @@ unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL,
     /// during lowering.
     Offset = StaticLDSSize = alignTo(StaticLDSSize, Alignment);
 
-    StaticLDSSize += DL.getTypeAllocSize(GV.getValueType());
+    StaticLDSSize += GV.getGlobalSize(DL);
 
     // Align LDS size to trailing, e.g. for aligning dynamic shared memory
     LDSSize = alignTo(StaticLDSSize, Trailing);
@@ -161,7 +160,7 @@ unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL,
            "expected region address space");
 
     Offset = StaticGDSSize = alignTo(StaticGDSSize, Alignment);
-    StaticGDSSize += DL.getTypeAllocSize(GV.getValueType());
+    StaticGDSSize += GV.getGlobalSize(DL);
 
     // FIXME: Apply alignment of dynamic GDS
     GDSSize = StaticGDSSize;
@@ -210,7 +209,7 @@ void AMDGPUMachineFunction::setDynLDSAlign(const Function &F,
                                            const GlobalVariable &GV) {
   const Module *M = F.getParent();
   const DataLayout &DL = M->getDataLayout();
-  assert(DL.getTypeAllocSize(GV.getValueType()).isZero());
+  assert(GV.getGlobalSize(DL) == 0);
 
   Align Alignment =
       DL.getValueOrABITypeAlignment(GV.getAlign(), GV.getValueType());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMemoryUtils.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMemoryUtils.cpp
index 4e16c13e30e91..9fbb19df1ba53 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMemoryUtils.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMemoryUtils.cpp
@@ -70,7 +70,7 @@ bool isDynamicLDS(const GlobalVariable &GV) {
   const DataLayout &DL = M->getDataLayout();
   if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
     return false;
-  return DL.getTypeAllocSize(GV.getValueType()) == 0;
+  return GV.getGlobalSize(DL) == 0;
 }
 
 bool isLDSVariableToLower(const GlobalVariable &GV) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index fc72b915faf54..3355c277e50d2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -1538,7 +1538,7 @@ bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
   for (const GlobalVariable *GV : UsedLDS) {
     Align Alignment =
         DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
-    uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
+    uint64_t AllocSize = GV->getGlobalSize(DL);
 
     // HIP uses an extern unsized array in local address space for dynamically
     // allocated shared memory.  In that case, we have to disable the promotion.
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index ae5e9fa2cb695..c42174b8be3c5 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -9035,17 +9035,17 @@ SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
       GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
     if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
         GV->hasExternalLinkage()) {
-      Type *Ty = GV->getValueType();
+      const GlobalVariable &GVar = *cast<GlobalVariable>(GV);
       // HIP uses an unsized array `extern __shared__ T s[]` or similar
       // zero-sized type in other languages to declare the dynamic shared
       // memory which size is not known at the compile time. They will be
       // allocated by the runtime and placed directly after the static
       // allocated ones. They all share the same offset.
-      if (DAG.getDataLayout().getTypeAllocSize(Ty).isZero()) {
+      if (GVar.getGlobalSize(GVar.getDataLayout()) == 0) {
         assert(PtrVT == MVT::i32 && "32-bit pointer is expected.");
         // Adjust alignment for that dynamic shared memory array.
         Function &F = DAG.getMachineFunction().getFunction();
-        MFI->setDynLDSAlign(F, *cast<GlobalVariable>(GV));
+        MFI->setDynLDSAlign(F, GVar);
         MFI->setUsesDynamicLDS(true);
         return SDValue(
             DAG.getMachineNode(AMDGPU::GET_GROUPSTATICSIZE, DL, PtrVT), 0);
diff --git a/llvm/lib/Target/BPF/BTFDebug.cpp b/llvm/lib/Target/BPF/BTFDebug.cpp
index 54d60c05e3c3e..4d9b1b5d7d7a5 100644
--- a/llvm/lib/Target/BPF/BTFDebug.cpp
+++ b/llvm/lib/Target/BPF/BTFDebug.cpp
@@ -1615,7 +1615,7 @@ void BTFDebug::processGlobals(bool ProcessingMapDef) {
 
     // Calculate symbol size
     const DataLayout &DL = Global.getDataLayout();
-    uint32_t Size = DL.getTypeAllocSize(Global.getValueType());
+    uint32_t Size = Global.getGlobalSize(DL);
 
     It->second->addDataSecEntry(VarId, Asm->getSymbol(&Global), Size);
 
diff --git a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
index 718d51c5a0673..2e938ae2e2860 100644
--- a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -2813,7 +2813,7 @@ void PPCAIXAsmPrinter::emitGlobalVariableHelper(const GlobalVariable *GV) {
   if (GV->hasCommonLinkage() || GVKind.isBSSLocal() ||
       GVKind.isThreadBSSLocal()) {
     Align Alignment = GV->getAlign().value_or(DL.getPreferredAlign(GV));
-    uint64_t Size = DL.getTypeAllocSize(GV->getValueType());
+    uint64_t Size = GV->getGlobalSize(DL);
     GVSym->setStorageClass(
         TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(GV));
 
@@ -2927,7 +2927,7 @@ void PPCAIXAsmPrinter::emitPGORefs(Module &M) {
   const DataLayout &DL = M.getDataLayout();
   for (GlobalVariable &GV : M.globals())
     if (GV.hasSection() && GV.getSection() == "__llvm_prf_cnts" &&
-        DL.getTypeAllocSize(GV.getValueType()) > 0) {
+        GV.getGlobalSize(DL) > 0) {
       HasNonZeroLengthPrfCntsSection = true;
       break;
     }
@@ -3090,7 +3090,7 @@ bool PPCAIXAsmPrinter::doInitialization(Module &M) {
     if (G.isThreadLocal() && !G.isDeclaration()) {
       TLSVarAddress = alignTo(TLSVarAddress, getGVAlignment(&G, DL));
       TLSVarsToAddressMapping[&G] = TLSVarAddress;
-      TLSVarAddress += DL.getTypeAllocSize(G.getValueType());
+      TLSVarAddress += G.getGlobalSize(DL);
     }
   }
 
diff --git a/llvm/lib/Target/TargetMachine.cpp b/llvm/lib/Target/TargetMachine.cpp
index 9bda8a42fd89e..f8f13a042fec0 100644
--- a/llvm/lib/Target/TargetMachine.cpp
+++ b/llvm/lib/Target/TargetMachine.cpp
@@ -133,7 +133,7 @@ bool TargetMachine::isLargeGlobalValue(const GlobalValue *GVal) const {
             .isReadOnlyWithRel())
       return false;
     const DataLayout &DL = GV->getDataLayout();
-    uint64_t Size = DL.getTypeAllocSize(GV->getValueType());
+    uint64_t Size = GV->getGlobalSize(DL);
     return Size == 0 || Size > LargeDataThreshold;
   }
 
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
index 526420bb2b294..44733f1d58924 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
@@ -404,7 +404,7 @@ void WebAssemblyAsmPrinter::emitEndOfAsmFile(Module &M) {
     if (!G.hasInitializer() && G.hasExternalLinkage() &&
         !WebAssembly::isWasmVarAddressSpace(G.getAddressSpace()) &&
         G.getValueType()->isSized()) {
-      uint16_t Size = M.getDataLayout().getTypeAllocSize(G.getValueType());
+      uint16_t Size = G.getGlobalSize(M.getDataLayout());
       OutStreamer->emitELFSize(getSymbol(&G),
                                MCConstantExpr::create(Size, OutContext));
     }
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index f0e82c9ca8350..60abe66d8b9e8 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -567,7 +567,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
   }
 
   // Some accesses go beyond the end of the global, don't bother.
-  if (Offset > DL.getTypeAllocSize(GV->getValueType()))
+  if (Offset > GV->getGlobalSize(DL))
     return nullptr;
 
   LLVM_DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n");
@@ -1229,8 +1229,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
         DIGlobalVariable *DGV = GVe->getVariable();
         DIExpression *E = GVe->getExpression();
         const DataLayout &DL = GV->getDataLayout();
-        unsigned SizeInOctets =
-            DL.getTypeAllocSizeInBits(NewGV->getValueType()) / 8;
+        unsigned SizeInOctets = NewGV->getGlobalSize(DL);
 
         // It is expected that the address of global optimized variable is on
         // top of the stack. After optimization, value of that variable will
@@ -1570,7 +1569,8 @@ processInternalGlobal(GlobalVariable *GV, const GlobalStatus &GS,
       return true;
   }
   Value *StoredOnceValue = GS.getStoredOnceValue();
-  if (GS.StoredType == GlobalStatus::StoredOnce && StoredOnceValue) {
+  if (GS.StoredType == GlobalStatus::StoredOnce && StoredOnceValue &&
+      StoredOnceValue->getType()->isSized()) {
     Function &StoreFn =
         const_cast<Function &>(*GS.StoredOnceStore->getFunction());
     bool CanHaveNonUndefGlobalInitializer =
@@ -1585,8 +1585,8 @@ processInternalGlobal(GlobalVariable *GV, const GlobalStatus &GS,
     // shared memory (AS 3).
     auto *SOVConstant = dyn_cast<Constant>(StoredOnceValue);
     if (SOVConstant && isa<UndefValue>(GV->getInitializer()) &&
-        DL.getTypeAllocSize(SOVConstant->getType()) ==
-            DL.getTypeAllocSize(GV->getValueType()) &&
+        DL.getTypeAllocSize(SOVConstant->getType()).getFixedValue() ==
+            GV->getGlobalSize(DL) &&
         CanHaveNonUndefGlobalInitializer) {
       if (SOVConstant->getType() == GV->getValueType()) {
         // Change the initializer in place.
diff --git a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
index 1feb61d12e1b5..ae24476e57706 100644
--- a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -851,7 +851,7 @@ void LowerTypeTestsModule::buildBitSetsFromGlobalVariables(
     }
 
     GlobalInits.push_back(GV->getInitializer());
-    uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
+    uint64_t InitSize = GV->getGlobalSize(DL);
     CurOffset = GVOffset + InitSize;
 
     // Compute the amount of padding that we'd like for the next element.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 280ea3112c7bd..c41e21a6b8209 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -883,7 +883,7 @@ static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
       if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
         return false;
 
-      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
+      uint64_t InitSize = GV->getGlobalSize(DL);
       if (InitSize > MaxSize)
         return false;
       continue;
diff --git a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
index 1e5946a009caa..48131955836b3 100644
--- a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
+++ b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
@@ -213,8 +213,7 @@ static Constant *getVTableAddressPointOffset(GlobalVariable *VTable,
                                              uint32_t AddressPointOffset) {
   Module &M = *VTable->getParent();
   LLVMContext &Context = M.getContext();
-  assert(AddressPointOffset <
-             M.getDataLayout().getTypeAllocSize(VTable->getValueType()) &&
+  assert(AddressPointOffset < VTable->getGlobalSize(M.getDataLayout()) &&
          "Out-of-bound access");
 
   return ConstantExpr::getInBoundsGetElementPtr(
diff --git a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
index c4c4446364aa2..f6f73fb69f7a9 100644
--- a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
+++ b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -1536,8 +1536,7 @@ void InstrLowerer::getOrCreateVTableProfData(GlobalVariable *GV) {
   const std::string PGOVTableName = getPGOName(*GV);
   // Record the length of the vtable. This is needed since vtable pointers
   // loaded from C++ objects might be from the middle of a vtable definition.
-  uint32_t VTableSizeVal =
-      M.getDataLayout().getTypeAllocSize(GV->getValueType());
+  uint32_t VTableSizeVal = GV->getGlobalSize(M.getDataLayout());
 
   Constant *DataVals[] = {
 #define INSTR_PROF_VTABLE_DATA(Type, LLVMType, Name, Init) Init,
diff --git a/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp b/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp
index 291f9b2502948..0c320f524be96 100644
--- a/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp
+++ b/llvm/lib/Transforms/Scalar/JumpTableToSwitch.cpp
@@ -78,7 +78,7 @@ static std::optional<JumpTableTy> parseJumpTable(GetElementPtrInst *GEP,
   if (!ConstantOffset.isZero())
     return std::nullopt;
   APInt StrideBytes = VariableOffsets.front().second;
-  const uint64_t JumpTableSizeBytes = DL.getTypeAllocSize(GV->getValueType());
+  const uint64_t JumpTableSizeBytes = GV->getGlobalSize(DL);
   if (JumpTableSizeBytes % StrideBytes.getZExtValue() != 0)
     return std::nullopt;
   const uint64_t N = JumpTableSizeBytes / StrideBytes.getZExtValue();
diff --git a/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp b/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp
index a814867652cd1..1891dacd19a7a 100644
--- a/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp
+++ b/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp
@@ -91,7 +91,7 @@ static bool shouldConvertToRelLookupTable(LookupTableInfo &Info, Module &M,
       || (TT.isX86() && TT.isOSDarwin());
 
   APInt Offset(IndexWidth, 0);
-  uint64_t GVSize = DL.getTypeAllocSize(GV.getValueType());
+  uint64_t GVSize = GV.getGlobalSize(DL);
   for (; Offset.ult(GVSize); Offset += Stride) {
     Constant *C =
         ConstantFoldLoadFromConst(GV.getInitializer(), ElemType, Offset, DL);


