[clang] [mlir] [llvm] [llvm][TypeSize] Consider TypeSize of '0' to be fixed/scalable-agnostic. (PR #72994)
Sander de Smalen via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 21 07:53:30 PST 2023
https://github.com/sdesmalen-arm updated https://github.com/llvm/llvm-project/pull/72994
>From ffc1922935f6c5e9fc66db7fcce72ad0f5d8bef8 Mon Sep 17 00:00:00 2001
From: Sander de Smalen <sander.desmalen at arm.com>
Date: Tue, 21 Nov 2023 13:50:33 +0000
Subject: [PATCH 1/3] [llvm][TypeSize] Fix addition/subtraction in TypeSize.
It seems TypeSize is currently broken in the sense that:
TypeSize::Fixed(4) + TypeSize::Scalable(4) => TypeSize::Fixed(8)
without tripping the assert that explicitly guards against this case:
assert(LHS.Scalable == RHS.Scalable && ...);
The reason the assert never fires is that `Scalable` is a static method of class
TypeSize, and LHS and RHS are both objects of class TypeSize. So `LHS.Scalable ==
RHS.Scalable` compares the address of the static function `Scalable` with itself,
which is trivially true for any two TypeSize objects, fixed or scalable.
This patch fixes the issue by renaming `TypeSize::Scalable` ->
`TypeSize::getScalable`, as well as `TypeSize::Fixed` to `TypeSize::getFixed`,
so that it no longer clashes with the variable in FixedOrScalableQuantity.
The new methods now also better match the coding standard, which specifies that:
* Variable names should be nouns (as they represent state)
* Function names should be verb phrases (as they represent actions)
---
clang/lib/CodeGen/CGGPUBuiltin.cpp | 2 +-
.../llvm/Analysis/TargetTransformInfoImpl.h | 2 +-
llvm/include/llvm/CodeGen/BasicTTIImpl.h | 2 +-
llvm/include/llvm/CodeGen/LowLevelType.h | 2 +-
.../include/llvm/CodeGen/TargetRegisterInfo.h | 2 +-
llvm/include/llvm/IR/DataLayout.h | 19 +++--
llvm/include/llvm/Support/TypeSize.h | 4 +-
.../Instrumentation/AddressSanitizerCommon.h | 2 +-
llvm/lib/Analysis/BasicAliasAnalysis.cpp | 4 +-
llvm/lib/Analysis/Local.cpp | 2 +-
.../CodeGen/GlobalISel/LegalizerHelper.cpp | 4 +-
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 19 +++--
llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 19 +++--
.../SelectionDAG/LegalizeIntegerTypes.cpp | 10 +--
.../SelectionDAG/LegalizeTypesGeneric.cpp | 6 +-
.../SelectionDAG/LegalizeVectorTypes.cpp | 4 +-
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 12 +--
.../SelectionDAG/SelectionDAGBuilder.cpp | 2 +-
.../CodeGen/SelectionDAG/TargetLowering.cpp | 14 ++--
llvm/lib/CodeGen/StackProtector.cpp | 4 +-
llvm/lib/CodeGen/ValueTypes.cpp | 2 +-
llvm/lib/IR/DataLayout.cpp | 10 +--
llvm/lib/IR/DebugInfo.cpp | 2 +-
llvm/lib/IR/Type.cpp | 32 +++++---
.../Target/AArch64/AArch64FrameLowering.cpp | 2 +-
.../Target/AArch64/AArch64ISelLowering.cpp | 8 +-
llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 82 +++++++++----------
.../AArch64/AArch64SelectionDAGInfo.cpp | 8 +-
.../AArch64/AArch64TargetTransformInfo.cpp | 13 +--
.../AArch64/GISel/AArch64RegisterBankInfo.cpp | 10 +--
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 2 +-
.../Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 2 +-
.../AMDGPU/AMDGPUTargetTransformInfo.cpp | 6 +-
.../Target/AMDGPU/R600TargetTransformInfo.cpp | 2 +-
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 4 +-
llvm/lib/Target/ARM/ARMISelLowering.cpp | 12 +--
llvm/lib/Target/ARM/ARMTargetTransformInfo.h | 10 +--
llvm/lib/Target/DirectX/CBufferDataLayout.cpp | 6 +-
.../Target/Hexagon/HexagonISelLowering.cpp | 4 +-
.../Target/Hexagon/HexagonISelLoweringHVX.cpp | 3 +-
.../Hexagon/HexagonTargetTransformInfo.cpp | 6 +-
.../Target/NVPTX/NVPTXTargetTransformInfo.h | 2 +-
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 24 +++---
.../Target/PowerPC/PPCTargetTransformInfo.cpp | 6 +-
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 4 +-
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 4 +-
.../Target/RISCV/RISCVTargetTransformInfo.cpp | 13 +--
.../SystemZ/SystemZTargetTransformInfo.cpp | 6 +-
llvm/lib/Target/VE/VETargetTransformInfo.h | 6 +-
.../WebAssemblyTargetTransformInfo.cpp | 6 +-
llvm/lib/Target/X86/X86ISelLowering.cpp | 24 +++---
.../lib/Target/X86/X86TargetTransformInfo.cpp | 12 +--
llvm/lib/Transforms/Coroutines/CoroFrame.cpp | 2 +-
.../Scalar/DeadStoreElimination.cpp | 2 +-
.../lib/Transforms/Scalar/MemCpyOptimizer.cpp | 6 +-
llvm/lib/Transforms/Utils/Local.cpp | 4 +-
llvm/unittests/CodeGen/LowLevelTypeTest.cpp | 6 +-
.../SelectionDAGAddressAnalysisTest.cpp | 14 ++--
llvm/unittests/IR/InstructionsTest.cpp | 14 ++--
llvm/unittests/Support/TypeSizeTest.cpp | 31 ++++---
mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp | 16 ++--
61 files changed, 294 insertions(+), 269 deletions(-)
diff --git a/clang/lib/CodeGen/CGGPUBuiltin.cpp b/clang/lib/CodeGen/CGGPUBuiltin.cpp
index de4ee68c0da1e79..e465789a003eba3 100644
--- a/clang/lib/CodeGen/CGGPUBuiltin.cpp
+++ b/clang/lib/CodeGen/CGGPUBuiltin.cpp
@@ -101,7 +101,7 @@ packArgsIntoNVPTXFormatBuffer(CodeGenFunction *CGF, const CallArgList &Args) {
// If there are no args, pass a null pointer and size 0
llvm::Value *BufferPtr =
llvm::ConstantPointerNull::get(llvm::PointerType::getUnqual(Ctx));
- return {BufferPtr, llvm::TypeSize::Fixed(0)};
+ return {BufferPtr, llvm::TypeSize::getFixed(0)};
} else {
llvm::SmallVector<llvm::Type *, 8> ArgTypes;
for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I)
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 2ccf57c22234f9a..e6fc178365626ba 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -443,7 +443,7 @@ class TargetTransformInfoImplBase {
}
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
- return TypeSize::Fixed(32);
+ return TypeSize::getFixed(32);
}
unsigned getMinVectorRegisterBitWidth() const { return 128; }
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 7a8f36da58ceccb..752b554d6989919 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -714,7 +714,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
/// @{
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
- return TypeSize::Fixed(32);
+ return TypeSize::getFixed(32);
}
std::optional<unsigned> getMaxVScale() const { return std::nullopt; }
diff --git a/llvm/include/llvm/CodeGen/LowLevelType.h b/llvm/include/llvm/CodeGen/LowLevelType.h
index d6f1c3b31fbbb5d..81559c1bac55c71 100644
--- a/llvm/include/llvm/CodeGen/LowLevelType.h
+++ b/llvm/include/llvm/CodeGen/LowLevelType.h
@@ -182,7 +182,7 @@ class LLT {
/// Returns the total size of the type. Must only be called on sized types.
constexpr TypeSize getSizeInBits() const {
if (isPointer() || isScalar())
- return TypeSize::Fixed(getScalarSizeInBits());
+ return TypeSize::getFixed(getScalarSizeInBits());
auto EC = getElementCount();
return TypeSize(getScalarSizeInBits() * EC.getKnownMinValue(),
EC.isScalable());
diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
index 4fb6ba7c26930af..5a6da209e8da686 100644
--- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -284,7 +284,7 @@ class TargetRegisterInfo : public MCRegisterInfo {
/// Return the size in bits of a register from class RC.
TypeSize getRegSizeInBits(const TargetRegisterClass &RC) const {
- return TypeSize::Fixed(getRegClassInfo(RC).RegSize);
+ return TypeSize::getFixed(getRegClassInfo(RC).RegSize);
}
/// Return the size in bytes of the stack slot allocated to hold a spilled
diff --git a/llvm/include/llvm/IR/DataLayout.h b/llvm/include/llvm/IR/DataLayout.h
index b3633b67b9debda..71f7f51d8ee431a 100644
--- a/llvm/include/llvm/IR/DataLayout.h
+++ b/llvm/include/llvm/IR/DataLayout.h
@@ -673,9 +673,10 @@ inline TypeSize DataLayout::getTypeSizeInBits(Type *Ty) const {
assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
switch (Ty->getTypeID()) {
case Type::LabelTyID:
- return TypeSize::Fixed(getPointerSizeInBits(0));
+ return TypeSize::getFixed(getPointerSizeInBits(0));
case Type::PointerTyID:
- return TypeSize::Fixed(getPointerSizeInBits(Ty->getPointerAddressSpace()));
+ return TypeSize::getFixed(
+ getPointerSizeInBits(Ty->getPointerAddressSpace()));
case Type::ArrayTyID: {
ArrayType *ATy = cast<ArrayType>(Ty);
return ATy->getNumElements() *
@@ -685,24 +686,24 @@ inline TypeSize DataLayout::getTypeSizeInBits(Type *Ty) const {
// Get the layout annotation... which is lazily created on demand.
return getStructLayout(cast<StructType>(Ty))->getSizeInBits();
case Type::IntegerTyID:
- return TypeSize::Fixed(Ty->getIntegerBitWidth());
+ return TypeSize::getFixed(Ty->getIntegerBitWidth());
case Type::HalfTyID:
case Type::BFloatTyID:
- return TypeSize::Fixed(16);
+ return TypeSize::getFixed(16);
case Type::FloatTyID:
- return TypeSize::Fixed(32);
+ return TypeSize::getFixed(32);
case Type::DoubleTyID:
case Type::X86_MMXTyID:
- return TypeSize::Fixed(64);
+ return TypeSize::getFixed(64);
case Type::PPC_FP128TyID:
case Type::FP128TyID:
- return TypeSize::Fixed(128);
+ return TypeSize::getFixed(128);
case Type::X86_AMXTyID:
- return TypeSize::Fixed(8192);
+ return TypeSize::getFixed(8192);
// In memory objects this is always aligned to a higher boundary, but
// only 80 bits contain information.
case Type::X86_FP80TyID:
- return TypeSize::Fixed(80);
+ return TypeSize::getFixed(80);
case Type::FixedVectorTyID:
case Type::ScalableVectorTyID: {
VectorType *VTy = cast<VectorType>(Ty);
diff --git a/llvm/include/llvm/Support/TypeSize.h b/llvm/include/llvm/Support/TypeSize.h
index 8e638f8278e828b..ada98d809fc236f 100644
--- a/llvm/include/llvm/Support/TypeSize.h
+++ b/llvm/include/llvm/Support/TypeSize.h
@@ -321,10 +321,10 @@ class TypeSize : public details::FixedOrScalableQuantity<TypeSize, uint64_t> {
static constexpr TypeSize get(ScalarTy Quantity, bool Scalable) {
return TypeSize(Quantity, Scalable);
}
- static constexpr TypeSize Fixed(ScalarTy ExactSize) {
+ static constexpr TypeSize getFixed(ScalarTy ExactSize) {
return TypeSize(ExactSize, false);
}
- static constexpr TypeSize Scalable(ScalarTy MinimumSize) {
+ static constexpr TypeSize getScalable(ScalarTy MinimumSize) {
return TypeSize(MinimumSize, true);
}
diff --git a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
index 4affc11429016c9..6322df90b18e1dd 100644
--- a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
+++ b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
@@ -27,7 +27,7 @@ class InterestingMemoryOperand {
Use *PtrUse;
bool IsWrite;
Type *OpType;
- TypeSize TypeStoreSize = TypeSize::Fixed(0);
+ TypeSize TypeStoreSize = TypeSize::getFixed(0);
MaybeAlign Alignment;
// The mask Value, if we're looking at a masked load/store.
Value *MaybeMask;
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 476028bd91b23cd..62504f8fa7c87da 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -111,7 +111,7 @@ static std::optional<TypeSize> getObjectSize(const Value *V,
Opts.RoundToAlign = RoundToAlign;
Opts.NullIsUnknownSize = NullIsValidLoc;
if (getObjectSize(V, Size, DL, &TLI, Opts))
- return TypeSize::Fixed(Size);
+ return TypeSize::getFixed(Size);
return std::nullopt;
}
@@ -177,7 +177,7 @@ static TypeSize getMinimalExtentFrom(const Value &V,
// accessed, thus valid.
if (LocSize.isPrecise())
DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
- return TypeSize::Fixed(DerefBytes);
+ return TypeSize::getFixed(DerefBytes);
}
/// Returns true if we can prove that the object specified by V has size Size.
diff --git a/llvm/lib/Analysis/Local.cpp b/llvm/lib/Analysis/Local.cpp
index 8dd1a7e2975347d..30757abeb098027 100644
--- a/llvm/lib/Analysis/Local.cpp
+++ b/llvm/lib/Analysis/Local.cpp
@@ -65,7 +65,7 @@ Value *llvm::emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL,
if (Op->getType() != IntIdxTy)
Op = Builder->CreateIntCast(Op, IntIdxTy, true, Op->getName() + ".c");
TypeSize TSize = DL.getTypeAllocSize(GTI.getIndexedType());
- if (TSize != TypeSize::Fixed(1)) {
+ if (TSize != TypeSize::getFixed(1)) {
Value *Scale = Builder->CreateTypeSize(IntIdxTy->getScalarType(), TSize);
if (IntIdxTy->isVectorTy())
Scale = Builder->CreateVectorSplat(
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index dd5577d47f97764..47d676e94720765 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -6968,8 +6968,8 @@ LegalizerHelper::lowerExtractInsertVectorElt(MachineInstr &MI) {
Align EltAlign;
MachinePointerInfo PtrInfo;
- auto StackTemp = createStackTemporary(TypeSize::Fixed(VecTy.getSizeInBytes()),
- VecAlign, PtrInfo);
+ auto StackTemp = createStackTemporary(
+ TypeSize::getFixed(VecTy.getSizeInBytes()), VecAlign, PtrInfo);
MIRBuilder.buildStore(SrcVec, StackTemp, PtrInfo, VecAlign);
// Get the pointer to the element, and be sure not to hit undefined behavior
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 2fc9a2866c32dba..051974fbc4295a6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10806,7 +10806,7 @@ SDValue DAGCombiner::visitFunnelShift(SDNode *N) {
RHS->getMemOperand()->getFlags(), &Fast) &&
Fast) {
SDValue NewPtr = DAG.getMemBasePlusOffset(
- RHS->getBasePtr(), TypeSize::Fixed(PtrOff), DL);
+ RHS->getBasePtr(), TypeSize::getFixed(PtrOff), DL);
AddToWorklist(NewPtr.getNode());
SDValue Load = DAG.getLoad(
VT, DL, RHS->getChain(), NewPtr,
@@ -12928,7 +12928,7 @@ SDValue DAGCombiner::CombineExtLoad(SDNode *N) {
LN0->getPointerInfo().getWithOffset(Offset), SplitSrcVT, Align,
LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
- BasePtr = DAG.getMemBasePlusOffset(BasePtr, TypeSize::Fixed(Stride), DL);
+ BasePtr = DAG.getMemBasePlusOffset(BasePtr, TypeSize::getFixed(Stride), DL);
Loads.push_back(SplitLoad.getValue(0));
Chains.push_back(SplitLoad.getValue(1));
@@ -14329,8 +14329,8 @@ SDValue DAGCombiner::reduceLoadWidth(SDNode *N) {
// The original load itself didn't wrap, so an offset within it doesn't.
SDNodeFlags Flags;
Flags.setNoUnsignedWrap(true);
- SDValue NewPtr = DAG.getMemBasePlusOffset(LN0->getBasePtr(),
- TypeSize::Fixed(PtrOff), DL, Flags);
+ SDValue NewPtr = DAG.getMemBasePlusOffset(
+ LN0->getBasePtr(), TypeSize::getFixed(PtrOff), DL, Flags);
AddToWorklist(NewPtr.getNode());
SDValue Load;
@@ -19491,7 +19491,7 @@ ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
SDValue Ptr = St->getBasePtr();
if (StOffset) {
SDLoc DL(IVal);
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(StOffset), DL);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(StOffset), DL);
}
++OpsNarrowed;
@@ -19617,7 +19617,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
return SDValue();
SDValue NewPtr =
- DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(PtrOff), SDLoc(LD));
+ DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(PtrOff), SDLoc(LD));
SDValue NewLD =
DAG.getLoad(NewVT, SDLoc(N0), LD->getChain(), NewPtr,
LD->getPointerInfo().getWithOffset(PtrOff), NewAlign,
@@ -20956,7 +20956,7 @@ SDValue DAGCombiner::replaceStoreOfFPConstant(StoreSDNode *ST) {
SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
ST->getOriginalAlign(), MMOFlags, AAInfo);
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(4), DL);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(4), DL);
SDValue St1 = DAG.getStore(Chain, DL, Hi, Ptr,
ST->getPointerInfo().getWithOffset(4),
ST->getOriginalAlign(), MMOFlags, AAInfo);
@@ -21018,7 +21018,7 @@ SDValue DAGCombiner::replaceStoreOfInsertLoad(StoreSDNode *ST) {
// info
if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
unsigned COffset = CIdx->getSExtValue() * EltVT.getSizeInBits() / 8;
- NewPtr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(COffset), DL);
+ NewPtr = DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(COffset), DL);
PointerInfo = ST->getPointerInfo().getWithOffset(COffset);
}
@@ -21415,7 +21415,8 @@ SDValue DAGCombiner::splitMergedValStore(StoreSDNode *ST) {
// Lower value store.
SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(),
ST->getOriginalAlign(), MMOFlags, AAInfo);
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(HalfValBitSize / 8), DL);
+ Ptr =
+ DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(HalfValBitSize / 8), DL);
// Higher value store.
SDValue St1 = DAG.getStore(
St0, DL, Hi, Ptr, ST->getPointerInfo().getWithOffset(HalfValBitSize / 8),
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index ad5a4506efbd828..7a54141fa711a18 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -482,7 +482,7 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
Lo = DAG.getStore(Chain, dl, Lo, Ptr, ST->getPointerInfo(),
ST->getOriginalAlign(), MMOFlags, AAInfo);
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(4), dl);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(4), dl);
Hi = DAG.getStore(Chain, dl, Hi, Ptr,
ST->getPointerInfo().getWithOffset(4),
ST->getOriginalAlign(), MMOFlags, AAInfo);
@@ -591,7 +591,8 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
// Store the remaining ExtraWidth bits.
IncrementSize = RoundWidth / 8;
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
+ Ptr =
+ DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(IncrementSize), dl);
Hi = DAG.getNode(
ISD::SRL, dl, Value.getValueType(), Value,
DAG.getConstant(RoundWidth, dl,
@@ -804,7 +805,8 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
// Load the remaining ExtraWidth bits.
IncrementSize = RoundWidth / 8;
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
+ Ptr =
+ DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(IncrementSize), dl);
Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Chain, Ptr,
LD->getPointerInfo().getWithOffset(IncrementSize),
ExtraVT, LD->getOriginalAlign(), MMOFlags, AAInfo);
@@ -832,7 +834,8 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
// Load the remaining ExtraWidth bits.
IncrementSize = RoundWidth / 8;
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
+ Ptr =
+ DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(IncrementSize), dl);
Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0), Chain, Ptr,
LD->getPointerInfo().getWithOffset(IncrementSize),
ExtraVT, LD->getOriginalAlign(), MMOFlags, AAInfo);
@@ -1521,7 +1524,8 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
unsigned Offset = TypeByteSize*i;
- SDValue Idx = DAG.getMemBasePlusOffset(FIPtr, TypeSize::Fixed(Offset), dl);
+ SDValue Idx =
+ DAG.getMemBasePlusOffset(FIPtr, TypeSize::getFixed(Offset), dl);
if (Truncate)
Stores.push_back(DAG.getTruncStore(DAG.getEntryNode(), dl,
@@ -1583,7 +1587,7 @@ void SelectionDAGLegalize::getSignAsIntValue(FloatSignAsInt &State,
// Advance the pointer so that the loaded byte will contain the sign bit.
unsigned ByteOffset = (NumBits / 8) - 1;
IntPtr =
- DAG.getMemBasePlusOffset(StackPtr, TypeSize::Fixed(ByteOffset), DL);
+ DAG.getMemBasePlusOffset(StackPtr, TypeSize::getFixed(ByteOffset), DL);
State.IntPointerInfo = MachinePointerInfo::getFixedStack(MF, FI,
ByteOffset);
}
@@ -2652,7 +2656,8 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(SDNode *Node,
SDValue Store1 = DAG.getStore(MemChain, dl, Lo, StackSlot,
MachinePointerInfo());
// Store the hi of the constructed double.
- SDValue HiPtr = DAG.getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), dl);
+ SDValue HiPtr =
+ DAG.getMemBasePlusOffset(StackSlot, TypeSize::getFixed(4), dl);
SDValue Store2 =
DAG.getStore(MemChain, dl, Hi, HiPtr, MachinePointerInfo());
MemChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 011cde568786226..4f292263c38f543 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -3898,7 +3898,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
// Increment the pointer to the other half.
unsigned IncrementSize = NVT.getSizeInBits()/8;
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(IncrementSize), dl);
Hi = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr,
N->getPointerInfo().getWithOffset(IncrementSize), NEVT,
N->getOriginalAlign(), MMOFlags, AAInfo);
@@ -3922,7 +3922,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
N->getOriginalAlign(), MMOFlags, AAInfo);
// Increment the pointer to the other half.
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(IncrementSize), dl);
// Load the rest of the low bits.
Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, NVT, Ch, Ptr,
N->getPointerInfo().getWithOffset(IncrementSize),
@@ -4430,7 +4430,7 @@ void DAGTypeLegalizer::ExpandIntRes_ShiftThroughStack(SDNode *N, SDValue &Lo,
// FIXME: should we be more picky about alignment?
Align StackSlotAlignment(1);
SDValue StackPtr = DAG.CreateStackTemporary(
- TypeSize::Fixed(StackSlotByteWidth), StackSlotAlignment);
+ TypeSize::getFixed(StackSlotByteWidth), StackSlotAlignment);
EVT PtrTy = StackPtr.getValueType();
SDValue Ch = DAG.getEntryNode();
@@ -5445,7 +5445,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
// Increment the pointer to the other half.
unsigned IncrementSize = NVT.getSizeInBits()/8;
- Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
+ Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::getFixed(IncrementSize));
Hi = DAG.getTruncStore(Ch, dl, Hi, Ptr,
N->getPointerInfo().getWithOffset(IncrementSize),
NEVT, N->getOriginalAlign(), MMOFlags, AAInfo);
@@ -5480,7 +5480,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
N->getOriginalAlign(), MMOFlags, AAInfo);
// Increment the pointer to the other half.
- Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
+ Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::getFixed(IncrementSize));
// Store the lowest ExcessBits bits in the second half.
Lo = DAG.getTruncStore(Ch, dl, Lo, Ptr,
N->getPointerInfo().getWithOffset(IncrementSize),
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index 296242c00401c2f..a55364ea2c4e5b3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -176,7 +176,7 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
// Increment the pointer to the other half.
unsigned IncrementSize = NOutVT.getSizeInBits() / 8;
StackPtr =
- DAG.getMemBasePlusOffset(StackPtr, TypeSize::Fixed(IncrementSize), dl);
+ DAG.getMemBasePlusOffset(StackPtr, TypeSize::getFixed(IncrementSize), dl);
// Load the second half from the stack slot.
Hi = DAG.getLoad(NOutVT, dl, Store, StackPtr,
@@ -265,7 +265,7 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo,
// Increment the pointer to the other half.
unsigned IncrementSize = NVT.getSizeInBits() / 8;
- Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(IncrementSize), dl);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(IncrementSize), dl);
Hi = DAG.getLoad(
NVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(IncrementSize),
LD->getOriginalAlign(), LD->getMemOperand()->getFlags(), AAInfo);
@@ -479,7 +479,7 @@ SDValue DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) {
St->getOriginalAlign(), St->getMemOperand()->getFlags(),
AAInfo);
- Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
+ Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::getFixed(IncrementSize));
Hi = DAG.getStore(
Chain, dl, Hi, Ptr, St->getPointerInfo().getWithOffset(IncrementSize),
St->getOriginalAlign(), St->getMemOperand()->getFlags(), AAInfo);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 29e78a8bfdfa089..e24f514273591a7 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1239,7 +1239,7 @@ void DAGTypeLegalizer::IncrementPointer(MemSDNode *N, EVT MemVT,
} else {
MPI = N->getPointerInfo().getWithOffset(IncrementSize);
// Increment the pointer to the other half.
- Ptr = DAG.getObjectPtrOffset(DL, Ptr, TypeSize::Fixed(IncrementSize));
+ Ptr = DAG.getObjectPtrOffset(DL, Ptr, TypeSize::getFixed(IncrementSize));
}
}
@@ -7207,7 +7207,7 @@ DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
unsigned i = 0, Offset = Increment;
for (i=1; i < NumElts; ++i, Offset += Increment) {
SDValue NewBasePtr =
- DAG.getObjectPtrOffset(dl, BasePtr, TypeSize::Fixed(Offset));
+ DAG.getObjectPtrOffset(dl, BasePtr, TypeSize::getFixed(Offset));
Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr,
LD->getPointerInfo().getWithOffset(Offset), LdEltVT,
LD->getOriginalAlign(), MMOFlags, AAInfo);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index cb79b7a73cd3c72..8a9e74d6de2bde1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -7561,7 +7561,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
if (Value.getNode()) {
Store = DAG.getStore(
Chain, dl, Value,
- DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
+ DAG.getMemBasePlusOffset(Dst, TypeSize::getFixed(DstOff), dl),
DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
OutChains.push_back(Store);
}
@@ -7586,14 +7586,14 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
Value = DAG.getExtLoad(
ISD::EXTLOAD, dl, NVT, Chain,
- DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
+ DAG.getMemBasePlusOffset(Src, TypeSize::getFixed(SrcOff), dl),
SrcPtrInfo.getWithOffset(SrcOff), VT,
commonAlignment(*SrcAlign, SrcOff), SrcMMOFlags, NewAAInfo);
OutLoadChains.push_back(Value.getValue(1));
Store = DAG.getTruncStore(
Chain, dl, Value,
- DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
+ DAG.getMemBasePlusOffset(Dst, TypeSize::getFixed(DstOff), dl),
DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags, NewAAInfo);
OutStoreChains.push_back(Store);
}
@@ -7730,7 +7730,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
Value = DAG.getLoad(
VT, dl, Chain,
- DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
+ DAG.getMemBasePlusOffset(Src, TypeSize::getFixed(SrcOff), dl),
SrcPtrInfo.getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags, NewAAInfo);
LoadValues.push_back(Value);
LoadChains.push_back(Value.getValue(1));
@@ -7745,7 +7745,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
Store = DAG.getStore(
Chain, dl, LoadValues[i],
- DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
+ DAG.getMemBasePlusOffset(Dst, TypeSize::getFixed(DstOff), dl),
DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
OutChains.push_back(Store);
DstOff += VTSize;
@@ -7877,7 +7877,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
assert(Value.getValueType() == VT && "Value with wrong type.");
SDValue Store = DAG.getStore(
Chain, dl, Value,
- DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
+ DAG.getMemBasePlusOffset(Dst, TypeSize::getFixed(DstOff), dl),
DstPtrInfo.getWithOffset(DstOff), Alignment,
isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone,
NewAAInfo);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 2fa37f4c519e071..da2515b38f4b84a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2056,7 +2056,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
// An aggregate return value cannot wrap around the address space, so
// offsets to its parts don't wrap either.
SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
- TypeSize::Fixed(Offsets[i]));
+ TypeSize::getFixed(Offsets[i]));
SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
if (MemVTs[i] != ValueVTs[i])
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 127c2410985b403..c5977546828f638 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -4609,8 +4609,8 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) {
SDValue Ptr = Lod->getBasePtr();
if (bestOffset != 0)
- Ptr =
- DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(bestOffset), dl);
+ Ptr = DAG.getMemBasePlusOffset(Ptr, TypeSize::getFixed(bestOffset),
+ dl);
SDValue NewLoad =
DAG.getLoad(newVT, dl, Lod->getChain(), Ptr,
Lod->getPointerInfo().getWithOffset(bestOffset),
@@ -9346,7 +9346,7 @@ TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
SrcEltVT, LD->getOriginalAlign(),
LD->getMemOperand()->getFlags(), LD->getAAInfo());
- BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, TypeSize::Fixed(Stride));
+ BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, TypeSize::getFixed(Stride));
Vals.push_back(ScalarLoad.getValue(0));
LoadChains.push_back(ScalarLoad.getValue(1));
@@ -9421,7 +9421,7 @@ SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
DAG.getVectorIdxConstant(Idx, SL));
SDValue Ptr =
- DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Idx * Stride));
+ DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::getFixed(Idx * Stride));
// This scalar TruncStore may be illegal, but we legalize it later.
SDValue Store = DAG.getTruncStore(
@@ -9557,7 +9557,7 @@ TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
LD->getAAInfo());
- Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
+ Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::getFixed(IncrementSize));
Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
LD->getPointerInfo().getWithOffset(IncrementSize),
NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
@@ -9567,7 +9567,7 @@ TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
LD->getAAInfo());
- Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
+ Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::getFixed(IncrementSize));
Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
LD->getPointerInfo().getWithOffset(IncrementSize),
NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(),
@@ -9709,7 +9709,7 @@ SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST,
Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
ST->getMemOperand()->getFlags());
- Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
+ Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::getFixed(IncrementSize));
Store2 = DAG.getTruncStore(
Chain, dl, DAG.getDataLayout().isLittleEndian() ? Hi : Lo, Ptr,
ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index 9a4d7ff328e7797..48dc7cb232e3051 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -215,14 +215,14 @@ static bool HasAddressTaken(const Instruction *AI, TypeSize AllocSize,
APInt Offset(IndexSize, 0);
if (!GEP->accumulateConstantOffset(DL, Offset))
return true;
- TypeSize OffsetSize = TypeSize::Fixed(Offset.getLimitedValue());
+ TypeSize OffsetSize = TypeSize::getFixed(Offset.getLimitedValue());
if (!TypeSize::isKnownGT(AllocSize, OffsetSize))
return true;
// Adjust AllocSize to be the space remaining after this offset.
// We can't subtract a fixed size from a scalable one, so in that case
// assume the scalable value is of minimum size.
TypeSize NewAllocSize =
- TypeSize::Fixed(AllocSize.getKnownMinValue()) - OffsetSize;
+ TypeSize::getFixed(AllocSize.getKnownMinValue()) - OffsetSize;
if (HasAddressTaken(I, NewAllocSize, M, VisitedPHIs))
return true;
break;
diff --git a/llvm/lib/CodeGen/ValueTypes.cpp b/llvm/lib/CodeGen/ValueTypes.cpp
index 2d16ff2dfb2fbf5..30507ffea932525 100644
--- a/llvm/lib/CodeGen/ValueTypes.cpp
+++ b/llvm/lib/CodeGen/ValueTypes.cpp
@@ -143,7 +143,7 @@ ElementCount EVT::getExtendedVectorElementCount() const {
TypeSize EVT::getExtendedSizeInBits() const {
assert(isExtended() && "Type is not extended!");
if (IntegerType *ITy = dyn_cast<IntegerType>(LLVMTy))
- return TypeSize::Fixed(ITy->getBitWidth());
+ return TypeSize::getFixed(ITy->getBitWidth());
if (VectorType *VTy = dyn_cast<VectorType>(LLVMTy))
return VTy->getPrimitiveSizeInBits();
llvm_unreachable("Unrecognized extended type!");
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
index c8ef232082554bf..e28f043cf9e0d06 100644
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -46,7 +46,7 @@ using namespace llvm;
//===----------------------------------------------------------------------===//
StructLayout::StructLayout(StructType *ST, const DataLayout &DL)
- : StructSize(TypeSize::Fixed(0)) {
+ : StructSize(TypeSize::getFixed(0)) {
assert(!ST->isOpaque() && "Cannot get layout of opaque structs");
IsPadded = false;
NumElements = ST->getNumElements();
@@ -55,7 +55,7 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL)
for (unsigned i = 0, e = NumElements; i != e; ++i) {
Type *Ty = ST->getElementType(i);
if (i == 0 && Ty->isScalableTy())
- StructSize = TypeSize::Scalable(0);
+ StructSize = TypeSize::getScalable(0);
const Align TyAlign = ST->isPacked() ? Align(1) : DL.getABITypeAlign(Ty);
@@ -68,7 +68,7 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL)
// contains both fixed size and scalable size data type members).
if (!StructSize.isScalable() && !isAligned(TyAlign, StructSize)) {
IsPadded = true;
- StructSize = TypeSize::Fixed(alignTo(StructSize, TyAlign));
+ StructSize = TypeSize::getFixed(alignTo(StructSize, TyAlign));
}
// Keep track of maximum alignment constraint.
@@ -83,7 +83,7 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL)
// and all array elements would be aligned correctly.
if (!StructSize.isScalable() && !isAligned(StructAlignment, StructSize)) {
IsPadded = true;
- StructSize = TypeSize::Fixed(alignTo(StructSize, StructAlignment));
+ StructSize = TypeSize::getFixed(alignTo(StructSize, StructAlignment));
}
}
@@ -93,7 +93,7 @@ unsigned StructLayout::getElementContainingOffset(uint64_t FixedOffset) const {
assert(!StructSize.isScalable() &&
"Cannot get element at offset for structure containing scalable "
"vector types");
- TypeSize Offset = TypeSize::Fixed(FixedOffset);
+ TypeSize Offset = TypeSize::getFixed(FixedOffset);
ArrayRef<TypeSize> MemberOffsets = getMemberOffsets();
const auto *SI =
diff --git a/llvm/lib/IR/DebugInfo.cpp b/llvm/lib/IR/DebugInfo.cpp
index 4f9073fe0c00a2d..95dfc28ca29bbd0 100644
--- a/llvm/lib/IR/DebugInfo.cpp
+++ b/llvm/lib/IR/DebugInfo.cpp
@@ -1967,7 +1967,7 @@ std::optional<AssignmentInfo> at::getAssignmentInfo(const DataLayout &DL,
// We can't use a non-const size, bail.
return std::nullopt;
uint64_t SizeInBits = 8 * ConstLengthInBytes->getZExtValue();
- return getAssignmentInfoImpl(DL, StoreDest, TypeSize::Fixed(SizeInBits));
+ return getAssignmentInfoImpl(DL, StoreDest, TypeSize::getFixed(SizeInBits));
}
std::optional<AssignmentInfo> at::getAssignmentInfo(const DataLayout &DL,
diff --git a/llvm/lib/IR/Type.cpp b/llvm/lib/IR/Type.cpp
index 8a1bf9654fdc6eb..3d2e203a20dac77 100644
--- a/llvm/lib/IR/Type.cpp
+++ b/llvm/lib/IR/Type.cpp
@@ -172,17 +172,26 @@ bool Type::isEmptyTy() const {
TypeSize Type::getPrimitiveSizeInBits() const {
switch (getTypeID()) {
- case Type::HalfTyID: return TypeSize::Fixed(16);
- case Type::BFloatTyID: return TypeSize::Fixed(16);
- case Type::FloatTyID: return TypeSize::Fixed(32);
- case Type::DoubleTyID: return TypeSize::Fixed(64);
- case Type::X86_FP80TyID: return TypeSize::Fixed(80);
- case Type::FP128TyID: return TypeSize::Fixed(128);
- case Type::PPC_FP128TyID: return TypeSize::Fixed(128);
- case Type::X86_MMXTyID: return TypeSize::Fixed(64);
- case Type::X86_AMXTyID: return TypeSize::Fixed(8192);
+ case Type::HalfTyID:
+ return TypeSize::getFixed(16);
+ case Type::BFloatTyID:
+ return TypeSize::getFixed(16);
+ case Type::FloatTyID:
+ return TypeSize::getFixed(32);
+ case Type::DoubleTyID:
+ return TypeSize::getFixed(64);
+ case Type::X86_FP80TyID:
+ return TypeSize::getFixed(80);
+ case Type::FP128TyID:
+ return TypeSize::getFixed(128);
+ case Type::PPC_FP128TyID:
+ return TypeSize::getFixed(128);
+ case Type::X86_MMXTyID:
+ return TypeSize::getFixed(64);
+ case Type::X86_AMXTyID:
+ return TypeSize::getFixed(8192);
case Type::IntegerTyID:
- return TypeSize::Fixed(cast<IntegerType>(this)->getBitWidth());
+ return TypeSize::getFixed(cast<IntegerType>(this)->getBitWidth());
case Type::FixedVectorTyID:
case Type::ScalableVectorTyID: {
const VectorType *VTy = cast<VectorType>(this);
@@ -191,7 +200,8 @@ TypeSize Type::getPrimitiveSizeInBits() const {
assert(!ETS.isScalable() && "Vector type should have fixed-width elements");
return {ETS.getFixedValue() * EC.getKnownMinValue(), EC.isScalable()};
}
- default: return TypeSize::Fixed(0);
+ default:
+ return TypeSize::getFixed(0);
}
}
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index b036f7582323700..fd29ab5af001385 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -1214,7 +1214,7 @@ static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
SEH->eraseFromParent();
}
- TypeSize Scale = TypeSize::Fixed(1);
+ TypeSize Scale = TypeSize::getFixed(1);
unsigned Width;
int64_t MinOffset, MaxOffset;
bool Success = static_cast<const AArch64InstrInfo *>(TII)->getMemOpInfo(
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d42ae4ff93a4442..a3b88e68288f812 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10976,7 +10976,7 @@ SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
EVT SrcVT = Src.ShuffleVec.getValueType();
TypeSize SrcVTSize = SrcVT.getSizeInBits();
- if (SrcVTSize == TypeSize::Fixed(VTSize))
+ if (SrcVTSize == TypeSize::getFixed(VTSize))
continue;
// This stage of the search produces a source with the same element type as
@@ -20973,7 +20973,7 @@ static SDValue performLOADCombine(SDNode *N,
for (unsigned I = 0; I < Num256Loads; I++) {
unsigned PtrOffset = I * 32;
SDValue NewPtr = DAG.getMemBasePlusOffset(
- BasePtr, TypeSize::Fixed(PtrOffset), DL, Flags);
+ BasePtr, TypeSize::getFixed(PtrOffset), DL, Flags);
Align NewAlign = commonAlignment(LD->getAlign(), PtrOffset);
SDValue NewLoad = DAG.getLoad(
NewVT, DL, Chain, NewPtr, LD->getPointerInfo().getWithOffset(PtrOffset),
@@ -20991,8 +20991,8 @@ static SDValue performLOADCombine(SDNode *N,
MVT RemainingVT = MVT::getVectorVT(
MemVT.getVectorElementType().getSimpleVT(),
BitsRemaining / MemVT.getVectorElementType().getSizeInBits());
- SDValue NewPtr =
- DAG.getMemBasePlusOffset(BasePtr, TypeSize::Fixed(PtrOffset), DL, Flags);
+ SDValue NewPtr = DAG.getMemBasePlusOffset(
+ BasePtr, TypeSize::getFixed(PtrOffset), DL, Flags);
Align NewAlign = commonAlignment(LD->getAlign(), PtrOffset);
SDValue RemainingLoad =
DAG.getLoad(RemainingVT, DL, Chain, NewPtr,
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 6fdf5363bae2928..a95ab3c7e0f288a 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -3517,21 +3517,21 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
switch (Opcode) {
// Not a memory operation or something we want to handle.
default:
- Scale = TypeSize::Fixed(0);
+ Scale = TypeSize::getFixed(0);
Width = 0;
MinOffset = MaxOffset = 0;
return false;
case AArch64::STRWpost:
case AArch64::LDRWpost:
Width = 32;
- Scale = TypeSize::Fixed(4);
+ Scale = TypeSize::getFixed(4);
MinOffset = -256;
MaxOffset = 255;
break;
case AArch64::LDURQi:
case AArch64::STURQi:
Width = 16;
- Scale = TypeSize::Fixed(1);
+ Scale = TypeSize::getFixed(1);
MinOffset = -256;
MaxOffset = 255;
break;
@@ -3543,7 +3543,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::STURDi:
case AArch64::STLURXi:
Width = 8;
- Scale = TypeSize::Fixed(1);
+ Scale = TypeSize::getFixed(1);
MinOffset = -256;
MaxOffset = 255;
break;
@@ -3556,7 +3556,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::STURSi:
case AArch64::STLURWi:
Width = 4;
- Scale = TypeSize::Fixed(1);
+ Scale = TypeSize::getFixed(1);
MinOffset = -256;
MaxOffset = 255;
break;
@@ -3571,7 +3571,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::STURHHi:
case AArch64::STLURHi:
Width = 2;
- Scale = TypeSize::Fixed(1);
+ Scale = TypeSize::getFixed(1);
MinOffset = -256;
MaxOffset = 255;
break;
@@ -3586,7 +3586,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::STURBBi:
case AArch64::STLURBi:
Width = 1;
- Scale = TypeSize::Fixed(1);
+ Scale = TypeSize::getFixed(1);
MinOffset = -256;
MaxOffset = 255;
break;
@@ -3594,14 +3594,14 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::LDNPQi:
case AArch64::STPQi:
case AArch64::STNPQi:
- Scale = TypeSize::Fixed(16);
+ Scale = TypeSize::getFixed(16);
Width = 32;
MinOffset = -64;
MaxOffset = 63;
break;
case AArch64::LDRQui:
case AArch64::STRQui:
- Scale = TypeSize::Fixed(16);
+ Scale = TypeSize::getFixed(16);
Width = 16;
MinOffset = 0;
MaxOffset = 4095;
@@ -3614,7 +3614,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::STPDi:
case AArch64::STNPXi:
case AArch64::STNPDi:
- Scale = TypeSize::Fixed(8);
+ Scale = TypeSize::getFixed(8);
Width = 16;
MinOffset = -64;
MaxOffset = 63;
@@ -3624,14 +3624,14 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::LDRDui:
case AArch64::STRXui:
case AArch64::STRDui:
- Scale = TypeSize::Fixed(8);
+ Scale = TypeSize::getFixed(8);
Width = 8;
MinOffset = 0;
MaxOffset = 4095;
break;
case AArch64::StoreSwiftAsyncContext:
// Store is an STRXui, but there might be an ADDXri in the expansion too.
- Scale = TypeSize::Fixed(1);
+ Scale = TypeSize::getFixed(1);
Width = 8;
MinOffset = 0;
MaxOffset = 4095;
@@ -3644,7 +3644,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::STPSi:
case AArch64::STNPWi:
case AArch64::STNPSi:
- Scale = TypeSize::Fixed(4);
+ Scale = TypeSize::getFixed(4);
Width = 8;
MinOffset = -64;
MaxOffset = 63;
@@ -3654,7 +3654,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::LDRSWui:
case AArch64::STRWui:
case AArch64::STRSui:
- Scale = TypeSize::Fixed(4);
+ Scale = TypeSize::getFixed(4);
Width = 4;
MinOffset = 0;
MaxOffset = 4095;
@@ -3665,7 +3665,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::LDRSHXui:
case AArch64::STRHui:
case AArch64::STRHHui:
- Scale = TypeSize::Fixed(2);
+ Scale = TypeSize::getFixed(2);
Width = 2;
MinOffset = 0;
MaxOffset = 4095;
@@ -3676,7 +3676,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::LDRSBXui:
case AArch64::STRBui:
case AArch64::STRBBui:
- Scale = TypeSize::Fixed(1);
+ Scale = TypeSize::getFixed(1);
Width = 1;
MinOffset = 0;
MaxOffset = 4095;
@@ -3685,14 +3685,14 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::LDPXpost:
case AArch64::STPDpre:
case AArch64::LDPDpost:
- Scale = TypeSize::Fixed(8);
+ Scale = TypeSize::getFixed(8);
Width = 8;
MinOffset = -512;
MaxOffset = 504;
break;
case AArch64::STPQpre:
case AArch64::LDPQpost:
- Scale = TypeSize::Fixed(16);
+ Scale = TypeSize::getFixed(16);
Width = 16;
MinOffset = -1024;
MaxOffset = 1008;
@@ -3701,26 +3701,26 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::STRDpre:
case AArch64::LDRXpost:
case AArch64::LDRDpost:
- Scale = TypeSize::Fixed(1);
+ Scale = TypeSize::getFixed(1);
Width = 8;
MinOffset = -256;
MaxOffset = 255;
break;
case AArch64::STRQpre:
case AArch64::LDRQpost:
- Scale = TypeSize::Fixed(1);
+ Scale = TypeSize::getFixed(1);
Width = 16;
MinOffset = -256;
MaxOffset = 255;
break;
case AArch64::ADDG:
- Scale = TypeSize::Fixed(16);
+ Scale = TypeSize::getFixed(16);
Width = 0;
MinOffset = 0;
MaxOffset = 63;
break;
case AArch64::TAGPstack:
- Scale = TypeSize::Fixed(16);
+ Scale = TypeSize::getFixed(16);
Width = 0;
// TAGP with a negative offset turns into SUBP, which has a maximum offset
// of 63 (not 64!).
@@ -3730,42 +3730,42 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::LDG:
case AArch64::STGi:
case AArch64::STZGi:
- Scale = TypeSize::Fixed(16);
+ Scale = TypeSize::getFixed(16);
Width = 16;
MinOffset = -256;
MaxOffset = 255;
break;
case AArch64::STR_ZZZZXI:
case AArch64::LDR_ZZZZXI:
- Scale = TypeSize::Scalable(16);
+ Scale = TypeSize::getScalable(16);
Width = SVEMaxBytesPerVector * 4;
MinOffset = -256;
MaxOffset = 252;
break;
case AArch64::STR_ZZZXI:
case AArch64::LDR_ZZZXI:
- Scale = TypeSize::Scalable(16);
+ Scale = TypeSize::getScalable(16);
Width = SVEMaxBytesPerVector * 3;
MinOffset = -256;
MaxOffset = 253;
break;
case AArch64::STR_ZZXI:
case AArch64::LDR_ZZXI:
- Scale = TypeSize::Scalable(16);
+ Scale = TypeSize::getScalable(16);
Width = SVEMaxBytesPerVector * 2;
MinOffset = -256;
MaxOffset = 254;
break;
case AArch64::LDR_PXI:
case AArch64::STR_PXI:
- Scale = TypeSize::Scalable(2);
+ Scale = TypeSize::getScalable(2);
Width = SVEMaxBytesPerVector / 8;
MinOffset = -256;
MaxOffset = 255;
break;
case AArch64::LDR_ZXI:
case AArch64::STR_ZXI:
- Scale = TypeSize::Scalable(16);
+ Scale = TypeSize::getScalable(16);
Width = SVEMaxBytesPerVector;
MinOffset = -256;
MaxOffset = 255;
@@ -3792,7 +3792,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::LDNF1D_IMM:
// A full vectors worth of data
// Width = mbytes * elements
- Scale = TypeSize::Scalable(16);
+ Scale = TypeSize::getScalable(16);
Width = SVEMaxBytesPerVector;
MinOffset = -8;
MaxOffset = 7;
@@ -3805,7 +3805,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::ST2H_IMM:
case AArch64::ST2W_IMM:
case AArch64::ST2D_IMM:
- Scale = TypeSize::Scalable(32);
+ Scale = TypeSize::getScalable(32);
Width = SVEMaxBytesPerVector * 2;
MinOffset = -8;
MaxOffset = 7;
@@ -3818,7 +3818,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::ST3H_IMM:
case AArch64::ST3W_IMM:
case AArch64::ST3D_IMM:
- Scale = TypeSize::Scalable(48);
+ Scale = TypeSize::getScalable(48);
Width = SVEMaxBytesPerVector * 3;
MinOffset = -8;
MaxOffset = 7;
@@ -3831,7 +3831,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::ST4H_IMM:
case AArch64::ST4W_IMM:
case AArch64::ST4D_IMM:
- Scale = TypeSize::Scalable(64);
+ Scale = TypeSize::getScalable(64);
Width = SVEMaxBytesPerVector * 4;
MinOffset = -8;
MaxOffset = 7;
@@ -3853,7 +3853,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::LDNF1SW_D_IMM:
// A half vector worth of data
// Width = mbytes * elements
- Scale = TypeSize::Scalable(8);
+ Scale = TypeSize::getScalable(8);
Width = SVEMaxBytesPerVector / 2;
MinOffset = -8;
MaxOffset = 7;
@@ -3870,7 +3870,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::LDNF1SH_D_IMM:
// A quarter vector worth of data
// Width = mbytes * elements
- Scale = TypeSize::Scalable(4);
+ Scale = TypeSize::getScalable(4);
Width = SVEMaxBytesPerVector / 4;
MinOffset = -8;
MaxOffset = 7;
@@ -3882,20 +3882,20 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::LDNF1SB_D_IMM:
// A eighth vector worth of data
// Width = mbytes * elements
- Scale = TypeSize::Scalable(2);
+ Scale = TypeSize::getScalable(2);
Width = SVEMaxBytesPerVector / 8;
MinOffset = -8;
MaxOffset = 7;
break;
case AArch64::ST2Gi:
case AArch64::STZ2Gi:
- Scale = TypeSize::Fixed(16);
+ Scale = TypeSize::getFixed(16);
Width = 32;
MinOffset = -256;
MaxOffset = 255;
break;
case AArch64::STGPi:
- Scale = TypeSize::Fixed(16);
+ Scale = TypeSize::getFixed(16);
Width = 16;
MinOffset = -64;
MaxOffset = 63;
@@ -3907,7 +3907,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::LD1RSB_H_IMM:
case AArch64::LD1RSB_S_IMM:
case AArch64::LD1RSB_D_IMM:
- Scale = TypeSize::Fixed(1);
+ Scale = TypeSize::getFixed(1);
Width = 1;
MinOffset = 0;
MaxOffset = 63;
@@ -3917,7 +3917,7 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::LD1RH_D_IMM:
case AArch64::LD1RSH_S_IMM:
case AArch64::LD1RSH_D_IMM:
- Scale = TypeSize::Fixed(2);
+ Scale = TypeSize::getFixed(2);
Width = 2;
MinOffset = 0;
MaxOffset = 63;
@@ -3925,13 +3925,13 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
case AArch64::LD1RW_IMM:
case AArch64::LD1RW_D_IMM:
case AArch64::LD1RSW_IMM:
- Scale = TypeSize::Fixed(4);
+ Scale = TypeSize::getFixed(4);
Width = 4;
MinOffset = 0;
MaxOffset = 63;
break;
case AArch64::LD1RD_IMM:
- Scale = TypeSize::Fixed(8);
+ Scale = TypeSize::getFixed(8);
Width = 8;
MinOffset = 0;
MaxOffset = 63;
diff --git a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
index 10d2e768cb29abe..1a76f354589eee5 100644
--- a/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
@@ -140,8 +140,8 @@ static SDValue EmitUnrolledSetTag(SelectionDAG &DAG, const SDLoc &dl,
unsigned OffsetScaled = 0;
while (OffsetScaled < ObjSizeScaled) {
if (ObjSizeScaled - OffsetScaled >= 2) {
- SDValue AddrNode =
- DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(OffsetScaled * 16), dl);
+ SDValue AddrNode = DAG.getMemBasePlusOffset(
+ Ptr, TypeSize::getFixed(OffsetScaled * 16), dl);
SDValue St = DAG.getMemIntrinsicNode(
OpCode2, dl, DAG.getVTList(MVT::Other),
{Chain, TagSrc, AddrNode},
@@ -153,8 +153,8 @@ static SDValue EmitUnrolledSetTag(SelectionDAG &DAG, const SDLoc &dl,
}
if (ObjSizeScaled - OffsetScaled > 0) {
- SDValue AddrNode =
- DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(OffsetScaled * 16), dl);
+ SDValue AddrNode = DAG.getMemBasePlusOffset(
+ Ptr, TypeSize::getFixed(OffsetScaled * 16), dl);
SDValue St = DAG.getMemIntrinsicNode(
OpCode1, dl, DAG.getVTList(MVT::Other),
{Chain, TagSrc, AddrNode},
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 0eaa3e817c0b62d..b5b8b68291786dc 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -2037,20 +2037,21 @@ TypeSize
AArch64TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
switch (K) {
case TargetTransformInfo::RGK_Scalar:
- return TypeSize::Fixed(64);
+ return TypeSize::getFixed(64);
case TargetTransformInfo::RGK_FixedWidthVector:
if (!ST->isNeonAvailable() && !EnableFixedwidthAutovecInStreamingMode)
- return TypeSize::Fixed(0);
+ return TypeSize::getFixed(0);
if (ST->hasSVE())
- return TypeSize::Fixed(std::max(ST->getMinSVEVectorSizeInBits(), 128u));
+ return TypeSize::getFixed(
+ std::max(ST->getMinSVEVectorSizeInBits(), 128u));
- return TypeSize::Fixed(ST->hasNEON() ? 128 : 0);
+ return TypeSize::getFixed(ST->hasNEON() ? 128 : 0);
case TargetTransformInfo::RGK_ScalableVector:
if (!ST->isSVEAvailable() && !EnableScalableAutovecInStreamingMode)
- return TypeSize::Scalable(0);
+ return TypeSize::getScalable(0);
- return TypeSize::Scalable(ST->hasSVE() ? 128 : 0);
+ return TypeSize::getScalable(ST->hasSVE() ? 128 : 0);
}
llvm_unreachable("Unsupported register kind");
}
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 4ca5b3674461d89..323e5cf282744ac 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -342,14 +342,14 @@ AArch64RegisterBankInfo::getInstrAlternativeMappings(
/*ID*/ 3,
/*Cost*/
copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,
- TypeSize::Fixed(Size)),
+ TypeSize::getFixed(Size)),
getCopyMapping(AArch64::FPRRegBankID, AArch64::GPRRegBankID, Size),
/*NumOperands*/ 2);
const InstructionMapping &FPRToGPRMapping = getInstructionMapping(
/*ID*/ 3,
/*Cost*/
copyCost(AArch64::GPRRegBank, AArch64::FPRRegBank,
- TypeSize::Fixed(Size)),
+ TypeSize::getFixed(Size)),
getCopyMapping(AArch64::GPRRegBankID, AArch64::FPRRegBankID, Size),
/*NumOperands*/ 2);
@@ -713,7 +713,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
assert(DstRB && SrcRB && "Both RegBank were nullptr");
unsigned Size = getSizeInBits(DstReg, MRI, TRI);
return getInstructionMapping(
- DefaultMappingID, copyCost(*DstRB, *SrcRB, TypeSize::Fixed(Size)),
+ DefaultMappingID, copyCost(*DstRB, *SrcRB, TypeSize::getFixed(Size)),
getCopyMapping(DstRB->getID(), SrcRB->getID(), Size),
// We only care about the mapping of the destination.
/*NumOperands*/ 1);
@@ -732,7 +732,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
const RegisterBank &SrcRB =
SrcIsGPR ? AArch64::GPRRegBank : AArch64::FPRRegBank;
return getInstructionMapping(
- DefaultMappingID, copyCost(DstRB, SrcRB, TypeSize::Fixed(Size)),
+ DefaultMappingID, copyCost(DstRB, SrcRB, TypeSize::getFixed(Size)),
getCopyMapping(DstRB.getID(), SrcRB.getID(), Size),
// We only care about the mapping of the destination for COPY.
/*NumOperands*/ Opc == TargetOpcode::G_BITCAST ? 2 : 1);
@@ -825,7 +825,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
Cost = copyCost(
*AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[0]].RegBank,
*AArch64GenRegisterBankInfo::PartMappings[OpRegBankIdx[1]].RegBank,
- TypeSize::Fixed(OpSize[0]));
+ TypeSize::getFixed(OpSize[0]));
break;
case TargetOpcode::G_LOAD: {
// Loading in vector unit is slightly more expensive.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index c87851f10ffb3e5..b4e091fe0214238 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1721,7 +1721,7 @@ SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
Load->getChain(), BasePtr, SrcValue, LoMemVT,
BaseAlign, Load->getMemOperand()->getFlags());
- SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Size));
+ SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::getFixed(Size));
SDValue HiLoad =
DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 49322109bdb74f0..62996a3b3fb79fb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -3542,7 +3542,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
if (MI.getOpcode() != AMDGPU::G_FREEZE &&
- cannotCopy(*DstBank, *SrcBank, TypeSize::Fixed(Size)))
+ cannotCopy(*DstBank, *SrcBank, TypeSize::getFixed(Size)))
return getInvalidInstructionMapping();
const ValueMapping &ValMap = getValueMapping(0, Size, *DstBank);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index fa302b68263f7f6..cb877a4695f1ece 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -321,11 +321,11 @@ TypeSize
GCNTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
switch (K) {
case TargetTransformInfo::RGK_Scalar:
- return TypeSize::Fixed(32);
+ return TypeSize::getFixed(32);
case TargetTransformInfo::RGK_FixedWidthVector:
- return TypeSize::Fixed(ST->hasPackedFP32Ops() ? 64 : 32);
+ return TypeSize::getFixed(ST->hasPackedFP32Ops() ? 64 : 32);
case TargetTransformInfo::RGK_ScalableVector:
- return TypeSize::Scalable(0);
+ return TypeSize::getScalable(0);
}
llvm_unreachable("Unsupported register kind");
}
diff --git a/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp
index 6ca821174bd832d..1a1be4a442857be 100644
--- a/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp
@@ -38,7 +38,7 @@ unsigned R600TTIImpl::getNumberOfRegisters(bool Vec) const {
TypeSize
R600TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
- return TypeSize::Fixed(32);
+ return TypeSize::getFixed(32);
}
unsigned R600TTIImpl::getMinVectorRegisterBitWidth() const { return 32; }
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 35e252fe8d675ea..6d00cea80d7b24c 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1793,7 +1793,7 @@ SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT);
- return DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Offset));
+ return DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::getFixed(Offset));
}
SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
@@ -6242,7 +6242,7 @@ SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
SDValue Ptr =
- DAG.getObjectPtrOffset(DL, QueuePtr, TypeSize::Fixed(StructOffset));
+ DAG.getObjectPtrOffset(DL, QueuePtr, TypeSize::getFixed(StructOffset));
// TODO: Use custom target PseudoSourceValue.
// TODO: We should use the value from the IR intrinsic call, but it might not
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 9d92e5ab36622d9..308a9ea92fcd828 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -16644,7 +16644,7 @@ static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St,
for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) {
unsigned NewOffset = i * NumElements * ToEltVT.getSizeInBits() / 8;
SDValue NewPtr =
- DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::Fixed(NewOffset));
+ DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::getFixed(NewOffset));
SDValue Extract =
DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewFromVT, Trunc.getOperand(0),
@@ -16693,7 +16693,7 @@ static SDValue PerformSplittingMVETruncToNarrowingStores(StoreSDNode *St,
unsigned NewOffset =
i * FromVT.getVectorNumElements() * ToVT.getScalarSizeInBits() / 8;
SDValue NewPtr =
- DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::Fixed(NewOffset));
+ DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::getFixed(NewOffset));
SDValue Extract = Trunc.getOperand(i);
SDValue Store = DAG.getTruncStore(
@@ -17773,7 +17773,7 @@ static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) {
for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) {
unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8;
SDValue NewPtr =
- DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::Fixed(NewOffset));
+ DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::getFixed(NewOffset));
SDValue NewLoad =
DAG.getLoad(ISD::UNINDEXED, NewExtType, NewToVT, DL, Ch, NewPtr, Offset,
@@ -18610,7 +18610,7 @@ SDValue ARMTargetLowering::PerformMVETruncCombine(
if (!DCI.isAfterLegalizeDAG())
return SDValue();
- SDValue StackPtr = DAG.CreateStackTemporary(TypeSize::Fixed(16), Align(4));
+ SDValue StackPtr = DAG.CreateStackTemporary(TypeSize::getFixed(16), Align(4));
int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
int NumIns = N->getNumOperands();
assert((NumIns == 2 || NumIns == 4) &&
@@ -18687,7 +18687,7 @@ static SDValue PerformSplittingMVEEXTToWideningLoad(SDNode *N,
for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) {
unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8;
SDValue NewPtr =
- DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::Fixed(NewOffset));
+ DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::getFixed(NewOffset));
SDValue NewLoad =
DAG.getLoad(ISD::UNINDEXED, NewExtType, NewToVT, DL, Ch, NewPtr, Offset,
@@ -18778,7 +18778,7 @@ SDValue ARMTargetLowering::PerformMVEExtCombine(
// Lower to a stack store and reload:
// VSTRW.32 a, stack; VLDRH.32 stack; VLDRH.32 stack+8;
- SDValue StackPtr = DAG.CreateStackTemporary(TypeSize::Fixed(16), Align(4));
+ SDValue StackPtr = DAG.CreateStackTemporary(TypeSize::getFixed(16), Align(4));
int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
int NumOuts = N->getNumValues();
assert((NumOuts == 2 || NumOuts == 4) &&
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index 71e8a24afc401ec..bb4b321b5300916 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -165,15 +165,15 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
switch (K) {
case TargetTransformInfo::RGK_Scalar:
- return TypeSize::Fixed(32);
+ return TypeSize::getFixed(32);
case TargetTransformInfo::RGK_FixedWidthVector:
if (ST->hasNEON())
- return TypeSize::Fixed(128);
+ return TypeSize::getFixed(128);
if (ST->hasMVEIntegerOps())
- return TypeSize::Fixed(128);
- return TypeSize::Fixed(0);
+ return TypeSize::getFixed(128);
+ return TypeSize::getFixed(0);
case TargetTransformInfo::RGK_ScalableVector:
- return TypeSize::Scalable(0);
+ return TypeSize::getScalable(0);
}
llvm_unreachable("Unsupported register kind");
}
diff --git a/llvm/lib/Target/DirectX/CBufferDataLayout.cpp b/llvm/lib/Target/DirectX/CBufferDataLayout.cpp
index e539bfe7e6d5172..3ebd7a2b54d2663 100644
--- a/llvm/lib/Target/DirectX/CBufferDataLayout.cpp
+++ b/llvm/lib/Target/DirectX/CBufferDataLayout.cpp
@@ -76,12 +76,12 @@ TypeSize LegacyCBufferLayout::getTypeAllocSize(Type *Ty) {
} else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
unsigned NumElts = AT->getNumElements();
if (NumElts == 0)
- return TypeSize::Fixed(0);
+ return TypeSize::getFixed(0);
TypeSize EltSize = getTypeAllocSize(AT->getElementType());
TypeSize AlignedEltSize = alignTo4Dwords(EltSize);
// Each new element start 4 dwords aligned.
- return TypeSize::Fixed(AlignedEltSize * (NumElts - 1) + EltSize);
+ return TypeSize::getFixed(AlignedEltSize * (NumElts - 1) + EltSize);
} else {
// NOTE: Use type store size, not align to ABI on basic types for legacy
// layout.
@@ -95,7 +95,7 @@ LegacyCBufferLayout::getStructLayout(StructType *ST) {
if (it != StructLayouts.end())
return it->second;
- TypeSize Offset = TypeSize::Fixed(0);
+ TypeSize Offset = TypeSize::getFixed(0);
LegacyStructLayout Layout;
Layout.ST = ST;
for (Type *EltTy : ST->elements()) {
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index e950b44341c9225..a7d452e7227d79a 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3214,9 +3214,9 @@ HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG)
DAG.getConstant(NeedAlign, dl, MVT::i32))
: BO.first;
SDValue Base0 =
- DAG.getMemBasePlusOffset(BaseNoOff, TypeSize::Fixed(BO.second), dl);
+ DAG.getMemBasePlusOffset(BaseNoOff, TypeSize::getFixed(BO.second), dl);
SDValue Base1 = DAG.getMemBasePlusOffset(
- BaseNoOff, TypeSize::Fixed(BO.second + LoadLen), dl);
+ BaseNoOff, TypeSize::getFixed(BO.second + LoadLen), dl);
MachineMemOperand *WideMMO = nullptr;
if (MachineMemOperand *MMO = LN->getMemOperand()) {
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
index bbb9136a4e4d2a5..db416a500f597b8 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLoweringHVX.cpp
@@ -2974,7 +2974,8 @@ HexagonTargetLowering::SplitHvxMemOp(SDValue Op, SelectionDAG &DAG) const {
MVT SingleTy = typeSplit(MemTy).first;
SDValue Chain = MemN->getChain();
SDValue Base0 = MemN->getBasePtr();
- SDValue Base1 = DAG.getMemBasePlusOffset(Base0, TypeSize::Fixed(HwLen), dl);
+ SDValue Base1 =
+ DAG.getMemBasePlusOffset(Base0, TypeSize::getFixed(HwLen), dl);
unsigned MemOpc = MemN->getOpcode();
MachineMemOperand *MOp0 = nullptr, *MOp1 = nullptr;
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
index 6d4463108c1c04c..cf4b66f8bf8619b 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
@@ -118,11 +118,11 @@ TypeSize
HexagonTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
switch (K) {
case TargetTransformInfo::RGK_Scalar:
- return TypeSize::Fixed(32);
+ return TypeSize::getFixed(32);
case TargetTransformInfo::RGK_FixedWidthVector:
- return TypeSize::Fixed(getMinVectorRegisterBitWidth());
+ return TypeSize::getFixed(getMinVectorRegisterBitWidth());
case TargetTransformInfo::RGK_ScalableVector:
- return TypeSize::Scalable(0);
+ return TypeSize::getScalable(0);
}
llvm_unreachable("Unsupported register kind");
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
index 7c90d66d6ae7d7d..3ce2675560c4df0 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
@@ -78,7 +78,7 @@ class NVPTXTTIImpl : public BasicTTIImplBase<NVPTXTTIImpl> {
// Only <2 x half> should be vectorized, so always return 32 for the vector
// register size.
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
- return TypeSize::Fixed(32);
+ return TypeSize::getFixed(32);
}
unsigned getMinVectorRegisterBitWidth() const { return 32; }
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index f221715835be2ab..ca65048e2355f39 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -7254,7 +7254,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_AIX(
// be future work.
SDValue Store = DAG.getStore(
CopyFrom.getValue(1), dl, CopyFrom,
- DAG.getObjectPtrOffset(dl, FIN, TypeSize::Fixed(Offset)),
+ DAG.getObjectPtrOffset(dl, FIN, TypeSize::getFixed(Offset)),
MachinePointerInfo::getFixedStack(MF, FI, Offset));
MemOps.push_back(Store);
@@ -7434,12 +7434,12 @@ SDValue PPCTargetLowering::LowerCall_AIX(
}
auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
- return DAG.getExtLoad(
- ISD::ZEXTLOAD, dl, PtrVT, Chain,
- (LoadOffset != 0)
- ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
- : Arg,
- MachinePointerInfo(), VT);
+ return DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain,
+ (LoadOffset != 0)
+ ? DAG.getObjectPtrOffset(
+ dl, Arg, TypeSize::getFixed(LoadOffset))
+ : Arg,
+ MachinePointerInfo(), VT);
};
unsigned LoadOffset = 0;
@@ -7469,11 +7469,11 @@ SDValue PPCTargetLowering::LowerCall_AIX(
// Only memcpy the bytes that don't pass in register.
MemcpyFlags.setByValSize(ByValSize - LoadOffset);
Chain = CallSeqStart = createMemcpyOutsideCallSeq(
- (LoadOffset != 0)
- ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
- : Arg,
- DAG.getObjectPtrOffset(dl, StackPtr,
- TypeSize::Fixed(ByValVA.getLocMemOffset())),
+ (LoadOffset != 0) ? DAG.getObjectPtrOffset(
+ dl, Arg, TypeSize::getFixed(LoadOffset))
+ : Arg,
+ DAG.getObjectPtrOffset(
+ dl, StackPtr, TypeSize::getFixed(ByValVA.getLocMemOffset())),
CallSeqStart, MemcpyFlags, DAG, dl);
continue;
}
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index 4a1247991e34298..aa385d7c3b202ad 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -489,11 +489,11 @@ TypeSize
PPCTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
switch (K) {
case TargetTransformInfo::RGK_Scalar:
- return TypeSize::Fixed(ST->isPPC64() ? 64 : 32);
+ return TypeSize::getFixed(ST->isPPC64() ? 64 : 32);
case TargetTransformInfo::RGK_FixedWidthVector:
- return TypeSize::Fixed(ST->hasAltivec() ? 128 : 0);
+ return TypeSize::getFixed(ST->hasAltivec() ? 128 : 0);
case TargetTransformInfo::RGK_ScalableVector:
- return TypeSize::Scalable(0);
+ return TypeSize::getScalable(0);
}
llvm_unreachable("Unsupported register kind");
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index e1375f05cdecdc7..605a54071ea282e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -92,7 +92,7 @@ void RISCVDAGToDAGISel::PreprocessISelDAG() {
// Create temporary stack for each expanding node.
SDValue StackSlot =
- CurDAG->CreateStackTemporary(TypeSize::Fixed(8), Align(8));
+ CurDAG->CreateStackTemporary(TypeSize::getFixed(8), Align(8));
int FI = cast<FrameIndexSDNode>(StackSlot.getNode())->getIndex();
MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
@@ -100,7 +100,7 @@ void RISCVDAGToDAGISel::PreprocessISelDAG() {
Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));
SDValue OffsetSlot =
- CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
+ CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::getFixed(4), DL);
Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
Align(8));
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 26190337eb3bd1b..c5bf383eeb94c1c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4624,8 +4624,8 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
auto *Ld = cast<LoadSDNode>(V);
Offset *= SVT.getStoreSize();
- SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
- TypeSize::Fixed(Offset), DL);
+ SDValue NewAddr = DAG.getMemBasePlusOffset(
+ Ld->getBasePtr(), TypeSize::getFixed(Offset), DL);
// If this is SEW=64 on RV32, use a strided load with a stride of x0.
if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 51a8b2384c0e17a..3a2f2f39cd1c9b0 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -209,15 +209,16 @@ RISCVTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
llvm::bit_floor(std::clamp<unsigned>(RVVRegisterWidthLMUL, 1, 8));
switch (K) {
case TargetTransformInfo::RGK_Scalar:
- return TypeSize::Fixed(ST->getXLen());
+ return TypeSize::getFixed(ST->getXLen());
case TargetTransformInfo::RGK_FixedWidthVector:
- return TypeSize::Fixed(
+ return TypeSize::getFixed(
ST->useRVVForFixedLengthVectors() ? LMUL * ST->getRealMinVLen() : 0);
case TargetTransformInfo::RGK_ScalableVector:
- return TypeSize::Scalable((ST->hasVInstructions() &&
- ST->getRealMinVLen() >= RISCV::RVVBitsPerBlock)
- ? LMUL * RISCV::RVVBitsPerBlock
- : 0);
+ return TypeSize::getScalable(
+ (ST->hasVInstructions() &&
+ ST->getRealMinVLen() >= RISCV::RVVBitsPerBlock)
+ ? LMUL * RISCV::RVVBitsPerBlock
+ : 0);
}
llvm_unreachable("Unsupported register kind");
diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
index b49f223e6b68f4f..1f97e0f761c04de 100644
--- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
@@ -366,11 +366,11 @@ TypeSize
SystemZTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
switch (K) {
case TargetTransformInfo::RGK_Scalar:
- return TypeSize::Fixed(64);
+ return TypeSize::getFixed(64);
case TargetTransformInfo::RGK_FixedWidthVector:
- return TypeSize::Fixed(ST->hasVector() ? 128 : 0);
+ return TypeSize::getFixed(ST->hasVector() ? 128 : 0);
case TargetTransformInfo::RGK_ScalableVector:
- return TypeSize::Scalable(0);
+ return TypeSize::getScalable(0);
}
llvm_unreachable("Unsupported register kind");
diff --git a/llvm/lib/Target/VE/VETargetTransformInfo.h b/llvm/lib/Target/VE/VETargetTransformInfo.h
index 8c9ef850b9581a5..c688447088782a0 100644
--- a/llvm/lib/Target/VE/VETargetTransformInfo.h
+++ b/llvm/lib/Target/VE/VETargetTransformInfo.h
@@ -98,12 +98,12 @@ class VETTIImpl : public BasicTTIImplBase<VETTIImpl> {
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
switch (K) {
case TargetTransformInfo::RGK_Scalar:
- return TypeSize::Fixed(64);
+ return TypeSize::getFixed(64);
case TargetTransformInfo::RGK_FixedWidthVector:
// TODO report vregs once vector isel is stable.
- return TypeSize::Fixed(0);
+ return TypeSize::getFixed(0);
case TargetTransformInfo::RGK_ScalableVector:
- return TypeSize::Scalable(0);
+ return TypeSize::getScalable(0);
}
llvm_unreachable("Unsupported register kind");
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
index 306db7cf7614fab..9a434d9b1db54a9 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
@@ -40,11 +40,11 @@ TypeSize WebAssemblyTTIImpl::getRegisterBitWidth(
TargetTransformInfo::RegisterKind K) const {
switch (K) {
case TargetTransformInfo::RGK_Scalar:
- return TypeSize::Fixed(64);
+ return TypeSize::getFixed(64);
case TargetTransformInfo::RGK_FixedWidthVector:
- return TypeSize::Fixed(getST()->hasSIMD128() ? 128 : 64);
+ return TypeSize::getFixed(getST()->hasSIMD128() ? 128 : 64);
case TargetTransformInfo::RGK_ScalableVector:
- return TypeSize::Scalable(0);
+ return TypeSize::getScalable(0);
}
llvm_unreachable("Unsupported register kind");
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 7a4fa16edb7de49..20c01f0f27a0412 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -6221,8 +6221,8 @@ static SDValue getBROADCAST_LOAD(unsigned Opcode, const SDLoc &DL, EVT VT,
if (!Mem || !Mem->readMem() || !Mem->isSimple() || Mem->isNonTemporal())
return SDValue();
- SDValue Ptr =
- DAG.getMemBasePlusOffset(Mem->getBasePtr(), TypeSize::Fixed(Offset), DL);
+ SDValue Ptr = DAG.getMemBasePlusOffset(Mem->getBasePtr(),
+ TypeSize::getFixed(Offset), DL);
SDVTList Tys = DAG.getVTList(VT, MVT::Other);
SDValue Ops[] = {Mem->getChain(), Ptr};
SDValue BcstLd = DAG.getMemIntrinsicNode(
@@ -12420,7 +12420,7 @@ static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
unsigned Offset = BroadcastIdx * SVT.getStoreSize();
assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
SDValue NewAddr =
- DAG.getMemBasePlusOffset(BaseAddr, TypeSize::Fixed(Offset), DL);
+ DAG.getMemBasePlusOffset(BaseAddr, TypeSize::getFixed(Offset), DL);
// Directly form VBROADCAST_LOAD if we're using VBROADCAST opcode rather
// than MOVDDUP.
@@ -19673,7 +19673,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
if (SrcVT == MVT::i32) {
SDValue OffsetSlot =
- DAG.getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), dl);
+ DAG.getMemBasePlusOffset(StackSlot, TypeSize::getFixed(4), dl);
SDValue Store1 = DAG.getStore(Chain, dl, Src, StackSlot, MPI, SlotAlign);
SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
OffsetSlot, MPI.getWithOffset(4), SlotAlign);
@@ -24352,7 +24352,7 @@ static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
unsigned HalfOffset = Value0.getValueType().getStoreSize();
SDValue Ptr0 = Store->getBasePtr();
SDValue Ptr1 =
- DAG.getMemBasePlusOffset(Ptr0, TypeSize::Fixed(HalfOffset), DL);
+ DAG.getMemBasePlusOffset(Ptr0, TypeSize::getFixed(HalfOffset), DL);
SDValue Ch0 =
DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
Store->getOriginalAlign(),
@@ -24388,7 +24388,7 @@ static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
for (unsigned i = 0; i != NumElems; ++i) {
unsigned Offset = i * ScalarSize;
SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(),
- TypeSize::Fixed(Offset), DL);
+ TypeSize::getFixed(Offset), DL);
SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
DAG.getIntPtrConstant(i, DL));
SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
@@ -24784,7 +24784,7 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
MemOps.push_back(Store);
// Store fp_offset
- FIN = DAG.getMemBasePlusOffset(FIN, TypeSize::Fixed(4), DL);
+ FIN = DAG.getMemBasePlusOffset(FIN, TypeSize::getFixed(4), DL);
Store = DAG.getStore(
Op.getOperand(0), DL,
DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
@@ -39948,8 +39948,8 @@ static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
LN->isSimple()) {
unsigned Offset = ShiftAmt / 8;
SDVTList Tys = DAG.getVTList(VT, MVT::Other);
- SDValue Ptr = DAG.getMemBasePlusOffset(LN->getBasePtr(),
- TypeSize::Fixed(Offset), DL);
+ SDValue Ptr = DAG.getMemBasePlusOffset(
+ LN->getBasePtr(), TypeSize::getFixed(Offset), DL);
SDValue Ops[] = { LN->getChain(), Ptr };
SDValue BcastLd = DAG.getMemIntrinsicNode(
X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
@@ -49841,7 +49841,7 @@ static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
unsigned HalfOffset = 16;
SDValue Ptr1 = Ld->getBasePtr();
SDValue Ptr2 =
- DAG.getMemBasePlusOffset(Ptr1, TypeSize::Fixed(HalfOffset), dl);
+ DAG.getMemBasePlusOffset(Ptr1, TypeSize::getFixed(HalfOffset), dl);
EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
NumElems / 2);
SDValue Load1 =
@@ -50016,7 +50016,7 @@ static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
Addr = MaskedOp->getBasePtr();
if (TrueMaskElt != 0) {
Offset = TrueMaskElt * EltVT.getStoreSize();
- Addr = DAG.getMemBasePlusOffset(Addr, TypeSize::Fixed(Offset),
+ Addr = DAG.getMemBasePlusOffset(Addr, TypeSize::getFixed(Offset),
SDLoc(MaskedOp));
}
@@ -50310,7 +50310,7 @@ static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
Hi = combinevXi1ConstantToInteger(Hi, DAG);
SDValue Ptr0 = St->getBasePtr();
- SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, TypeSize::Fixed(4), dl);
+ SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, TypeSize::getFixed(4), dl);
SDValue Ch0 =
DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index 3c91fc12bcdc9d1..8a04987e768a126 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -178,17 +178,17 @@ X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
unsigned PreferVectorWidth = ST->getPreferVectorWidth();
switch (K) {
case TargetTransformInfo::RGK_Scalar:
- return TypeSize::Fixed(ST->is64Bit() ? 64 : 32);
+ return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
case TargetTransformInfo::RGK_FixedWidthVector:
if (ST->hasAVX512() && ST->hasEVEX512() && PreferVectorWidth >= 512)
- return TypeSize::Fixed(512);
+ return TypeSize::getFixed(512);
if (ST->hasAVX() && PreferVectorWidth >= 256)
- return TypeSize::Fixed(256);
+ return TypeSize::getFixed(256);
if (ST->hasSSE1() && PreferVectorWidth >= 128)
- return TypeSize::Fixed(128);
- return TypeSize::Fixed(0);
+ return TypeSize::getFixed(128);
+ return TypeSize::getFixed(0);
case TargetTransformInfo::RGK_ScalableVector:
- return TypeSize::Scalable(0);
+ return TypeSize::getScalable(0);
}
llvm_unreachable("Unsupported register kind");
diff --git a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
index fdc39c7fa478a64..fef1a698e146390 100644
--- a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
@@ -1075,7 +1075,7 @@ static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
RetType = CharSizeType;
else {
if (Size % 8 != 0)
- Size = TypeSize::Fixed(Size + 8 - (Size % 8));
+ Size = TypeSize::getFixed(Size + 8 - (Size % 8));
RetType = Builder.createArrayType(
Size, Layout.getPrefTypeAlign(Ty).value(), CharSizeType,
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index a7b2c95e5b2c8bc..dd0a290252dae33 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -212,7 +212,7 @@ static std::optional<TypeSize> getPointerSize(const Value *V,
Opts.NullIsUnknownSize = NullPointerIsDefined(F);
if (getObjectSize(V, Size, DL, &TLI, Opts))
- return TypeSize::Fixed(Size);
+ return TypeSize::getFixed(Size);
return std::nullopt;
}
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 7789458364caeed..0e55249d63a8294 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -1725,7 +1725,7 @@ bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
if (auto *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
if (auto *C = dyn_cast<CallInst>(MI)) {
if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
- TypeSize::Fixed(CopySize->getZExtValue()),
+ TypeSize::getFixed(CopySize->getZExtValue()),
M->getDestAlign().valueOrOne(), BAA,
[C]() -> CallInst * { return C; })) {
LLVM_DEBUG(dbgs() << "Performed call slot optimization:\n"
@@ -1771,7 +1771,7 @@ bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
if (Len == nullptr)
return false;
if (performStackMoveOptzn(M, M, DestAlloca, SrcAlloca,
- TypeSize::Fixed(Len->getZExtValue()), BAA)) {
+ TypeSize::getFixed(Len->getZExtValue()), BAA)) {
// Avoid invalidating the iterator.
BBI = M->getNextNonDebugInstruction()->getIterator();
eraseInstruction(M);
@@ -1834,7 +1834,7 @@ bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
// The length of the memcpy must be larger or equal to the size of the byval.
auto *C1 = dyn_cast<ConstantInt>(MDep->getLength());
if (!C1 || !TypeSize::isKnownGE(
- TypeSize::Fixed(C1->getValue().getZExtValue()), ByValSize))
+ TypeSize::getFixed(C1->getValue().getZExtValue()), ByValSize))
return false;
// Get the alignment of the byval. If the call doesn't specify the alignment,
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index 0b392e77abaa1ea..8c6c112ebacff2d 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -1597,7 +1597,7 @@ static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
const DataLayout &DL = DII->getModule()->getDataLayout();
TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
if (std::optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits())
- return TypeSize::isKnownGE(ValueSize, TypeSize::Fixed(*FragmentSize));
+ return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
// We can't always calculate the size of the DI variable (e.g. if it is a
// VLA). Try to use the size of the alloca that the dbg intrinsic describes
@@ -1623,7 +1623,7 @@ static bool valueCoversEntireFragment(Type *ValTy, DPValue *DPV) {
const DataLayout &DL = DPV->getModule()->getDataLayout();
TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
if (std::optional<uint64_t> FragmentSize = DPV->getFragmentSizeInBits())
- return TypeSize::isKnownGE(ValueSize, TypeSize::Fixed(*FragmentSize));
+ return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
// We can't always calculate the size of the DI variable (e.g. if it is a
// VLA). Try to use the size of the alloca that the dbg intrinsic describes
diff --git a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
index 8c909c095cd11ad..d13cfeeffe56a0d 100644
--- a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
+++ b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
@@ -84,7 +84,7 @@ TEST(LowLevelTypeTest, Vector) {
if (!EC.isScalable())
EXPECT_EQ(S * EC.getFixedValue(), VTy.getSizeInBits());
else
- EXPECT_EQ(TypeSize::Scalable(S * EC.getKnownMinValue()),
+ EXPECT_EQ(TypeSize::getScalable(S * EC.getKnownMinValue()),
VTy.getSizeInBits());
// Test equality operators.
@@ -382,8 +382,8 @@ static_assert(CEV2P1.isVector());
static_assert(CEV2P1.getElementCount() == ElementCount::getFixed(2));
static_assert(CEV2P1.getElementCount() != ElementCount::getFixed(1));
static_assert(CEV2S32.getElementCount() == ElementCount::getFixed(2));
-static_assert(CEV2S32.getSizeInBits() == TypeSize::Fixed(64));
-static_assert(CEV2P1.getSizeInBits() == TypeSize::Fixed(128));
+static_assert(CEV2S32.getSizeInBits() == TypeSize::getFixed(64));
+static_assert(CEV2P1.getSizeInBits() == TypeSize::getFixed(128));
static_assert(CEV2P1.getScalarType() == LLT::pointer(1, 64));
static_assert(CES32.getScalarType() == CES32);
static_assert(CEV2S32.getScalarType() == CES32);
diff --git a/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp b/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp
index d95fc2fb4f1d457..7426884217a08e3 100644
--- a/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp
+++ b/llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp
@@ -105,7 +105,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, sameFrameObject) {
SDValue FIPtr = DAG->CreateStackTemporary(VecVT);
int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(*MF, FI);
- TypeSize Offset = TypeSize::Fixed(0);
+ TypeSize Offset = TypeSize::getFixed(0);
SDValue Value = DAG->getConstant(0, Loc, VecVT);
SDValue Index = DAG->getMemBasePlusOffset(FIPtr, Offset, Loc);
SDValue Store = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index,
@@ -128,7 +128,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, sameFrameObjectUnknownSize) {
SDValue FIPtr = DAG->CreateStackTemporary(VecVT);
int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(*MF, FI);
- TypeSize Offset = TypeSize::Fixed(0);
+ TypeSize Offset = TypeSize::getFixed(0);
SDValue Value = DAG->getConstant(0, Loc, VecVT);
SDValue Index = DAG->getMemBasePlusOffset(FIPtr, Offset, Loc);
SDValue Store = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index,
@@ -157,7 +157,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, noAliasingFrameObjects) {
int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(*MF, FI);
SDValue Value = DAG->getConstant(0, Loc, SubVecVT);
- TypeSize Offset0 = TypeSize::Fixed(0);
+ TypeSize Offset0 = TypeSize::getFixed(0);
TypeSize Offset1 = SubVecVT.getStoreSize();
SDValue Index0 = DAG->getMemBasePlusOffset(FIPtr, Offset0, Loc);
SDValue Index1 = DAG->getMemBasePlusOffset(FIPtr, Offset1, Loc);
@@ -216,7 +216,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, globalWithFrameObject) {
int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(*MF, FI);
SDValue Value = DAG->getConstant(0, Loc, VecVT);
- TypeSize Offset = TypeSize::Fixed(0);
+ TypeSize Offset = TypeSize::getFixed(0);
SDValue Index = DAG->getMemBasePlusOffset(FIPtr, Offset, Loc);
SDValue Store = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index,
PtrInfo.getWithOffset(Offset));
@@ -282,7 +282,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, fixedSizeFrameObjectsWithinDiff) {
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(*MF, FI);
SDValue Value0 = DAG->getConstant(0, Loc, SubFixedVecVT2xi8);
SDValue Value1 = DAG->getConstant(0, Loc, SubVecVT);
- TypeSize Offset0 = TypeSize::Fixed(0);
+ TypeSize Offset0 = TypeSize::getFixed(0);
TypeSize Offset1 = SubFixedVecVT2xi8.getStoreSize();
SDValue Index0 = DAG->getMemBasePlusOffset(FIPtr, Offset0, Loc);
SDValue Index1 = DAG->getMemBasePlusOffset(FIPtr, Offset1, Loc);
@@ -323,7 +323,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, fixedSizeFrameObjectsOutOfDiff) {
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(*MF, FI);
SDValue Value0 = DAG->getConstant(0, Loc, SubFixedVecVT4xi8);
SDValue Value1 = DAG->getConstant(0, Loc, SubVecVT);
- TypeSize Offset0 = TypeSize::Fixed(0);
+ TypeSize Offset0 = TypeSize::getFixed(0);
TypeSize Offset1 = SubFixedVecVT2xi8.getStoreSize();
SDValue Index0 = DAG->getMemBasePlusOffset(FIPtr, Offset0, Loc);
SDValue Index1 = DAG->getMemBasePlusOffset(FIPtr, Offset1, Loc);
@@ -358,7 +358,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, twoFixedStackObjects) {
MachinePointerInfo PtrInfo1 = MachinePointerInfo::getFixedStack(*MF, FI1);
SDValue Value0 = DAG->getConstant(0, Loc, FixedVecVT);
SDValue Value1 = DAG->getConstant(0, Loc, VecVT);
- TypeSize Offset0 = TypeSize::Fixed(0);
+ TypeSize Offset0 = TypeSize::getFixed(0);
SDValue Index0 = DAG->getMemBasePlusOffset(FIPtr0, Offset0, Loc);
SDValue Index1 = DAG->getMemBasePlusOffset(FIPtr1, Offset0, Loc);
SDValue Store0 = DAG->getStore(DAG->getEntryNode(), Loc, Value0, Index0,
diff --git a/llvm/unittests/IR/InstructionsTest.cpp b/llvm/unittests/IR/InstructionsTest.cpp
index 803bc56bd6d530f..20b8529b386324f 100644
--- a/llvm/unittests/IR/InstructionsTest.cpp
+++ b/llvm/unittests/IR/InstructionsTest.cpp
@@ -1746,14 +1746,14 @@ TEST(InstructionsTest, AllocaInst) {
AllocaInst &F = cast<AllocaInst>(*It++);
AllocaInst &G = cast<AllocaInst>(*It++);
AllocaInst &H = cast<AllocaInst>(*It++);
- EXPECT_EQ(A.getAllocationSizeInBits(DL), TypeSize::Fixed(32));
- EXPECT_EQ(B.getAllocationSizeInBits(DL), TypeSize::Fixed(128));
+ EXPECT_EQ(A.getAllocationSizeInBits(DL), TypeSize::getFixed(32));
+ EXPECT_EQ(B.getAllocationSizeInBits(DL), TypeSize::getFixed(128));
EXPECT_FALSE(C.getAllocationSizeInBits(DL));
- EXPECT_EQ(D.getAllocationSizeInBits(DL), TypeSize::Fixed(512));
- EXPECT_EQ(E.getAllocationSizeInBits(DL), TypeSize::Scalable(512));
- EXPECT_EQ(F.getAllocationSizeInBits(DL), TypeSize::Fixed(32));
- EXPECT_EQ(G.getAllocationSizeInBits(DL), TypeSize::Fixed(768));
- EXPECT_EQ(H.getAllocationSizeInBits(DL), TypeSize::Fixed(160));
+ EXPECT_EQ(D.getAllocationSizeInBits(DL), TypeSize::getFixed(512));
+ EXPECT_EQ(E.getAllocationSizeInBits(DL), TypeSize::getScalable(512));
+ EXPECT_EQ(F.getAllocationSizeInBits(DL), TypeSize::getFixed(32));
+ EXPECT_EQ(G.getAllocationSizeInBits(DL), TypeSize::getFixed(768));
+ EXPECT_EQ(H.getAllocationSizeInBits(DL), TypeSize::getFixed(160));
}
TEST(InstructionsTest, InsertAtBegin) {
diff --git a/llvm/unittests/Support/TypeSizeTest.cpp b/llvm/unittests/Support/TypeSizeTest.cpp
index 2850705d39f69f5..33169a3d8b198c0 100644
--- a/llvm/unittests/Support/TypeSizeTest.cpp
+++ b/llvm/unittests/Support/TypeSizeTest.cpp
@@ -59,26 +59,31 @@ static_assert(ElementCount::getFixed(8).multiplyCoefficientBy(3) ==
ElementCount::getFixed(24));
static_assert(ElementCount::getFixed(8).isKnownMultipleOf(2));
-constexpr TypeSize TSFixed0 = TypeSize::Fixed(0);
-constexpr TypeSize TSFixed1 = TypeSize::Fixed(1);
-constexpr TypeSize TSFixed32 = TypeSize::Fixed(32);
+constexpr TypeSize TSFixed0 = TypeSize::getFixed(0);
+constexpr TypeSize TSFixed1 = TypeSize::getFixed(1);
+constexpr TypeSize TSFixed32 = TypeSize::getFixed(32);
static_assert(TSFixed0.getFixedValue() == 0);
static_assert(TSFixed1.getFixedValue() == 1);
static_assert(TSFixed32.getFixedValue() == 32);
static_assert(TSFixed32.getKnownMinValue() == 32);
-static_assert(TypeSize::Scalable(32).getKnownMinValue() == 32);
+static_assert(TypeSize::getScalable(32).getKnownMinValue() == 32);
-static_assert(TSFixed32 * 2 == TypeSize::Fixed(64));
-static_assert(TSFixed32 * 2u == TypeSize::Fixed(64));
-static_assert(TSFixed32 * INT64_C(2) == TypeSize::Fixed(64));
-static_assert(TSFixed32 * UINT64_C(2) == TypeSize::Fixed(64));
+static_assert(TSFixed32 * 2 == TypeSize::getFixed(64));
+static_assert(TSFixed32 * 2u == TypeSize::getFixed(64));
+static_assert(TSFixed32 * INT64_C(2) == TypeSize::getFixed(64));
+static_assert(TSFixed32 * UINT64_C(2) == TypeSize::getFixed(64));
-static_assert(2 * TSFixed32 == TypeSize::Fixed(64));
-static_assert(2u * TSFixed32 == TypeSize::Fixed(64));
-static_assert(INT64_C(2) * TSFixed32 == TypeSize::Fixed(64));
-static_assert(UINT64_C(2) * TSFixed32 == TypeSize::Fixed(64));
-static_assert(alignTo(TypeSize::Fixed(7), 8) == TypeSize::Fixed(8));
+static_assert(2 * TSFixed32 == TypeSize::getFixed(64));
+static_assert(2u * TSFixed32 == TypeSize::getFixed(64));
+static_assert(INT64_C(2) * TSFixed32 == TypeSize::getFixed(64));
+static_assert(UINT64_C(2) * TSFixed32 == TypeSize::getFixed(64));
+static_assert(alignTo(TypeSize::getFixed(7), 8) == TypeSize::getFixed(8));
+
+TEST(TypeSize, FailIncompatibleTypes) {
+ EXPECT_DEBUG_DEATH(TypeSize::getFixed(8) + TypeSize::getScalable(8),
+ "Incompatible types");
+}
} // namespace
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
index 8841aa8362569a0..0c7f62104e44cbe 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
@@ -963,16 +963,16 @@ llvm::TypeSize mlir::LLVM::getPrimitiveTypeSizeInBits(Type type) {
return llvm::TypeSwitch<Type, llvm::TypeSize>(type)
.Case<BFloat16Type, Float16Type>(
- [](Type) { return llvm::TypeSize::Fixed(16); })
- .Case<Float32Type>([](Type) { return llvm::TypeSize::Fixed(32); })
+ [](Type) { return llvm::TypeSize::getFixed(16); })
+ .Case<Float32Type>([](Type) { return llvm::TypeSize::getFixed(32); })
.Case<Float64Type, LLVMX86MMXType>(
- [](Type) { return llvm::TypeSize::Fixed(64); })
- .Case<Float80Type>([](Type) { return llvm::TypeSize::Fixed(80); })
- .Case<Float128Type>([](Type) { return llvm::TypeSize::Fixed(128); })
+ [](Type) { return llvm::TypeSize::getFixed(64); })
+ .Case<Float80Type>([](Type) { return llvm::TypeSize::getFixed(80); })
+ .Case<Float128Type>([](Type) { return llvm::TypeSize::getFixed(128); })
.Case<IntegerType>([](IntegerType intTy) {
- return llvm::TypeSize::Fixed(intTy.getWidth());
+ return llvm::TypeSize::getFixed(intTy.getWidth());
})
- .Case<LLVMPPCFP128Type>([](Type) { return llvm::TypeSize::Fixed(128); })
+ .Case<LLVMPPCFP128Type>([](Type) { return llvm::TypeSize::getFixed(128); })
.Case<LLVMFixedVectorType>([](LLVMFixedVectorType t) {
llvm::TypeSize elementSize =
getPrimitiveTypeSizeInBits(t.getElementType());
@@ -993,7 +993,7 @@ llvm::TypeSize mlir::LLVM::getPrimitiveTypeSizeInBits(Type type) {
LLVMPointerType, LLVMFunctionType, LLVMTargetExtType>(
ty)) &&
"unexpected missing support for primitive type");
- return llvm::TypeSize::Fixed(0);
+ return llvm::TypeSize::getFixed(0);
});
}
>From 500c40ed28c2c07fb715e0bb83244d8cf4b660af Mon Sep 17 00:00:00 2001
From: Sander de Smalen <sander.desmalen at arm.com>
Date: Tue, 21 Nov 2023 15:24:52 +0000
Subject: [PATCH 2/3] Fix formatting of MLIR file
---
mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
index 0c7f62104e44cbe..d7fe720d1bda63d 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
@@ -972,7 +972,8 @@ llvm::TypeSize mlir::LLVM::getPrimitiveTypeSizeInBits(Type type) {
.Case<IntegerType>([](IntegerType intTy) {
return llvm::TypeSize::getFixed(intTy.getWidth());
})
- .Case<LLVMPPCFP128Type>([](Type) { return llvm::TypeSize::getFixed(128); })
+ .Case<LLVMPPCFP128Type>(
+ [](Type) { return llvm::TypeSize::getFixed(128); })
.Case<LLVMFixedVectorType>([](LLVMFixedVectorType t) {
llvm::TypeSize elementSize =
getPrimitiveTypeSizeInBits(t.getElementType());
>From af75da057f55590542ef0a6833b684e0c0f0497b Mon Sep 17 00:00:00 2001
From: Sander de Smalen <sander.desmalen at arm.com>
Date: Tue, 21 Nov 2023 13:51:09 +0000
Subject: [PATCH 3/3] [llvm][TypeSize] Consider TypeSize of '0' to be
fixed/scalable-agnostic.
This patch allows adding any quantity to a zero-initialized TypeSize, such
that e.g.:
  TypeSize::getScalable(0) + TypeSize::getFixed(4) == TypeSize::getFixed(4)
  TypeSize::getFixed(0) + TypeSize::getScalable(4) == TypeSize::getScalable(4)
This makes it easier to implement add-reductions using TypeSize where
the 'scalable' flag is not yet known before starting the reduction.
---
llvm/include/llvm/Support/TypeSize.h | 20 +++++++++++++++++---
llvm/unittests/Support/TypeSizeTest.cpp | 15 +++++++++++++++
2 files changed, 32 insertions(+), 3 deletions(-)
diff --git a/llvm/include/llvm/Support/TypeSize.h b/llvm/include/llvm/Support/TypeSize.h
index ada98d809fc236f..988c62dae66dfb7 100644
--- a/llvm/include/llvm/Support/TypeSize.h
+++ b/llvm/include/llvm/Support/TypeSize.h
@@ -97,17 +97,29 @@ template <typename LeafTy, typename ValueTy> class FixedOrScalableQuantity {
constexpr FixedOrScalableQuantity() = default;
constexpr FixedOrScalableQuantity(ScalarTy Quantity, bool Scalable)
- : Quantity(Quantity), Scalable(Scalable) {}
+ : Quantity(Quantity), Scalable(Quantity ? Scalable : false) {}
friend constexpr LeafTy &operator+=(LeafTy &LHS, const LeafTy &RHS) {
- assert(LHS.Scalable == RHS.Scalable && "Incompatible types");
+ assert((LHS.Quantity == 0 || RHS.Quantity == 0 ||
+ LHS.Scalable == RHS.Scalable) &&
+ "Incompatible types");
LHS.Quantity += RHS.Quantity;
+ if (!LHS.Quantity)
+ LHS.Scalable = false;
+ else if (RHS.Quantity)
+ LHS.Scalable = RHS.Scalable;
return LHS;
}
friend constexpr LeafTy &operator-=(LeafTy &LHS, const LeafTy &RHS) {
- assert(LHS.Scalable == RHS.Scalable && "Incompatible types");
+ assert((LHS.Quantity == 0 || RHS.Quantity == 0 ||
+ LHS.Scalable == RHS.Scalable) &&
+ "Incompatible types");
LHS.Quantity -= RHS.Quantity;
+ if (!LHS.Quantity)
+ LHS.Scalable = false;
+ else if (RHS.Quantity)
+ LHS.Scalable = RHS.Scalable;
return LHS;
}
@@ -315,6 +327,8 @@ class TypeSize : public details::FixedOrScalableQuantity<TypeSize, uint64_t> {
: FixedOrScalableQuantity(V) {}
public:
+ constexpr TypeSize() : FixedOrScalableQuantity(0, false) {}
+
constexpr TypeSize(ScalarTy Quantity, bool Scalable)
: FixedOrScalableQuantity(Quantity, Scalable) {}
diff --git a/llvm/unittests/Support/TypeSizeTest.cpp b/llvm/unittests/Support/TypeSizeTest.cpp
index 33169a3d8b198c0..f1cdd8e22779e8c 100644
--- a/llvm/unittests/Support/TypeSizeTest.cpp
+++ b/llvm/unittests/Support/TypeSizeTest.cpp
@@ -81,6 +81,21 @@ static_assert(INT64_C(2) * TSFixed32 == TypeSize::getFixed(64));
static_assert(UINT64_C(2) * TSFixed32 == TypeSize::getFixed(64));
static_assert(alignTo(TypeSize::getFixed(7), 8) == TypeSize::getFixed(8));
+static_assert(TypeSize() == TypeSize::getFixed(0));
+static_assert(TypeSize() == TypeSize::getScalable(0));
+static_assert(TypeSize::getFixed(0) == TypeSize::getScalable(0));
+static_assert(TypeSize::getFixed(0) ==
+ (TypeSize::getScalable(4) - TypeSize::getScalable(4)));
+static_assert(!TypeSize().isScalable());
+static_assert(TypeSize::getFixed(0) + TypeSize::getScalable(8) ==
+ TypeSize::getScalable(8));
+static_assert(TypeSize::getScalable(8) + TypeSize::getFixed(0) ==
+ TypeSize::getScalable(8));
+static_assert(TypeSize::getFixed(8) + TypeSize::getScalable(0) ==
+ TypeSize::getFixed(8));
+static_assert(TypeSize::getScalable(0) + TypeSize::getFixed(8) ==
+ TypeSize::getFixed(8));
+
TEST(TypeSize, FailIncompatibleTypes) {
EXPECT_DEBUG_DEATH(TypeSize::getFixed(8) + TypeSize::getScalable(8),
"Incompatible types");
More information about the llvm-commits
mailing list