[llvm] 3334c35 - [TTI] Fix discrepancies in prototypes between interface and implementations (NFCI) (#136655)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Apr 22 01:40:16 PDT 2025
Author: Sergei Barannikov
Date: 2025-04-22T11:40:12+03:00
New Revision: 3334c3597dd51f5a102e5005738e3bf4ef7530e2
URL: https://github.com/llvm/llvm-project/commit/3334c3597dd51f5a102e5005738e3bf4ef7530e2
DIFF: https://github.com/llvm/llvm-project/commit/3334c3597dd51f5a102e5005738e3bf4ef7530e2.diff
LOG: [TTI] Fix discrepancies in prototypes between interface and implementations (NFCI) (#136655)
These discrepancies are not diagnosed because the implementations hide the base-class methods rather than overriding them. Hiding goes unnoticed as long as the hiding function is callable with the same arguments as the base-class function it hides.
Pull Request: https://github.com/llvm/llvm-project/pull/136655
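For illustration, here is a minimal standalone sketch of the hiding pattern the log describes (hypothetical names, not the actual TTI classes): because the CRTP-based TTI implementations use neither virtual functions nor the override keyword, a derived-class method with a diverging prototype silently hides the base-class one instead of triggering a diagnostic.

  struct Base {
    // "Interface" prototype with an extra parameter.
    bool isIndexedLoadLegal(int Mode, const void *DL) const { return false; }
  };

  struct Derived : Base {
    // Different prototype: this HIDES Base::isIndexedLoadLegal rather than
    // overriding it. No warning is emitted because nothing is virtual.
    bool isIndexedLoadLegal(int Mode) const { return true; }
  };

  int main() {
    Derived D;
    // Compiles and dispatches to the hiding function; a caller passing the
    // extra argument would need D.Base::isIndexedLoadLegal(0, nullptr).
    return D.isIndexedLoadLegal(0) ? 0 : 1;
  }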
Added:
Modified:
llvm/include/llvm/Analysis/TargetTransformInfo.h
llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
llvm/include/llvm/CodeGen/BasicTTIImpl.h
llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp
llvm/lib/Target/AMDGPU/R600TargetTransformInfo.h
llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
llvm/lib/Target/ARM/ARMTargetTransformInfo.h
llvm/lib/Target/BPF/BPFTargetTransformInfo.h
llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h
llvm/lib/Target/VE/VETargetTransformInfo.h
llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
llvm/lib/Target/X86/X86TargetTransformInfo.cpp
llvm/lib/Target/X86/X86TargetTransformInfo.h
Removed:
################################################################################
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index b5d766c34d09d..918a02fb48e70 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -3101,10 +3101,10 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
return Impl.areTypesABICompatible(Caller, Callee, Types);
}
bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const override {
- return Impl.isIndexedLoadLegal(Mode, Ty, getDataLayout());
+ return Impl.isIndexedLoadLegal(Mode, Ty);
}
bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const override {
- return Impl.isIndexedStoreLegal(Mode, Ty, getDataLayout());
+ return Impl.isIndexedStoreLegal(Mode, Ty);
}
unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const override {
return Impl.getLoadStoreVecRegBitWidth(AddrSpace);
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index d28803cf164a3..b46eb349c2249 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -471,7 +471,9 @@ class TargetTransformInfoImplBase {
bool haveFastSqrt(Type *Ty) const { return false; }
- bool isExpensiveToSpeculativelyExecute(const Instruction *I) { return true; }
+ bool isExpensiveToSpeculativelyExecute(const Instruction *I) const {
+ return true;
+ }
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const { return true; }
@@ -745,9 +747,10 @@ class TargetTransformInfoImplBase {
return 1;
}
- unsigned getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
- const APInt &DemandedDstElts,
- TTI::TargetCostKind CostKind) const {
+ InstructionCost
+ getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
+ const APInt &DemandedDstElts,
+ TTI::TargetCostKind CostKind) const {
return 1;
}
@@ -805,7 +808,7 @@ class TargetTransformInfoImplBase {
return InstructionCost::getInvalid();
}
- unsigned getInterleavedMemoryOpCost(
+ InstructionCost getInterleavedMemoryOpCost(
unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
bool UseMaskForCond, bool UseMaskForGaps) const {
@@ -965,13 +968,11 @@ class TargetTransformInfoImplBase {
Callee->getFnAttribute("target-features"));
}
- bool isIndexedLoadLegal(TTI::MemIndexedMode Mode, Type *Ty,
- const DataLayout &DL) const {
+ bool isIndexedLoadLegal(TTI::MemIndexedMode Mode, Type *Ty) const {
return false;
}
- bool isIndexedStoreLegal(TTI::MemIndexedMode Mode, Type *Ty,
- const DataLayout &DL) const {
+ bool isIndexedStoreLegal(TTI::MemIndexedMode Mode, Type *Ty) const {
return false;
}
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index ca32d36297beb..db5fb2f7f1a54 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -475,19 +475,17 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
return VF;
}
- bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty,
- const DataLayout &DL) const {
+ bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty) const {
EVT VT = getTLI()->getValueType(DL, Ty);
return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
}
- bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty,
- const DataLayout &DL) const {
+ bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty) const {
EVT VT = getTLI()->getValueType(DL, Ty);
return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
}
- bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) const {
+ bool isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const {
return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
}
@@ -1468,7 +1466,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
}
InstructionCost getMemoryOpCost(
- unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
+ unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
const Instruction *I = nullptr) const {
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 720daa384968c..594f1bff5c458 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -4367,7 +4367,7 @@ bool AArch64TTIImpl::useNeonVector(const Type *Ty) const {
}
InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
- MaybeAlign Alignment,
+ Align Alignment,
unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo,
@@ -4402,7 +4402,7 @@ InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
return 1;
if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
- LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) {
+ LT.second.is128BitVector() && Alignment < Align(16)) {
// Unaligned stores are extremely inefficient. We don't split all
// unaligned 128-bit stores because the negative impact that has shown in
// practice on inlined block copy code.
@@ -4429,8 +4429,7 @@ InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
EVT EltVT = VT.getVectorElementType();
unsigned EltSize = EltVT.getScalarSizeInBits();
if (!isPowerOf2_32(EltSize) || EltSize < 8 || EltSize > 64 ||
- VT.getVectorNumElements() >= (128 / EltSize) || !Alignment ||
- *Alignment != Align(1))
+ VT.getVectorNumElements() >= (128 / EltSize) || Alignment != Align(1))
return LT.first;
// FIXME: v3i8 lowering currently is very inefficient, due to automatic
// widening to v4i8, which produces suboptimal results.
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index 44b076b9a1c75..7da2820bee323 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -249,7 +249,7 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
bool useNeonVector(const Type *Ty) const;
InstructionCost getMemoryOpCost(
- unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
+ unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
const Instruction *I = nullptr) const;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 919e73a2c534d..cc2ca77ffb792 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -274,7 +274,7 @@ void AMDGPUTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
BaseT::getPeelingPreferences(L, SE, PP);
}
-int64_t AMDGPUTTIImpl::getMaxMemIntrinsicInlineSizeThreshold() const {
+uint64_t AMDGPUTTIImpl::getMaxMemIntrinsicInlineSizeThreshold() const {
return 1024;
}
@@ -412,7 +412,7 @@ bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
}
-int64_t GCNTTIImpl::getMaxMemIntrinsicInlineSizeThreshold() const {
+uint64_t GCNTTIImpl::getMaxMemIntrinsicInlineSizeThreshold() const {
return 1024;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
index 1a575c92cce61..57682ff8ed76f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -57,7 +57,7 @@ class AMDGPUTTIImpl final : public BasicTTIImplBase<AMDGPUTTIImpl> {
void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
TTI::PeelingPreferences &PP) const;
- int64_t getMaxMemIntrinsicInlineSizeThreshold() const;
+ uint64_t getMaxMemIntrinsicInlineSizeThreshold() const;
};
class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
@@ -137,7 +137,7 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
unsigned AddrSpace) const;
- int64_t getMaxMemIntrinsicInlineSizeThreshold() const;
+ uint64_t getMaxMemIntrinsicInlineSizeThreshold() const;
Type *
getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
unsigned SrcAddrSpace, unsigned DestAddrSpace,
diff --git a/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp
index 6cc2611a8d1a8..bbfd189d08a76 100644
--- a/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.cpp
@@ -32,7 +32,8 @@ unsigned R600TTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
}
-unsigned R600TTIImpl::getNumberOfRegisters(bool Vec) const {
+unsigned R600TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
+ bool Vec = ClassID == 1;
return getHardwareNumberOfRegisters(Vec);
}
diff --git a/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.h b/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.h
index 527b225c4eed3..163d01516efa7 100644
--- a/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.h
+++ b/llvm/lib/Target/AMDGPU/R600TargetTransformInfo.h
@@ -47,7 +47,7 @@ class R600TTIImpl final : public BasicTTIImplBase<R600TTIImpl> {
void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
TTI::PeelingPreferences &PP) const;
unsigned getHardwareNumberOfRegisters(bool Vec) const;
- unsigned getNumberOfRegisters(bool Vec) const;
+ unsigned getNumberOfRegisters(unsigned ClassID) const;
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind Vector) const;
unsigned getMinVectorRegisterBitWidth() const;
unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 89740a3d7f477..76eb01033c3cb 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1545,7 +1545,7 @@ InstructionCost ARMTTIImpl::getArithmeticInstrCost(
}
InstructionCost ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
- MaybeAlign Alignment,
+ Align Alignment,
unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo,
@@ -1559,8 +1559,7 @@ InstructionCost ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
CostKind);
- if (ST->hasNEON() && Src->isVectorTy() &&
- (Alignment && *Alignment != Align(16)) &&
+ if (ST->hasNEON() && Src->isVectorTy() && Alignment != Align(16) &&
cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
// Unaligned loads/stores are extremely inefficient.
// We need 4 uops for vst.1/vld.1 vs 1uop for vldr/vstr.
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index b594f07e6a5a9..2a3ad431cc169 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -263,7 +263,7 @@ class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
const Instruction *CxtI = nullptr) const;
InstructionCost getMemoryOpCost(
- unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
+ unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
const Instruction *I = nullptr) const;
diff --git a/llvm/lib/Target/BPF/BPFTargetTransformInfo.h b/llvm/lib/Target/BPF/BPFTargetTransformInfo.h
index 05a53c8bab824..9667f7e8f9e64 100644
--- a/llvm/lib/Target/BPF/BPFTargetTransformInfo.h
+++ b/llvm/lib/Target/BPF/BPFTargetTransformInfo.h
@@ -37,8 +37,8 @@ class BPFTTIImpl : public BasicTTIImplBase<BPFTTIImpl> {
: BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
TLI(ST->getTargetLowering()) {}
- int getIntImmCost(const APInt &Imm, Type *Ty,
- TTI::TargetCostKind CostKind) const {
+ InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
+ TTI::TargetCostKind CostKind) const {
if (Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
return TTI::TCC_Free;
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
index 7825024f9ac0f..d6c1750e862c2 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
@@ -104,7 +104,8 @@ HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
/// --- Vector TTI begin ---
-unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
+unsigned HexagonTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
+ bool Vector = ClassID == 1;
if (Vector)
return useHVX() ? 32 : 0;
return 32;
@@ -162,7 +163,7 @@ InstructionCost HexagonTTIImpl::getAddressComputationCost(Type *Tp,
}
InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
- MaybeAlign Alignment,
+ Align Alignment,
unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo,
@@ -189,10 +190,9 @@ InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
return VecWidth / RegWidth;
// Cost of constructing HVX vector from scalar loads
const Align RegAlign(RegWidth / 8);
- if (!Alignment || *Alignment > RegAlign)
+ if (Alignment > RegAlign)
Alignment = RegAlign;
- assert(Alignment);
- unsigned AlignWidth = 8 * Alignment->value();
+ unsigned AlignWidth = 8 * Alignment.value();
unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
return 3 * NumLoads;
}
@@ -203,7 +203,7 @@ InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;
// At this point unspecified alignment is considered as Align(1).
- const Align BoundAlignment = std::min(Alignment.valueOrOne(), Align(8));
+ const Align BoundAlignment = std::min(Alignment, Align(8));
unsigned AlignWidth = 8 * BoundAlignment.value();
unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
if (Alignment == Align(4) || Alignment == Align(8))
@@ -226,12 +226,10 @@ HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
CostKind);
}
-InstructionCost HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
- ArrayRef<int> Mask,
- TTI::TargetCostKind CostKind,
- int Index, Type *SubTp,
- ArrayRef<const Value *> Args,
- const Instruction *CxtI) const {
+InstructionCost HexagonTTIImpl::getShuffleCost(
+ TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef<int> Mask,
+ TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
+ ArrayRef<const Value *> Args, const Instruction *CxtI) const {
return 1;
}
@@ -251,8 +249,7 @@ InstructionCost HexagonTTIImpl::getInterleavedMemoryOpCost(
Alignment, AddressSpace,
CostKind,
UseMaskForCond, UseMaskForGaps);
- return getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
- CostKind);
+ return getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, CostKind);
}
InstructionCost HexagonTTIImpl::getCmpSelInstrCost(
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
index 9d0c3843e2a7a..b9dc41b6f4fe1 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
+++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
@@ -81,7 +81,7 @@ class HexagonTTIImpl : public BasicTTIImplBase<HexagonTTIImpl> {
/// \name Vector TTI Implementations
/// @{
- unsigned getNumberOfRegisters(bool vector) const;
+ unsigned getNumberOfRegisters(unsigned ClassID) const;
unsigned getMaxInterleaveFactor(ElementCount VF) const;
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
unsigned getMinVectorRegisterBitWidth() const;
@@ -107,17 +107,18 @@ class HexagonTTIImpl : public BasicTTIImplBase<HexagonTTIImpl> {
InstructionCost getAddressComputationCost(Type *Tp, ScalarEvolution *SE,
const SCEV *S) const;
InstructionCost getMemoryOpCost(
- unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
+ unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
const Instruction *I = nullptr) const;
InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind) const;
- InstructionCost getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
+ InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
ArrayRef<int> Mask,
TTI::TargetCostKind CostKind, int Index,
- Type *SubTp, ArrayRef<const Value *> Args = {},
+ VectorType *SubTp,
+ ArrayRef<const Value *> Args = {},
const Instruction *CxtI = nullptr) const;
InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
const Value *Ptr, bool VariableMask,
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
index ecd45a01ffbf2..b57efe8360d4e 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
@@ -74,7 +74,7 @@ class NVPTXTTIImpl : public BasicTTIImplBase<NVPTXTTIImpl> {
// vectorizers but disables heuristics based on the number of registers.
// FIXME: Return a more reasonable number, while keeping an eye on
// LoopVectorizer's unrolling heuristics.
- unsigned getNumberOfRegisters(bool Vector) const { return 1; }
+ unsigned getNumberOfRegisters(unsigned ClassID) const { return 1; }
// Only <2 x half> should be vectorized, so always return 32 for the vector
// register size.
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index 762e2d4d28d27..eb487bdaa88b9 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -599,10 +599,10 @@ InstructionCost PPCTTIImpl::getArithmeticInstrCost(
return Cost * CostFactor;
}
-InstructionCost PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
- ArrayRef<int> Mask,
+InstructionCost PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
+ VectorType *Tp, ArrayRef<int> Mask,
TTI::TargetCostKind CostKind,
- int Index, Type *SubTp,
+ int Index, VectorType *SubTp,
ArrayRef<const Value *> Args,
const Instruction *CxtI) const {
@@ -759,7 +759,7 @@ InstructionCost PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
}
InstructionCost PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
- MaybeAlign Alignment,
+ Align Alignment,
unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo,
@@ -803,13 +803,12 @@ InstructionCost PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
return 1;
// Use lfiwax/xxspltw
- Align AlignBytes = Alignment ? *Alignment : Align(1);
- if (Opcode == Instruction::Load && MemBits == 32 && AlignBytes < SrcBytes)
+ if (Opcode == Instruction::Load && MemBits == 32 && Alignment < SrcBytes)
return 2;
}
// Aligned loads and stores are easy.
- if (!SrcBytes || !Alignment || *Alignment >= SrcBytes)
+ if (!SrcBytes || Alignment >= SrcBytes)
return Cost;
// If we can use the permutation-based load sequence, then this is also
@@ -820,7 +819,7 @@ InstructionCost PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
// than using the permutation-based load sequence. On the P8, that's no
// longer true.
if (Opcode == Instruction::Load && (!ST->hasP8Vector() && IsAltivecType) &&
- *Alignment >= LT.second.getScalarType().getStoreSize())
+ Alignment >= LT.second.getScalarType().getStoreSize())
return Cost + LT.first; // Add the cost of the permutations.
// For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
@@ -838,8 +837,7 @@ InstructionCost PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
// to be decomposed based on the alignment factor.
// Add the cost of each scalar load or store.
- assert(Alignment);
- Cost += LT.first * ((SrcBytes / Alignment->value()) - 1);
+ Cost += LT.first * ((SrcBytes / Alignment.value()) - 1);
// For a vector type, there is also scalarization overhead (only for
// stores, loads are expanded using the vector-load + permutation sequence,
@@ -874,8 +872,8 @@ InstructionCost PPCTTIImpl::getInterleavedMemoryOpCost(
std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VecTy);
// Firstly, the cost of load/store operation.
- InstructionCost Cost = getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment),
- AddressSpace, CostKind);
+ InstructionCost Cost =
+ getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, CostKind);
// PPC, for both Altivec/VSX, support cheap arbitrary permutations
// (at least in the sense that there need only be one non-loop-invariant
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
index 9e4c40bb39295..d5428c7e4e24c 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.h
@@ -107,10 +107,11 @@ class PPCTTIImpl : public BasicTTIImplBase<PPCTTIImpl> {
TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
ArrayRef<const Value *> Args = {},
const Instruction *CxtI = nullptr) const;
- InstructionCost getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
+ InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
ArrayRef<int> Mask,
TTI::TargetCostKind CostKind, int Index,
- Type *SubTp, ArrayRef<const Value *> Args = {},
+ VectorType *SubTp,
+ ArrayRef<const Value *> Args = {},
const Instruction *CxtI = nullptr) const;
InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
TTI::CastContextHint CCH,
@@ -130,7 +131,7 @@ class PPCTTIImpl : public BasicTTIImplBase<PPCTTIImpl> {
unsigned Index, Value *Op0,
Value *Op1) const;
InstructionCost getMemoryOpCost(
- unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
+ unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
const Instruction *I = nullptr) const;
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index a6a14837bf473..b7700a57fa10c 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -1952,7 +1952,7 @@ RISCVTTIImpl::getStoreImmCost(Type *Ty, TTI::OperandValueInfo OpInfo,
}
InstructionCost RISCVTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
- MaybeAlign Alignment,
+ Align Alignment,
unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo,
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 21f00e80608b1..43a367b3d3e5a 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -215,7 +215,7 @@ class RISCVTTIImpl : public BasicTTIImplBase<RISCVTTIImpl> {
TTI::TargetCostKind CostKind) const;
InstructionCost getMemoryOpCost(
- unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
+ unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpdInfo = {TTI::OK_AnyValue, TTI::OP_None},
const Instruction *I = nullptr) const;
diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
index bfc50a02558cf..53270ac096bb0 100644
--- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
@@ -372,8 +372,8 @@ void SystemZTTIImpl::getUnrollingPreferences(
}
if (isa<StoreInst>(&I)) {
Type *MemAccessTy = I.getOperand(0)->getType();
- NumStores += getMemoryOpCost(Instruction::Store, MemAccessTy,
- std::nullopt, 0, TTI::TCK_RecipThroughput);
+ NumStores += getMemoryOpCost(Instruction::Store, MemAccessTy, Align(),
+ 0, TTI::TCK_RecipThroughput);
}
}
@@ -1298,7 +1298,7 @@ static bool isBswapIntrinsicCall(const Value *V) {
}
InstructionCost SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
- MaybeAlign Alignment,
+ Align Alignment,
unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo,
diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h
index 99faab7194892..cb993b7b2df11 100644
--- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.h
@@ -129,7 +129,7 @@ class SystemZTTIImpl : public BasicTTIImplBase<SystemZTTIImpl> {
bool isFoldableLoad(const LoadInst *Ld,
const Instruction *&FoldedValue) const;
InstructionCost getMemoryOpCost(
- unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
+ unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
const Instruction *I = nullptr) const;
diff --git a/llvm/lib/Target/VE/VETargetTransformInfo.h b/llvm/lib/Target/VE/VETargetTransformInfo.h
index 88b46ff128383..93092ae7221b9 100644
--- a/llvm/lib/Target/VE/VETargetTransformInfo.h
+++ b/llvm/lib/Target/VE/VETargetTransformInfo.h
@@ -133,18 +133,18 @@ class VETTIImpl : public BasicTTIImplBase<VETTIImpl> {
}
// Load & Store {
- bool isLegalMaskedLoad(Type *DataType, MaybeAlign Alignment,
+ bool isLegalMaskedLoad(Type *DataType, Align Alignment,
unsigned /*AddressSpace*/) const {
return isVectorLaneType(*getLaneType(DataType));
}
- bool isLegalMaskedStore(Type *DataType, MaybeAlign Alignment,
+ bool isLegalMaskedStore(Type *DataType, Align Alignment,
unsigned /*AddressSpace*/) const {
return isVectorLaneType(*getLaneType(DataType));
}
- bool isLegalMaskedGather(Type *DataType, MaybeAlign Alignment) const {
+ bool isLegalMaskedGather(Type *DataType, Align Alignment) const {
return isVectorLaneType(*getLaneType(DataType));
};
- bool isLegalMaskedScatter(Type *DataType, MaybeAlign Alignment) const {
+ bool isLegalMaskedScatter(Type *DataType, Align Alignment) const {
return isVectorLaneType(*getLaneType(DataType));
}
// } Load & Store
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
index 406daa8a6f9b8..49aa94956bda6 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
@@ -142,7 +142,7 @@ InstructionCost WebAssemblyTTIImpl::getCastInstrCost(
}
InstructionCost WebAssemblyTTIImpl::getMemoryOpCost(
- unsigned Opcode, Type *Ty, MaybeAlign Alignment, unsigned AddressSpace,
+ unsigned Opcode, Type *Ty, Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo,
const Instruction *I) const {
if (!ST->hasSIMD128() || !isa<FixedVectorType>(Ty)) {
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
index b8b6051ec5f28..cb9adf7d1d5e9 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
@@ -73,7 +73,7 @@ class WebAssemblyTTIImpl final : public BasicTTIImplBase<WebAssemblyTTIImpl> {
TTI::TargetCostKind CostKind,
const Instruction *I = nullptr) const;
InstructionCost getMemoryOpCost(
- unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
+ unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
const Instruction *I = nullptr) const;
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index 363019986d76c..2375a8f992aa5 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -5184,7 +5184,7 @@ X86TTIImpl::getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
}
InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
- MaybeAlign Alignment,
+ Align Alignment,
unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo,
@@ -5293,8 +5293,7 @@ InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
// Can we use this vector size, as per the remaining element count?
// Iff the vector is naturally aligned, we can do a wide load regardless.
if (NumEltRemaining < CurrNumEltPerOp &&
- (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) &&
- CurrOpSizeBytes != 1)
+ (!IsLoad || Alignment < CurrOpSizeBytes) && CurrOpSizeBytes != 1)
break; // Try smalled vector size.
// This isn't exactly right. We're using slow unaligned 32-byte accesses
@@ -5344,7 +5343,7 @@ InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
SubVecEltsLeft -= CurrNumEltPerOp;
NumEltRemaining -= CurrNumEltPerOp;
- Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes);
+ Alignment = commonAlignment(Alignment, CurrOpSizeBytes);
}
}
@@ -6183,8 +6182,7 @@ InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode,
const int GSOverhead = (Opcode == Instruction::Load) ? getGatherOverhead()
: getScatterOverhead();
return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
- MaybeAlign(Alignment), AddressSpace,
- CostKind);
+ Alignment, AddressSpace, CostKind);
}
/// Calculate the cost of Gather / Scatter operation
@@ -6596,8 +6594,8 @@ InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
MemOpCost = getMaskedMemoryOpCost(Opcode, SingleMemOpTy, Alignment,
AddressSpace, CostKind);
else
- MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy, MaybeAlign(Alignment),
- AddressSpace, CostKind);
+ MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace,
+ CostKind);
unsigned VF = VecTy->getNumElements() / Factor;
MVT VT =
@@ -6780,8 +6778,8 @@ InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
// Get the cost of all the memory operations.
// FIXME: discount dead loads.
- InstructionCost MemOpCosts = getMemoryOpCost(
- Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, CostKind);
+ InstructionCost MemOpCosts =
+ getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, CostKind);
auto *VT = FixedVectorType::get(ScalarTy, VF);
EVT ETy = TLI->getValueType(DL, VT);
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h
index 1534301667ff1..ef7b7e90eeb5a 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.h
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h
@@ -178,7 +178,7 @@ class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
const APInt &DemandedDstElts,
TTI::TargetCostKind CostKind) const;
InstructionCost getMemoryOpCost(
- unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
+ unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
const Instruction *I = nullptr) const;
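As an aside on the recurring MaybeAlign -> Align change above, a rough sketch of why the plain Align parameter simplifies call sites (assumed rationale, not stated in the log; llvm::Align and llvm::MaybeAlign are real LLVM types, but the two cost functions here are made up):

  #include "llvm/Support/Alignment.h"
  using llvm::Align;
  using llvm::MaybeAlign;

  // Old style: the callee must resolve the "alignment unknown" case itself.
  unsigned costOld(MaybeAlign A) {
    return A.valueOrOne().value(); // unspecified alignment treated as 1
  }

  // New style: the caller resolves the alignment up front, so comparisons
  // such as A < Align(16) need no null checks or dereferences.
  unsigned costNew(Align A) {
    return A.value();
  }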