[llvm] r357731 - [IR] Refactor attribute methods in Function class (NFC)
Evandro Menezes via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 4 15:40:07 PDT 2019
Author: evandro
Date: Thu Apr 4 15:40:06 2019
New Revision: 357731
URL: http://llvm.org/viewvc/llvm-project?rev=357731&view=rev
Log:
[IR] Refactor attribute methods in Function class (NFC)
Rename the functions that query the optimization kind attributes.
Differential revision: https://reviews.llvm.org/D60287
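
For readers skimming the diffs below, the renames boil down to the following mapping on llvm::Function; the method bodies mirror the include/llvm/IR/Function.h hunk in this commit, while the example caller (skipExpensiveTransform) is a hypothetical illustration of the new spelling, not part of the patch:

    // Renamed optimization-kind queries (sketch, matching the Function.h hunk):
    //   optForNone()    -> hasOptNone()  : function has the 'optnone' attribute (-O0)
    //   optForMinSize() -> hasMinSize()  : function has the 'minsize' attribute (-Oz)
    //   optForSize()    -> hasOptSize()  : 'optsize' (-Os) or 'minsize' (-Oz)
    //
    // Hypothetical caller showing the post-rename spelling:
    static bool skipExpensiveTransform(const llvm::Function &F) {
      if (F.hasOptNone())
        return true;          // never touch optnone functions
      return F.hasMinSize();  // back off when minimizing size (-Oz)
    }
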
Modified:
llvm/trunk/include/llvm/CodeGen/TargetLowering.h
llvm/trunk/include/llvm/IR/Function.h
llvm/trunk/lib/Analysis/GlobalsModRef.cpp
llvm/trunk/lib/Analysis/InlineCost.cpp
llvm/trunk/lib/Analysis/LoopPass.cpp
llvm/trunk/lib/Analysis/RegionPass.cpp
llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
llvm/trunk/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
llvm/trunk/lib/CodeGen/AtomicExpandPass.cpp
llvm/trunk/lib/CodeGen/BranchFolding.cpp
llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp
llvm/trunk/lib/CodeGen/ExpandMemCmp.cpp
llvm/trunk/lib/CodeGen/GlobalISel/RegBankSelect.cpp
llvm/trunk/lib/CodeGen/GlobalMerge.cpp
llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp
llvm/trunk/lib/CodeGen/MachineCombiner.cpp
llvm/trunk/lib/CodeGen/MachineFunction.cpp
llvm/trunk/lib/CodeGen/SafeStack.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
llvm/trunk/lib/CodeGen/TailDuplicator.cpp
llvm/trunk/lib/IR/Pass.cpp
llvm/trunk/lib/Target/AArch64/AArch64CompressJumpTables.cpp
llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp
llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp
llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td
llvm/trunk/lib/Target/ARM/ARMAsmPrinter.cpp
llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp
llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
llvm/trunk/lib/Target/ARM/ARMInstrInfo.td
llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
llvm/trunk/lib/Target/ARM/ARMSelectionDAGInfo.cpp
llvm/trunk/lib/Target/ARM/ARMSubtarget.h
llvm/trunk/lib/Target/ARM/ARMTargetMachine.cpp
llvm/trunk/lib/Target/ARM/ARMTargetTransformInfo.cpp
llvm/trunk/lib/Target/ARM/ARMTargetTransformInfo.h
llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp
llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp
llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp
llvm/trunk/lib/Target/X86/X86FixupLEAs.cpp
llvm/trunk/lib/Target/X86/X86FrameLowering.cpp
llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/lib/Target/X86/X86ISelLowering.h
llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
llvm/trunk/lib/Target/X86/X86InstrInfo.td
llvm/trunk/lib/Target/X86/X86OptimizeLEAs.cpp
llvm/trunk/lib/Target/X86/X86PadShortFunction.cpp
llvm/trunk/lib/Target/X86/X86SelectionDAGInfo.cpp
llvm/trunk/lib/Transforms/IPO/FunctionAttrs.cpp
llvm/trunk/lib/Transforms/IPO/HotColdSplitting.cpp
llvm/trunk/lib/Transforms/IPO/InferFunctionAttrs.cpp
llvm/trunk/lib/Transforms/IPO/Inliner.cpp
llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
llvm/trunk/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
llvm/trunk/lib/Transforms/Scalar/ConstantHoisting.cpp
llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
llvm/trunk/lib/Transforms/Scalar/LoopLoadElimination.cpp
llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp
llvm/trunk/lib/Transforms/Scalar/LoopUnswitch.cpp
llvm/trunk/lib/Transforms/Scalar/WarnMissedTransforms.cpp
llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp
llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp
Modified: llvm/trunk/include/llvm/CodeGen/TargetLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/TargetLowering.h?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/TargetLowering.h (original)
+++ llvm/trunk/include/llvm/CodeGen/TargetLowering.h Thu Apr 4 15:40:06 2019
@@ -953,7 +953,7 @@ public:
/// getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
uint64_t Range) const {
- const bool OptForSize = SI->getParent()->getParent()->optForSize();
+ const bool OptForSize = SI->getParent()->getParent()->hasOptSize();
const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
const unsigned MaxJumpTableSize =
OptForSize ? UINT_MAX : getMaximumJumpTableSize();
Modified: llvm/trunk/include/llvm/IR/Function.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/Function.h?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/Function.h (original)
+++ llvm/trunk/include/llvm/IR/Function.h Thu Apr 4 15:40:06 2019
@@ -591,14 +591,14 @@ public:
}
/// Do not optimize this function (-O0).
- bool optForNone() const { return hasFnAttribute(Attribute::OptimizeNone); }
+ bool hasOptNone() const { return hasFnAttribute(Attribute::OptimizeNone); }
/// Optimize this function for minimum size (-Oz).
- bool optForMinSize() const { return hasFnAttribute(Attribute::MinSize); }
+ bool hasMinSize() const { return hasFnAttribute(Attribute::MinSize); }
/// Optimize this function for size (-Os) or minimum size (-Oz).
- bool optForSize() const {
- return hasFnAttribute(Attribute::OptimizeForSize) || optForMinSize();
+ bool hasOptSize() const {
+ return hasFnAttribute(Attribute::OptimizeForSize) || hasMinSize();
}
/// copyAttributesFrom - copy all additional attributes (those not needed to
Modified: llvm/trunk/lib/Analysis/GlobalsModRef.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/GlobalsModRef.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/GlobalsModRef.cpp (original)
+++ llvm/trunk/lib/Analysis/GlobalsModRef.cpp Thu Apr 4 15:40:06 2019
@@ -513,7 +513,7 @@ void GlobalsAAResult::AnalyzeCallGraph(C
break;
}
- if (F->isDeclaration() || F->optForNone()) {
+ if (F->isDeclaration() || F->hasOptNone()) {
// Try to get mod/ref behaviour from function attributes.
if (F->doesNotAccessMemory()) {
// Can't do better than that!
@@ -566,7 +566,7 @@ void GlobalsAAResult::AnalyzeCallGraph(C
// Don't prove any properties based on the implementation of an optnone
// function. Function attributes were already used as a best approximation
// above.
- if (Node->getFunction()->optForNone())
+ if (Node->getFunction()->hasOptNone())
continue;
for (Instruction &I : instructions(Node->getFunction())) {
Modified: llvm/trunk/lib/Analysis/InlineCost.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/InlineCost.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/InlineCost.cpp (original)
+++ llvm/trunk/lib/Analysis/InlineCost.cpp Thu Apr 4 15:40:06 2019
@@ -897,7 +897,7 @@ void CallAnalyzer::updateThreshold(CallS
// Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available
// and reduce the threshold if the caller has the necessary attribute.
- if (Caller->optForMinSize()) {
+ if (Caller->hasMinSize()) {
Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold);
// For minsize, we want to disable the single BB bonus and the vector
// bonuses, but not the last-call-to-static bonus. Inlining the last call to
@@ -905,12 +905,12 @@ void CallAnalyzer::updateThreshold(CallS
// call/return instructions.
SingleBBBonusPercent = 0;
VectorBonusPercent = 0;
- } else if (Caller->optForSize())
+ } else if (Caller->hasOptSize())
Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);
// Adjust the threshold based on inlinehint attribute and profile based
// hotness information if the caller does not have MinSize attribute.
- if (!Caller->optForMinSize()) {
+ if (!Caller->hasMinSize()) {
if (Callee.hasFnAttribute(Attribute::InlineHint))
Threshold = MaxIfValid(Threshold, Params.HintThreshold);
@@ -923,7 +923,7 @@ void CallAnalyzer::updateThreshold(CallS
// BlockFrequencyInfo is available.
BlockFrequencyInfo *CallerBFI = GetBFI ? &((*GetBFI)(*Caller)) : nullptr;
auto HotCallSiteThreshold = getHotCallSiteThreshold(CS, CallerBFI);
- if (!Caller->optForSize() && HotCallSiteThreshold) {
+ if (!Caller->hasOptSize() && HotCallSiteThreshold) {
LLVM_DEBUG(dbgs() << "Hot callsite.\n");
// FIXME: This should update the threshold only if it exceeds the
// current threshold, but AutoFDO + ThinLTO currently relies on this
@@ -1899,7 +1899,7 @@ InlineResult CallAnalyzer::analyzeCall(C
// size, we penalise any call sites that perform loops. We do this after all
// other costs here, so will likely only be dealing with relatively small
// functions (and hence DT and LI will hopefully be cheap).
- if (Caller->optForMinSize()) {
+ if (Caller->hasMinSize()) {
DominatorTree DT(F);
LoopInfo LI(DT);
int NumLoops = 0;
@@ -2036,7 +2036,7 @@ InlineCost llvm::getInlineCost(
return llvm::InlineCost::getNever("conflicting attributes");
// Don't inline this call if the caller has the optnone attribute.
- if (Caller->optForNone())
+ if (Caller->hasOptNone())
return llvm::InlineCost::getNever("optnone attribute");
// Don't inline a function that treats null pointer as valid into a caller
Modified: llvm/trunk/lib/Analysis/LoopPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/LoopPass.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/LoopPass.cpp (original)
+++ llvm/trunk/lib/Analysis/LoopPass.cpp Thu Apr 4 15:40:06 2019
@@ -396,7 +396,7 @@ bool LoopPass::skipLoop(const Loop *L) c
if (Gate.isEnabled() && !Gate.shouldRunPass(this, getDescription(*L)))
return true;
// Check for the OptimizeNone attribute.
- if (F->optForNone()) {
+ if (F->hasOptNone()) {
// FIXME: Report this to dbgs() only once per function.
LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName() << "' in function "
<< F->getName() << "\n");
Modified: llvm/trunk/lib/Analysis/RegionPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/RegionPass.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/RegionPass.cpp (original)
+++ llvm/trunk/lib/Analysis/RegionPass.cpp Thu Apr 4 15:40:06 2019
@@ -288,7 +288,7 @@ bool RegionPass::skipRegion(Region &R) c
if (Gate.isEnabled() && !Gate.shouldRunPass(this, getDescription(R)))
return true;
- if (F.optForNone()) {
+ if (F.hasOptNone()) {
// Report this only once per function.
if (R.getEntry() == &F.getEntryBlock())
LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName()
Modified: llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp (original)
+++ llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp Thu Apr 4 15:40:06 2019
@@ -2865,7 +2865,7 @@ void AsmPrinter::setupCodePaddingContext
MCCodePaddingContext &Context) const {
assert(MF != nullptr && "Machine function must be valid");
Context.IsPaddingActive = !MF->hasInlineAsm() &&
- !MF->getFunction().optForSize() &&
+ !MF->getFunction().hasOptSize() &&
TM.getOptLevel() != CodeGenOpt::None;
Context.IsBasicBlockReachableViaFallthrough =
std::find(MBB.pred_begin(), MBB.pred_end(), MBB.getPrevNode()) !=
Modified: llvm/trunk/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp (original)
+++ llvm/trunk/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp Thu Apr 4 15:40:06 2019
@@ -1342,7 +1342,7 @@ void CodeViewDebug::beginFunctionImpl(co
FPO |= FrameProcedureOptions(uint32_t(CurFn->EncodedLocalFramePtrReg) << 14U);
FPO |= FrameProcedureOptions(uint32_t(CurFn->EncodedParamFramePtrReg) << 16U);
if (Asm->TM.getOptLevel() != CodeGenOpt::None &&
- !GV.optForSize() && !GV.optForNone())
+ !GV.hasOptSize() && !GV.hasOptNone())
FPO |= FrameProcedureOptions::OptimizedForSpeed;
// FIXME: Set GuardCfg when it is implemented.
CurFn->FrameProcOpts = FPO;
Modified: llvm/trunk/lib/CodeGen/AtomicExpandPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AtomicExpandPass.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AtomicExpandPass.cpp (original)
+++ llvm/trunk/lib/CodeGen/AtomicExpandPass.cpp Thu Apr 4 15:40:06 2019
@@ -1111,11 +1111,11 @@ bool AtomicExpand::expandAtomicCmpXchg(A
bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
SuccessOrder != AtomicOrdering::Monotonic &&
SuccessOrder != AtomicOrdering::Acquire &&
- !F->optForMinSize();
+ !F->hasMinSize();
// There's no overhead for sinking the release barrier in a weak cmpxchg, so
// do it even on minsize.
- bool UseUnconditionalReleaseBarrier = F->optForMinSize() && !CI->isWeak();
+ bool UseUnconditionalReleaseBarrier = F->hasMinSize() && !CI->isWeak();
// Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
//
Modified: llvm/trunk/lib/CodeGen/BranchFolding.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/BranchFolding.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/BranchFolding.cpp (original)
+++ llvm/trunk/lib/CodeGen/BranchFolding.cpp Thu Apr 4 15:40:06 2019
@@ -721,7 +721,7 @@ ProfitableToMerge(MachineBasicBlock *MBB
// branch instruction, which is likely to be smaller than the 2
// instructions that would be deleted in the merge.
MachineFunction *MF = MBB1->getParent();
- return EffectiveTailLen >= 2 && MF->getFunction().optForSize() &&
+ return EffectiveTailLen >= 2 && MF->getFunction().hasOptSize() &&
(I1 == MBB1->begin() || I2 == MBB2->begin());
}
@@ -1574,7 +1574,7 @@ ReoptimizeBlock:
}
if (!IsEmptyBlock(MBB) && MBB->pred_size() == 1 &&
- MF.getFunction().optForSize()) {
+ MF.getFunction().hasOptSize()) {
// Changing "Jcc foo; foo: jmp bar;" into "Jcc bar;" might change the branch
// direction, thereby defeating careful block placement and regressing
// performance. Therefore, only consider this for optsize functions.
Modified: llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp (original)
+++ llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp Thu Apr 4 15:40:06 2019
@@ -426,7 +426,7 @@ bool CodeGenPrepare::runOnFunction(Funct
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
BPI.reset(new BranchProbabilityInfo(F, *LI));
BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
- OptSize = F.optForSize();
+ OptSize = F.hasOptSize();
ProfileSummaryInfo *PSI =
&getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
@@ -4454,7 +4454,7 @@ static bool FindAllMemoryUses(
if (!MightBeFoldableInst(I))
return true;
- const bool OptSize = I->getFunction()->optForSize();
+ const bool OptSize = I->getFunction()->hasOptSize();
// Loop over all the uses, recursively processing them.
for (Use &U : I->uses()) {
Modified: llvm/trunk/lib/CodeGen/ExpandMemCmp.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/ExpandMemCmp.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/ExpandMemCmp.cpp (original)
+++ llvm/trunk/lib/CodeGen/ExpandMemCmp.cpp Thu Apr 4 15:40:06 2019
@@ -721,7 +721,7 @@ static bool expandMemCmp(CallInst *CI, c
NumMemCmpCalls++;
// Early exit from expansion if -Oz.
- if (CI->getFunction()->optForMinSize())
+ if (CI->getFunction()->hasMinSize())
return false;
// Early exit from expansion if size is not a constant.
@@ -742,7 +742,7 @@ static bool expandMemCmp(CallInst *CI, c
if (!Options) return false;
const unsigned MaxNumLoads =
- TLI->getMaxExpandSizeMemcmp(CI->getFunction()->optForSize());
+ TLI->getMaxExpandSizeMemcmp(CI->getFunction()->hasOptSize());
unsigned NumLoadsPerBlock = MemCmpEqZeroNumLoadsPerBlock.getNumOccurrences()
? MemCmpEqZeroNumLoadsPerBlock
Modified: llvm/trunk/lib/CodeGen/GlobalISel/RegBankSelect.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/RegBankSelect.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/RegBankSelect.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/RegBankSelect.cpp Thu Apr 4 15:40:06 2019
@@ -657,7 +657,7 @@ bool RegBankSelect::runOnMachineFunction
LLVM_DEBUG(dbgs() << "Assign register banks for: " << MF.getName() << '\n');
const Function &F = MF.getFunction();
Mode SaveOptMode = OptMode;
- if (F.optForNone())
+ if (F.hasOptNone())
OptMode = Mode::Fast;
init(MF);
Modified: llvm/trunk/lib/CodeGen/GlobalMerge.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalMerge.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalMerge.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalMerge.cpp Thu Apr 4 15:40:06 2019
@@ -330,7 +330,7 @@ bool GlobalMerge::doMerge(SmallVectorImp
Function *ParentFn = I->getParent()->getParent();
// If we're only optimizing for size, ignore non-minsize functions.
- if (OnlyOptimizeForSize && !ParentFn->optForMinSize())
+ if (OnlyOptimizeForSize && !ParentFn->hasMinSize())
continue;
size_t UGSIdx = GlobalUsesByFunction[ParentFn];
Modified: llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp Thu Apr 4 15:40:06 2019
@@ -1813,7 +1813,7 @@ MachineBlockPlacement::findBestLoopTop(c
// i.e. when the layout predecessor does not fallthrough to the loop header.
// In practice this never happens though: there always seems to be a preheader
// that can fallthrough and that is also placed before the header.
- if (F->getFunction().optForSize())
+ if (F->getFunction().hasOptSize())
return L.getHeader();
// Check that the header hasn't been fused with a preheader block due to
@@ -2561,8 +2561,8 @@ void MachineBlockPlacement::alignBlocks(
// exclusively on the loop info here so that we can align backedges in
// unnatural CFGs and backedges that were introduced purely because of the
// loop rotations done during this layout pass.
- if (F->getFunction().optForMinSize() ||
- (F->getFunction().optForSize() && !TLI->alignLoopsWithOptSize()))
+ if (F->getFunction().hasMinSize() ||
+ (F->getFunction().hasOptSize() && !TLI->alignLoopsWithOptSize()))
return;
BlockChain &FunctionChain = *BlockToChain[&F->front()];
if (FunctionChain.begin() == FunctionChain.end())
@@ -2837,7 +2837,7 @@ bool MachineBlockPlacement::runOnMachine
if (allowTailDupPlacement()) {
MPDT = &getAnalysis<MachinePostDominatorTree>();
- if (MF.getFunction().optForSize())
+ if (MF.getFunction().hasOptSize())
TailDupSize = 1;
bool PreRegAlloc = false;
TailDup.initMF(MF, PreRegAlloc, MBPI, /* LayoutMode */ true, TailDupSize);
Modified: llvm/trunk/lib/CodeGen/MachineCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineCombiner.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineCombiner.cpp Thu Apr 4 15:40:06 2019
@@ -637,7 +637,7 @@ bool MachineCombiner::runOnMachineFuncti
MLI = &getAnalysis<MachineLoopInfo>();
Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = nullptr;
- OptSize = MF.getFunction().optForSize();
+ OptSize = MF.getFunction().hasOptSize();
LLVM_DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
if (!TII->useMachineCombiner()) {
Modified: llvm/trunk/lib/CodeGen/MachineFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineFunction.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineFunction.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineFunction.cpp Thu Apr 4 15:40:06 2019
@@ -174,7 +174,7 @@ void MachineFunction::init() {
Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
// FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
- // FIXME: Use Function::optForSize().
+ // FIXME: Use Function::hasOptSize().
if (!F.hasFnAttribute(Attribute::OptimizeForSize))
Alignment = std::max(Alignment,
STI->getTargetLowering()->getPrefFunctionAlignment());
Modified: llvm/trunk/lib/CodeGen/SafeStack.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SafeStack.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SafeStack.cpp (original)
+++ llvm/trunk/lib/CodeGen/SafeStack.cpp Thu Apr 4 15:40:06 2019
@@ -728,7 +728,7 @@ void SafeStack::TryInlinePointerAddress(
if (!isa<CallInst>(UnsafeStackPtr))
return;
- if(F.optForNone())
+ if(F.hasOptNone())
return;
CallSite CS(UnsafeStackPtr);
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Thu Apr 4 15:40:06 2019
@@ -196,7 +196,7 @@ namespace {
DAGCombiner(SelectionDAG &D, AliasAnalysis *AA, CodeGenOpt::Level OL)
: DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
OptLevel(OL), AA(AA) {
- ForCodeSize = DAG.getMachineFunction().getFunction().optForSize();
+ ForCodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
MaximumLegalStoreInBits = 0;
for (MVT VT : MVT::all_valuetypes())
@@ -12188,7 +12188,7 @@ SDValue DAGCombiner::visitFPOW(SDNode *N
// Assume that libcalls are the smallest code.
// TODO: This restriction should probably be lifted for vectors.
- if (DAG.getMachineFunction().getFunction().optForSize())
+ if (DAG.getMachineFunction().getFunction().hasOptSize())
return SDValue();
// pow(X, 0.25) --> sqrt(sqrt(X))
@@ -19213,7 +19213,7 @@ SDValue DAGCombiner::SimplifySetCC(EVT V
SDValue DAGCombiner::BuildSDIV(SDNode *N) {
// when optimising for minimum size, we don't want to expand a div to a mul
// and a shift.
- if (DAG.getMachineFunction().getFunction().optForMinSize())
+ if (DAG.getMachineFunction().getFunction().hasMinSize())
return SDValue();
SmallVector<SDNode *, 8> Built;
@@ -19254,7 +19254,7 @@ SDValue DAGCombiner::BuildSDIVPow2(SDNod
SDValue DAGCombiner::BuildUDIV(SDNode *N) {
// when optimising for minimum size, we don't want to expand a div to a mul
// and a shift.
- if (DAG.getMachineFunction().getFunction().optForMinSize())
+ if (DAG.getMachineFunction().getFunction().hasMinSize())
return SDValue();
SmallVector<SDNode *, 8> Built;
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp Thu Apr 4 15:40:06 2019
@@ -3092,7 +3092,7 @@ bool SelectionDAGLegalize::ExpandNode(SD
// Check to see if this FP immediate is already legal.
// If this is a legal constant, turn it into a TargetConstantFP node.
if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0),
- DAG.getMachineFunction().getFunction().optForSize()))
+ DAG.getMachineFunction().getFunction().hasOptSize()))
Results.push_back(ExpandConstantFP(CFP, true));
break;
}
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Thu Apr 4 15:40:06 2019
@@ -1418,7 +1418,7 @@ SDValue SelectionDAG::getConstantPool(co
assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent globals");
if (Alignment == 0)
- Alignment = MF->getFunction().optForSize()
+ Alignment = MF->getFunction().hasOptSize()
? getDataLayout().getABITypeAlignment(C->getType())
: getDataLayout().getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
@@ -5657,8 +5657,8 @@ static bool shouldLowerMemFuncForSize(co
// On Darwin, -Os means optimize for size without hurting performance, so
// only really optimize for size when -Oz (MinSize) is used.
if (MF.getTarget().getTargetTriple().isOSDarwin())
- return MF.getFunction().optForMinSize();
- return MF.getFunction().optForSize();
+ return MF.getFunction().hasMinSize();
+ return MF.getFunction().hasOptSize();
}
static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp Thu Apr 4 15:40:06 2019
@@ -5220,7 +5220,7 @@ static SDValue ExpandPowI(const SDLoc &D
return DAG.getConstantFP(1.0, DL, LHS.getValueType());
const Function &F = DAG.getMachineFunction().getFunction();
- if (!F.optForSize() ||
+ if (!F.hasOptSize() ||
// If optimizing for size, don't insert too many multiplies.
// This inserts up to 5 multiplies.
countPopulation(Val) + Log2_32(Val) < 7) {
@@ -10617,7 +10617,7 @@ MachineBasicBlock *SelectionDAGBuilder::
// Don't perform if there is only one cluster or optimizing for size.
if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
TM.getOptLevel() == CodeGenOpt::None ||
- SwitchMBB->getParent()->getFunction().optForMinSize())
+ SwitchMBB->getParent()->getFunction().hasMinSize())
return SwitchMBB;
BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
@@ -10740,7 +10740,7 @@ void SelectionDAGBuilder::visitSwitch(co
unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
- !DefaultMBB->getParent()->getFunction().optForMinSize()) {
+ !DefaultMBB->getParent()->getFunction().hasMinSize()) {
// For optimized builds, lower large range as a balanced binary tree.
splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
continue;
Modified: llvm/trunk/lib/CodeGen/TailDuplicator.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TailDuplicator.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TailDuplicator.cpp (original)
+++ llvm/trunk/lib/CodeGen/TailDuplicator.cpp Thu Apr 4 15:40:06 2019
@@ -557,7 +557,7 @@ bool TailDuplicator::shouldTailDuplicate
unsigned MaxDuplicateCount;
if (TailDupSize == 0 &&
TailDuplicateSize.getNumOccurrences() == 0 &&
- MF->getFunction().optForSize())
+ MF->getFunction().hasOptSize())
MaxDuplicateCount = 1;
else if (TailDupSize == 0)
MaxDuplicateCount = TailDuplicateSize;
Modified: llvm/trunk/lib/IR/Pass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/Pass.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/IR/Pass.cpp (original)
+++ llvm/trunk/lib/IR/Pass.cpp Thu Apr 4 15:40:06 2019
@@ -168,7 +168,7 @@ bool FunctionPass::skipFunction(const Fu
if (Gate.isEnabled() && !Gate.shouldRunPass(this, getDescription(F)))
return true;
- if (F.optForNone()) {
+ if (F.hasOptNone()) {
LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName() << "' on function "
<< F.getName() << "\n");
return true;
@@ -207,7 +207,7 @@ bool BasicBlockPass::skipBasicBlock(cons
OptPassGate &Gate = F->getContext().getOptPassGate();
if (Gate.isEnabled() && !Gate.shouldRunPass(this, getDescription(BB)))
return true;
- if (F->optForNone()) {
+ if (F->hasOptNone()) {
// Report this only once per function.
if (&BB == &F->getEntryBlock())
LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName()
Modified: llvm/trunk/lib/Target/AArch64/AArch64CompressJumpTables.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64CompressJumpTables.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64CompressJumpTables.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64CompressJumpTables.cpp Thu Apr 4 15:40:06 2019
@@ -140,7 +140,7 @@ bool AArch64CompressJumpTables::runOnMac
const auto &ST = MF->getSubtarget<AArch64Subtarget>();
TII = ST.getInstrInfo();
- if (ST.force32BitJumpTables() && !MF->getFunction().optForMinSize())
+ if (ST.force32BitJumpTables() && !MF->getFunction().hasMinSize())
return false;
scanFunction();
Modified: llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp Thu Apr 4 15:40:06 2019
@@ -940,7 +940,7 @@ bool AArch64ConditionalCompares::runOnMa
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = nullptr;
- MinSize = MF.getFunction().optForMinSize();
+ MinSize = MF.getFunction().hasMinSize();
bool Changed = false;
CmpConv.runOnMachineFunction(MF, MBPI);
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp Thu Apr 4 15:40:06 2019
@@ -52,7 +52,7 @@ public:
}
bool runOnMachineFunction(MachineFunction &MF) override {
- ForCodeSize = MF.getFunction().optForSize();
+ ForCodeSize = MF.getFunction().hasOptSize();
Subtarget = &MF.getSubtarget<AArch64Subtarget>();
return SelectionDAGISel::runOnMachineFunction(MF);
}
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Thu Apr 4 15:40:06 2019
@@ -10382,7 +10382,7 @@ static SDValue splitStores(SDNode *N, Ta
return SDValue();
// Don't split at -Oz.
- if (DAG.getMachineFunction().getFunction().optForMinSize())
+ if (DAG.getMachineFunction().getFunction().hasMinSize())
return SDValue();
// Don't split v2i64 vectors. Memcpy lowering produces those and splitting
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h Thu Apr 4 15:40:06 2019
@@ -474,7 +474,7 @@ public:
}
bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
- if (DAG.getMachineFunction().getFunction().optForMinSize())
+ if (DAG.getMachineFunction().getFunction().hasMinSize())
return false;
return true;
}
Modified: llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp Thu Apr 4 15:40:06 2019
@@ -5486,7 +5486,7 @@ MachineBasicBlock::iterator AArch64Instr
bool AArch64InstrInfo::shouldOutlineFromFunctionByDefault(
MachineFunction &MF) const {
- return MF.getFunction().optForMinSize();
+ return MF.getFunction().hasMinSize();
}
#define GET_INSTRINFO_HELPERS
Modified: llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td Thu Apr 4 15:40:06 2019
@@ -407,10 +407,10 @@ def AArch64umaxv : SDNode<"AArch64ISD
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
- def ForCodeSize : Predicate<"MF->getFunction().optForSize()">;
- def NotForCodeSize : Predicate<"!MF->getFunction().optForSize()">;
+ def ForCodeSize : Predicate<"MF->getFunction().hasOptSize()">;
+ def NotForCodeSize : Predicate<"!MF->getFunction().hasOptSize()">;
// Avoid generating STRQro if it is slow, unless we're optimizing for code size.
- def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction().optForSize()">;
+ def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction().hasOptSize()">;
def UseBTI : Predicate<[{ MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>;
def NotUseBTI : Predicate<[{ !MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>;
Modified: llvm/trunk/lib/Target/ARM/ARMAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMAsmPrinter.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMAsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMAsmPrinter.cpp Thu Apr 4 15:40:06 2019
@@ -119,13 +119,13 @@ bool ARMAsmPrinter::runOnMachineFunction
// Calculate this function's optimization goal.
unsigned OptimizationGoal;
- if (F.optForNone())
+ if (F.hasOptNone())
// For best debugging illusion, speed and small size sacrificed
OptimizationGoal = 6;
- else if (F.optForMinSize())
+ else if (F.hasMinSize())
// Aggressively for small size, speed and debug illusion sacrificed
OptimizationGoal = 4;
- else if (F.optForSize())
+ else if (F.hasOptSize())
// For small size, but speed and debugging illusion preserved
OptimizationGoal = 3;
else if (TM.getOptLevel() == CodeGenOpt::Aggressive)
Modified: llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp Thu Apr 4 15:40:06 2019
@@ -1899,7 +1899,7 @@ isProfitableToIfCvt(MachineBasicBlock &M
// If we are optimizing for size, see if the branch in the predecessor can be
// lowered to cbn?z by the constant island lowering pass, and return false if
// so. This results in a shorter instruction sequence.
- if (MBB.getParent()->getFunction().optForSize()) {
+ if (MBB.getParent()->getFunction().hasOptSize()) {
MachineBasicBlock *Pred = *MBB.pred_begin();
if (!Pred->empty()) {
MachineInstr *LastMI = &*Pred->rbegin();
@@ -2267,7 +2267,7 @@ bool llvm::tryFoldSPUpdateIntoPushPop(co
unsigned NumBytes) {
// This optimisation potentially adds lots of load and store
// micro-operations, it's only really a great benefit to code-size.
- if (!Subtarget.optForMinSize())
+ if (!Subtarget.hasMinSize())
return false;
// If only one register is pushed/popped, LLVM can use an LDR/STR
@@ -4163,7 +4163,7 @@ int ARMBaseInstrInfo::getOperandLatencyI
// instructions).
if (Latency > 0 && Subtarget.isThumb2()) {
const MachineFunction *MF = DefMI.getParent()->getParent();
- // FIXME: Use Function::optForSize().
+ // FIXME: Use Function::hasOptSize().
if (MF->getFunction().hasFnAttribute(Attribute::OptimizeForSize))
--Latency;
}
Modified: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp Thu Apr 4 15:40:06 2019
@@ -2074,7 +2074,7 @@ ARMTargetLowering::LowerCall(TargetLower
auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
auto *BB = CLI.CS.getParent();
bool PreferIndirect =
- Subtarget->isThumb() && Subtarget->optForMinSize() &&
+ Subtarget->isThumb() && Subtarget->hasMinSize() &&
count_if(GV->users(), [&BB](const User *U) {
return isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB;
}) > 2;
@@ -2146,7 +2146,7 @@ ARMTargetLowering::LowerCall(TargetLower
CallOpc = ARMISD::CALL_NOLINK;
else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
// Emit regular call when code size is the priority
- !Subtarget->optForMinSize())
+ !Subtarget->hasMinSize())
// "mov lr, pc; b _foo" to avoid confusing the RSP
CallOpc = ARMISD::CALL_NOLINK;
else
@@ -7818,7 +7818,7 @@ ARMTargetLowering::BuildSDIVPow2(SDNode
return SDValue();
const auto &ST = static_cast<const ARMSubtarget&>(DAG.getSubtarget());
- const bool MinSize = ST.optForMinSize();
+ const bool MinSize = ST.hasMinSize();
const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode()
: ST.hasDivideInARMMode();
@@ -14826,7 +14826,7 @@ bool ARMTargetLowering::isCheapToSpecula
}
bool ARMTargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
- return !Subtarget->optForMinSize();
+ return !Subtarget->hasMinSize();
}
Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
Modified: llvm/trunk/lib/Target/ARM/ARMInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrInfo.td?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrInfo.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMInstrInfo.td Thu Apr 4 15:40:06 2019
@@ -361,7 +361,7 @@ let RecomputePerFunction = 1 in {
def UseFPVMLx: Predicate<"((Subtarget->useFPVMLx() &&"
" TM.Options.AllowFPOpFusion != FPOpFusion::Fast) ||"
- "Subtarget->optForMinSize())">;
+ "Subtarget->hasMinSize())">;
}
def UseMulOps : Predicate<"Subtarget->useMulOps()">;
Modified: llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp Thu Apr 4 15:40:06 2019
@@ -1294,7 +1294,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSM
// can still change to a writeback form as that will save us 2 bytes
// of code size. It can create WAW hazards though, so only do it if
// we're minimizing code size.
- if (!STI->optForMinSize() || !BaseKill)
+ if (!STI->hasMinSize() || !BaseKill)
return false;
bool HighRegsUsed = false;
Modified: llvm/trunk/lib/Target/ARM/ARMSelectionDAGInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMSelectionDAGInfo.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMSelectionDAGInfo.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMSelectionDAGInfo.cpp Thu Apr 4 15:40:06 2019
@@ -170,7 +170,7 @@ SDValue ARMSelectionDAGInfo::EmitTargetC
// Code size optimisation: do not inline memcpy if expansion results in
// more instructions than the libary call.
- if (NumMEMCPYs > 1 && Subtarget.optForMinSize()) {
+ if (NumMEMCPYs > 1 && Subtarget.hasMinSize()) {
return SDValue();
}
Modified: llvm/trunk/lib/Target/ARM/ARMSubtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMSubtarget.h?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMSubtarget.h (original)
+++ llvm/trunk/lib/Target/ARM/ARMSubtarget.h Thu Apr 4 15:40:06 2019
@@ -715,7 +715,7 @@ public:
bool disablePostRAScheduler() const { return DisablePostRAScheduler; }
bool useSoftFloat() const { return UseSoftFloat; }
bool isThumb() const { return InThumbMode; }
- bool optForMinSize() const { return OptMinSize; }
+ bool hasMinSize() const { return OptMinSize; }
bool isThumb1Only() const { return InThumbMode && !HasThumb2; }
bool isThumb2() const { return InThumbMode && HasThumb2; }
bool hasThumb2() const { return HasThumb2; }
Modified: llvm/trunk/lib/Target/ARM/ARMTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMTargetMachine.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMTargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMTargetMachine.cpp Thu Apr 4 15:40:06 2019
@@ -270,7 +270,7 @@ ARMBaseTargetMachine::getSubtargetImpl(c
// Use the optminsize to identify the subtarget, but don't use it in the
// feature string.
std::string Key = CPU + FS;
- if (F.optForMinSize())
+ if (F.hasMinSize())
Key += "+minsize";
auto &I = SubtargetMap[Key];
@@ -280,7 +280,7 @@ ARMBaseTargetMachine::getSubtargetImpl(c
// function that reside in TargetOptions.
resetTargetOptions(F);
I = llvm::make_unique<ARMSubtarget>(TargetTriple, CPU, FS, *this, isLittle,
- F.optForMinSize());
+ F.hasMinSize());
if (!I->isThumb() && !I->hasARMOps())
F.getContext().emitError("Function '" + F.getName() + "' uses ARM "
Modified: llvm/trunk/lib/Target/ARM/ARMTargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMTargetTransformInfo.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMTargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMTargetTransformInfo.cpp Thu Apr 4 15:40:06 2019
@@ -602,7 +602,7 @@ void ARMTTIImpl::getUnrollingPreferences
// Disable loop unrolling for Oz and Os.
UP.OptSizeThreshold = 0;
UP.PartialOptSizeThreshold = 0;
- if (L->getHeader()->getParent()->optForSize())
+ if (L->getHeader()->getParent()->hasOptSize())
return;
// Only enable on Thumb-2 targets.
Modified: llvm/trunk/lib/Target/ARM/ARMTargetTransformInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMTargetTransformInfo.h?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMTargetTransformInfo.h (original)
+++ llvm/trunk/lib/Target/ARM/ARMTargetTransformInfo.h Thu Apr 4 15:40:06 2019
@@ -94,7 +94,7 @@ public:
bool enableInterleavedAccessVectorization() { return true; }
bool shouldFavorBackedgeIndex(const Loop *L) const {
- if (L->getHeader()->getParent()->optForSize())
+ if (L->getHeader()->getParent()->hasOptSize())
return false;
return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1;
}
Modified: llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp (original)
+++ llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp Thu Apr 4 15:40:06 2019
@@ -1127,8 +1127,8 @@ bool Thumb2SizeReduce::runOnMachineFunct
TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo());
// Optimizing / minimizing size? Minimizing size implies optimizing for size.
- OptimizeSize = MF.getFunction().optForSize();
- MinimizeSize = STI->optForMinSize();
+ OptimizeSize = MF.getFunction().hasOptSize();
+ MinimizeSize = STI->hasMinSize();
BlockInfo.clear();
BlockInfo.resize(MF.getNumBlockIDs());
Modified: llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp Thu Apr 4 15:40:06 2019
@@ -374,17 +374,17 @@ static bool isRestoreCall(unsigned Opc)
}
static inline bool isOptNone(const MachineFunction &MF) {
- return MF.getFunction().optForNone() ||
+ return MF.getFunction().hasOptNone() ||
MF.getTarget().getOptLevel() == CodeGenOpt::None;
}
static inline bool isOptSize(const MachineFunction &MF) {
const Function &F = MF.getFunction();
- return F.optForSize() && !F.optForMinSize();
+ return F.hasOptSize() && !F.hasMinSize();
}
static inline bool isMinSize(const MachineFunction &MF) {
- return MF.getFunction().optForMinSize();
+ return MF.getFunction().hasMinSize();
}
/// Implements shrink-wrapping of the stack frame. By default, stack frame
Modified: llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp Thu Apr 4 15:40:06 2019
@@ -14707,7 +14707,7 @@ SDValue PPCTargetLowering::combineMUL(SD
return SDValue();
// An imul is usually smaller than the alternative sequence for legal type.
- if (DAG.getMachineFunction().getFunction().optForMinSize() &&
+ if (DAG.getMachineFunction().getFunction().hasMinSize() &&
isOperationLegal(ISD::MUL, N->getValueType(0)))
return SDValue();
Modified: llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp Thu Apr 4 15:40:06 2019
@@ -150,7 +150,7 @@ bool FixupBWInstPass::runOnMachineFuncti
this->MF = &MF;
TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
- OptForSize = MF.getFunction().optForSize();
+ OptForSize = MF.getFunction().hasOptSize();
MLI = &getAnalysis<MachineLoopInfo>();
LiveRegs.init(TII->getRegisterInfo());
Modified: llvm/trunk/lib/Target/X86/X86FixupLEAs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86FixupLEAs.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86FixupLEAs.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86FixupLEAs.cpp Thu Apr 4 15:40:06 2019
@@ -200,7 +200,7 @@ bool FixupLEAPass::runOnMachineFunction(
bool IsSlowLEA = ST.slowLEA();
bool IsSlow3OpsLEA = ST.slow3OpsLEA();
- OptIncDec = !ST.slowIncDec() || Func.getFunction().optForSize();
+ OptIncDec = !ST.slowIncDec() || Func.getFunction().hasOptSize();
OptLEA = ST.LEAusesAG() || IsSlowLEA || IsSlow3OpsLEA;
if (!OptLEA && !OptIncDec)
Modified: llvm/trunk/lib/Target/X86/X86FrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86FrameLowering.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86FrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86FrameLowering.cpp Thu Apr 4 15:40:06 2019
@@ -2810,7 +2810,7 @@ eliminateCallFramePseudoInstr(MachineFun
StackAdjustment += mergeSPUpdates(MBB, InsertPos, false);
if (StackAdjustment) {
- if (!(F.optForMinSize() &&
+ if (!(F.hasMinSize() &&
adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
/*InEpilogue=*/false);
Modified: llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp Thu Apr 4 15:40:06 2019
@@ -183,8 +183,8 @@ namespace {
"indirect-tls-seg-refs");
// OptFor[Min]Size are used in pattern predicates that isel is matching.
- OptForSize = MF.getFunction().optForSize();
- OptForMinSize = MF.getFunction().optForMinSize();
+ OptForSize = MF.getFunction().hasOptSize();
+ OptForMinSize = MF.getFunction().hasMinSize();
assert((!OptForMinSize || OptForSize) &&
"OptForMinSize implies OptForSize");
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Thu Apr 4 15:40:06 2019
@@ -7759,7 +7759,7 @@ static SDValue lowerBuildVectorAsBroadca
// TODO: If multiple splats are generated to load the same constant,
// it may be detrimental to overall size. There needs to be a way to detect
// that condition to know if this is truly a size win.
- bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
+ bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
// Handle broadcasting a single constant scalar from the constant pool
// into a vector.
@@ -10666,7 +10666,7 @@ static SDValue lowerShuffleAsBlend(const
case MVT::v32i16:
case MVT::v64i8: {
// Attempt to lower to a bitmask if we can. Only if not optimizing for size.
- bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
+ bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
if (!OptForSize) {
if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
Subtarget, DAG))
@@ -16982,7 +16982,7 @@ SDValue X86TargetLowering::LowerINSERT_V
// Bits [3:0] of the constant are the zero mask. The DAG Combiner may
// combine either bitwise AND or insert of float 0.0 to set these bits.
- bool MinSize = DAG.getMachineFunction().getFunction().optForMinSize();
+ bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
// If this is an insertion of 32-bits into the low 32-bits of
// a vector, we prefer to generate a blend with immediate rather
@@ -17636,7 +17636,7 @@ static SDValue LowerFunnelShift(SDValue
"Unexpected funnel shift type!");
// Expand slow SHLD/SHRD cases if we are not optimizing for size.
- bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
+ bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
if (!OptForSize && Subtarget.isSHLDSlow())
return SDValue();
@@ -18895,7 +18895,7 @@ static SDValue LowerFP_EXTEND(SDValue Op
/// implementation, and likely shuffle complexity of the alternate sequence.
static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
- bool IsOptimizingSize = DAG.getMachineFunction().getFunction().optForSize();
+ bool IsOptimizingSize = DAG.getMachineFunction().getFunction().hasOptSize();
bool HasFastHOps = Subtarget.hasFastHorizontalOps();
return !IsSingleSource || IsOptimizingSize || HasFastHOps;
}
@@ -19376,7 +19376,7 @@ SDValue X86TargetLowering::EmitCmp(SDVal
!cast<ConstantSDNode>(Op0)->getAPIntValue().isSignedIntN(8)) ||
(isa<ConstantSDNode>(Op1) &&
!cast<ConstantSDNode>(Op1)->getAPIntValue().isSignedIntN(8))) &&
- !DAG.getMachineFunction().getFunction().optForMinSize() &&
+ !DAG.getMachineFunction().getFunction().hasMinSize() &&
!Subtarget.isAtom()) {
unsigned ExtendOp =
isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
@@ -19550,7 +19550,7 @@ static SDValue LowerAndToBT(SDValue And,
} else {
// Use BT if the immediate can't be encoded in a TEST instruction or we
// are optimizing for size and the immedaite won't fit in a byte.
- bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
+ bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
isPowerOf2_64(AndRHSVal)) {
Src = AndLHS;
@@ -35932,7 +35932,7 @@ static SDValue reduceVMULWidth(SDNode *N
// pmulld is supported since SSE41. It is better to use pmulld
// instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
// the expansion.
- bool OptForMinSize = DAG.getMachineFunction().getFunction().optForMinSize();
+ bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
return SDValue();
@@ -36240,7 +36240,7 @@ static SDValue combineMul(SDNode *N, Sel
if (!MulConstantOptimization)
return SDValue();
// An imul is usually smaller than the alternative sequence.
- if (DAG.getMachineFunction().getFunction().optForMinSize())
+ if (DAG.getMachineFunction().getFunction().hasMinSize())
return SDValue();
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
@@ -37659,7 +37659,7 @@ static SDValue combineOr(SDNode *N, Sele
return SDValue();
// fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
- bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
+ bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
unsigned Bits = VT.getScalarSizeInBits();
// SHLD/SHRD instructions have lower register pressure, but on some
@@ -39938,7 +39938,7 @@ static SDValue combineFMinNumFMaxNum(SDN
// If we have to respect NaN inputs, this takes at least 3 instructions.
// Favor a library call when operating on a scalar and minimizing code size.
- if (!VT.isVector() && DAG.getMachineFunction().getFunction().optForMinSize())
+ if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
return SDValue();
EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.h?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.h (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.h Thu Apr 4 15:40:06 2019
@@ -829,7 +829,7 @@ namespace llvm {
}
bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
- if (DAG.getMachineFunction().getFunction().optForMinSize())
+ if (DAG.getMachineFunction().getFunction().hasMinSize())
return false;
return true;
}
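
For reference while reading these hunks: the renamed accessors are assumed to be thin wrappers over the function attributes, so the rename is purely mechanical. A minimal C++ sketch (not part of this patch) of a DAG-level query in the new spelling:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Function.h"
using namespace llvm;

// Minimal sketch, not part of this patch. Assumption: the new accessors reduce
// to attribute checks, roughly hasOptNone() ~ 'optnone', hasMinSize() ~
// 'minsize', hasOptSize() ~ 'optsize' or 'minsize'.
static bool optimizingForSize(const SelectionDAG &DAG) {
  const Function &F = DAG.getMachineFunction().getFunction();
  return F.hasOptSize();    // was F.optForSize() before this rename
}

static bool optimizingForMinSize(const SelectionDAG &DAG) {
  return DAG.getMachineFunction().getFunction().hasMinSize(); // was optForMinSize()
}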
Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp Thu Apr 4 15:40:06 2019
@@ -1453,7 +1453,7 @@ MachineInstr *X86InstrInfo::commuteInstr
case X86::VBLENDPDrri:
case X86::VBLENDPSrri:
// If we're optimizing for size, try to use MOVSD/MOVSS.
- if (MI.getParent()->getParent()->getFunction().optForSize()) {
+ if (MI.getParent()->getParent()->getFunction().hasOptSize()) {
unsigned Mask, Opc;
switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
@@ -4820,14 +4820,14 @@ MachineInstr *X86InstrInfo::foldMemoryOp
// For CPUs that favor the register form of a call or push,
// do not fold loads into calls or pushes, unless optimizing for size
// aggressively.
- if (isSlowTwoMemOps && !MF.getFunction().optForMinSize() &&
+ if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() &&
(MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
MI.getOpcode() == X86::PUSH64r))
return nullptr;
// Avoid partial and undef register update stalls unless optimizing for size.
- if (!MF.getFunction().optForSize() &&
+ if (!MF.getFunction().hasOptSize() &&
(hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
shouldPreventUndefRegUpdateMemFold(MF, MI)))
return nullptr;
@@ -4995,7 +4995,7 @@ X86InstrInfo::foldMemoryOperandImpl(Mach
return nullptr;
// Avoid partial and undef register update stalls unless optimizing for size.
- if (!MF.getFunction().optForSize() &&
+ if (!MF.getFunction().hasOptSize() &&
(hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
shouldPreventUndefRegUpdateMemFold(MF, MI)))
return nullptr;
@@ -5195,7 +5195,7 @@ MachineInstr *X86InstrInfo::foldMemoryOp
if (NoFusing) return nullptr;
// Avoid partial and undef register update stalls unless optimizing for size.
- if (!MF.getFunction().optForSize() &&
+ if (!MF.getFunction().hasOptSize() &&
(hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) ||
shouldPreventUndefRegUpdateMemFold(MF, MI)))
return nullptr;
Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.td?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.td Thu Apr 4 15:40:06 2019
@@ -925,12 +925,12 @@ def IsNotPIC : Predicate<"!TM.isPosi
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
- def OptForSize : Predicate<"MF->getFunction().optForSize()">;
- def OptForMinSize : Predicate<"MF->getFunction().optForMinSize()">;
- def OptForSpeed : Predicate<"!MF->getFunction().optForSize()">;
+ def OptForSize : Predicate<"MF->getFunction().hasOptSize()">;
+ def OptForMinSize : Predicate<"MF->getFunction().hasMinSize()">;
+ def OptForSpeed : Predicate<"!MF->getFunction().hasOptSize()">;
def UseIncDec : Predicate<"!Subtarget->slowIncDec() || "
- "MF->getFunction().optForSize()">;
- def NoSSE41_Or_OptForSize : Predicate<"MF->getFunction().optForSize() || "
+ "MF->getFunction().hasOptSize()">;
+ def NoSSE41_Or_OptForSize : Predicate<"MF->getFunction().hasOptSize() || "
"!Subtarget->hasSSE41()">;
}
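
The predicate bodies above are C++ expressions that the TableGen-generated matcher evaluates against the current MachineFunction, re-evaluated per function because of RecomputePerFunction = 1. A minimal C++ sketch of what the updated strings check; the helper names are hypothetical and for illustration only:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
using namespace llvm;

// Hypothetical helpers for illustration only; the real checks are emitted by
// TableGen from the predicate strings above.
static bool predOptForSize(const MachineFunction *MF) {
  return MF->getFunction().hasOptSize();          // OptForSize
}
static bool predOptForSpeed(const MachineFunction *MF) {
  return !MF->getFunction().hasOptSize();         // OptForSpeed
}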
Modified: llvm/trunk/lib/Target/X86/X86OptimizeLEAs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86OptimizeLEAs.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86OptimizeLEAs.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86OptimizeLEAs.cpp Thu Apr 4 15:40:06 2019
@@ -700,7 +700,7 @@ bool OptimizeLEAPass::runOnMachineFuncti
// Remove redundant address calculations. Do it only for -Os/-Oz since only
// a code size gain is expected from this part of the pass.
- if (MF.getFunction().optForSize())
+ if (MF.getFunction().hasOptSize())
Changed |= removeRedundantAddrCalc(LEAs);
}
Modified: llvm/trunk/lib/Target/X86/X86PadShortFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86PadShortFunction.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86PadShortFunction.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86PadShortFunction.cpp Thu Apr 4 15:40:06 2019
@@ -97,7 +97,7 @@ bool PadShortFunc::runOnMachineFunction(
if (skipFunction(MF.getFunction()))
return false;
- if (MF.getFunction().optForSize())
+ if (MF.getFunction().hasOptSize())
return false;
if (!MF.getSubtarget<X86Subtarget>().padShortFunctions())
Modified: llvm/trunk/lib/Target/X86/X86SelectionDAGInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86SelectionDAGInfo.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86SelectionDAGInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86SelectionDAGInfo.cpp Thu Apr 4 15:40:06 2019
@@ -248,7 +248,7 @@ SDValue X86SelectionDAGInfo::EmitTargetC
Repeats.AVT = Subtarget.is64Bit() ? MVT::i64 : MVT::i32;
if (Repeats.BytesLeft() > 0 &&
- DAG.getMachineFunction().getFunction().optForMinSize()) {
+ DAG.getMachineFunction().getFunction().hasMinSize()) {
// When aggressively optimizing for size, avoid generating the code to
// handle BytesLeft.
Repeats.AVT = MVT::i8;
Modified: llvm/trunk/lib/Transforms/IPO/FunctionAttrs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/FunctionAttrs.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/IPO/FunctionAttrs.cpp (original)
+++ llvm/trunk/lib/Transforms/IPO/FunctionAttrs.cpp Thu Apr 4 15:40:06 2019
@@ -1366,7 +1366,7 @@ PreservedAnalyses PostOrderFunctionAttrs
bool HasUnknownCall = false;
for (LazyCallGraph::Node &N : C) {
Function &F = N.getFunction();
- if (F.optForNone() || F.hasFnAttribute(Attribute::Naked)) {
+ if (F.hasOptNone() || F.hasFnAttribute(Attribute::Naked)) {
// Treat any function we're trying not to optimize as if it were an
// indirect call and omit it from the node set used below.
HasUnknownCall = true;
@@ -1439,7 +1439,7 @@ static bool runImpl(CallGraphSCC &SCC, A
bool ExternalNode = false;
for (CallGraphNode *I : SCC) {
Function *F = I->getFunction();
- if (!F || F->optForNone() || F->hasFnAttribute(Attribute::Naked)) {
+ if (!F || F->hasOptNone() || F->hasFnAttribute(Attribute::Naked)) {
// External node or function we're trying not to optimize - we both avoid
// transforming them and avoid leveraging information they provide.
ExternalNode = true;
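
Both hunks in this file, and several of the passes below, share the same guard: declarations, `optnone`, and naked functions are treated as opaque and skipped. A minimal standalone sketch of that idiom (a generic module walk, not code from any one pass here):

#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Minimal sketch of the common skip-'optnone' guard; assumes hasOptNone()
// corresponds to the 'optnone' function attribute.
static void runOnEligibleFunctions(Module &M) {
  for (Function &F : M) {
    if (F.isDeclaration() || F.hasOptNone() || F.hasFnAttribute(Attribute::Naked))
      continue; // leave functions we were asked not to optimize untouched
    // ... transform F here ...
  }
}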
Modified: llvm/trunk/lib/Transforms/IPO/HotColdSplitting.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/HotColdSplitting.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/IPO/HotColdSplitting.cpp (original)
+++ llvm/trunk/lib/Transforms/IPO/HotColdSplitting.cpp Thu Apr 4 15:40:06 2019
@@ -149,7 +149,7 @@ static bool mayExtractBlock(const BasicB
/// module has profile data), set entry count to 0 to ensure treated as cold.
/// Return true if the function is changed.
static bool markFunctionCold(Function &F, bool UpdateEntryCount = false) {
- assert(!F.optForNone() && "Can't mark this cold");
+ assert(!F.hasOptNone() && "Can't mark this cold");
bool Changed = false;
if (!F.hasFnAttribute(Attribute::Cold)) {
F.addFnAttr(Attribute::Cold);
@@ -673,7 +673,7 @@ bool HotColdSplitting::run(Module &M) {
continue;
// Do not modify `optnone` functions.
- if (F.optForNone())
+ if (F.hasOptNone())
continue;
// Detect inherently cold functions and mark them as such.
Modified: llvm/trunk/lib/Transforms/IPO/InferFunctionAttrs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/InferFunctionAttrs.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/IPO/InferFunctionAttrs.cpp (original)
+++ llvm/trunk/lib/Transforms/IPO/InferFunctionAttrs.cpp Thu Apr 4 15:40:06 2019
@@ -25,7 +25,7 @@ static bool inferAllPrototypeAttributes(
for (Function &F : M.functions())
// We only infer things using the prototype and the name; we don't need
// definitions.
- if (F.isDeclaration() && !F.optForNone())
+ if (F.isDeclaration() && !F.hasOptNone())
Changed |= inferLibFuncAttributes(F, TLI);
return Changed;
Modified: llvm/trunk/lib/Transforms/IPO/Inliner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/Inliner.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/IPO/Inliner.cpp (original)
+++ llvm/trunk/lib/Transforms/IPO/Inliner.cpp Thu Apr 4 15:40:06 2019
@@ -973,7 +973,7 @@ PreservedAnalyses InlinerPass::run(LazyC
LazyCallGraph::Node &N = *CG.lookup(F);
if (CG.lookupSCC(N) != C)
continue;
- if (F.optForNone()) {
+ if (F.hasOptNone()) {
setInlineRemark(Calls[i].first, "optnone attribute");
continue;
}
Modified: llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp Thu Apr 4 15:40:06 2019
@@ -3508,7 +3508,7 @@ static bool combineInstructionsOverFunct
MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist);
- InstCombiner IC(Worklist, Builder, F.optForMinSize(), ExpensiveCombines, AA,
+ InstCombiner IC(Worklist, Builder, F.hasMinSize(), ExpensiveCombines, AA,
AC, TLI, DT, ORE, DL, LI);
IC.MaxArraySizeForCombine = MaxArraySize;
Modified: llvm/trunk/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp (original)
+++ llvm/trunk/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp Thu Apr 4 15:40:06 2019
@@ -393,7 +393,7 @@ static bool promoteIndirectCalls(Module
}
bool Changed = false;
for (auto &F : M) {
- if (F.isDeclaration() || F.optForNone())
+ if (F.isDeclaration() || F.hasOptNone())
continue;
std::unique_ptr<OptimizationRemarkEmitter> OwnedORE;
Modified: llvm/trunk/lib/Transforms/Scalar/ConstantHoisting.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/ConstantHoisting.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/ConstantHoisting.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/ConstantHoisting.cpp Thu Apr 4 15:40:06 2019
@@ -548,7 +548,7 @@ ConstantHoistingPass::maximizeConstantsI
ConstCandVecType::iterator &MaxCostItr) {
unsigned NumUses = 0;
- if(!Entry->getParent()->optForSize() || std::distance(S,E) > 100) {
+ if(!Entry->getParent()->hasOptSize() || std::distance(S,E) > 100) {
for (auto ConstCand = S; ConstCand != E; ++ConstCand) {
NumUses += ConstCand->Uses.size();
if (ConstCand->CumulativeCost > MaxCostItr->CumulativeCost)
Modified: llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp Thu Apr 4 15:40:06 2019
@@ -284,7 +284,7 @@ bool LoopIdiomRecognize::runOnLoop(Loop
// Determine if code size heuristics need to be applied.
ApplyCodeSizeHeuristics =
- L->getHeader()->getParent()->optForSize() && UseLIRCodeSizeHeurs;
+ L->getHeader()->getParent()->hasOptSize() && UseLIRCodeSizeHeurs;
HasMemset = TLI->has(LibFunc_memset);
HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
Modified: llvm/trunk/lib/Transforms/Scalar/LoopLoadElimination.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopLoadElimination.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopLoadElimination.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LoopLoadElimination.cpp Thu Apr 4 15:40:06 2019
@@ -529,7 +529,7 @@ public:
}
if (!Checks.empty() || !LAI.getPSE().getUnionPredicate().isAlwaysTrue()) {
- if (L->getHeader()->getParent()->optForSize()) {
+ if (L->getHeader()->getParent()->hasOptSize()) {
LLVM_DEBUG(
dbgs() << "Versioning is needed but not allowed when optimizing "
"for size.\n");
Modified: llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LoopUnrollPass.cpp Thu Apr 4 15:40:06 2019
@@ -198,7 +198,7 @@ TargetTransformInfo::UnrollingPreference
TTI.getUnrollingPreferences(L, SE, UP);
// Apply size attributes
- if (L->getHeader()->getParent()->optForSize()) {
+ if (L->getHeader()->getParent()->hasOptSize()) {
UP.Threshold = UP.OptSizeThreshold;
UP.PartialThreshold = UP.PartialOptSizeThreshold;
}
Modified: llvm/trunk/lib/Transforms/Scalar/LoopUnswitch.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopUnswitch.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopUnswitch.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LoopUnswitch.cpp Thu Apr 4 15:40:06 2019
@@ -657,7 +657,7 @@ bool LoopUnswitch::processCurrentLoop()
}
// Do not do non-trivial unswitch while optimizing for size.
- // FIXME: Use Function::optForSize().
+ // FIXME: Use Function::hasOptSize().
if (OptimizeForSize ||
loopHeader->getParent()->hasFnAttribute(Attribute::OptimizeForSize))
return false;
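
The FIXME above points at the difference between the raw attribute test and the new accessor: hasFnAttribute(Attribute::OptimizeForSize) only matches `optsize`, while Function::hasOptSize() is assumed to also return true for `minsize`. A minimal sketch of the check the FIXME suggests; the OptimizeForSizeFlag parameter stands in for the pass's command-line option and is hypothetical here:

#include "llvm/IR/Function.h"
using namespace llvm;

// Sketch only; assumes hasOptSize() covers both 'optsize' and 'minsize'.
static bool skipNonTrivialUnswitch(const Function &F, bool OptimizeForSizeFlag) {
  return OptimizeForSizeFlag || F.hasOptSize();
}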
Modified: llvm/trunk/lib/Transforms/Scalar/WarnMissedTransforms.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/WarnMissedTransforms.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/WarnMissedTransforms.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/WarnMissedTransforms.cpp Thu Apr 4 15:40:06 2019
@@ -92,7 +92,7 @@ PreservedAnalyses
WarnMissedTransformationsPass::run(Function &F, FunctionAnalysisManager &AM) {
// Do not warn about transformations that were not applied if optimizations
// are disabled.
- if (F.optForNone())
+ if (F.hasOptNone())
return PreservedAnalyses::all();
auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
Modified: llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp (original)
+++ llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp Thu Apr 4 15:40:06 2019
@@ -2375,7 +2375,7 @@ Value *LibCallSimplifier::optimizeFPuts(
// Don't rewrite fputs to fwrite when optimising for size because fwrite
// requires more arguments and thus extra MOVs are required.
- if (CI->getFunction()->optForSize())
+ if (CI->getFunction()->hasOptSize())
return nullptr;
// Check if it has any use
Modified: llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp?rev=357731&r1=357730&r2=357731&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp (original)
+++ llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp Thu Apr 4 15:40:06 2019
@@ -7162,7 +7162,7 @@ static bool processLoopInVPlanNativePath
// Check the function attributes to find out if this function should be
// optimized for size.
bool OptForSize =
- Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
+ Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->hasOptSize();
// Plan how to best vectorize, return the best VF and its cost.
const VectorizationFactor VF = LVP.planInVPlanNativePath(OptForSize, UserVF);
@@ -7245,7 +7245,7 @@ bool LoopVectorizePass::processLoop(Loop
// Check the function attributes to find out if this function should be
// optimized for size.
bool OptForSize =
- Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
+ Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->hasOptSize();
// Entrance to the VPlan-native vectorization path. Outer loops are processed
// here. They may require CFG and instruction level transformations before