[llvm] r320884 - MachineFunction: Return reference from getFunction(); NFC
Matthias Braun via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 15 14:22:58 PST 2017
Author: matze
Date: Fri Dec 15 14:22:58 2017
New Revision: 320884
URL: http://llvm.org/viewvc/llvm-project?rev=320884&view=rev
Log:
MachineFunction: Return reference from getFunction(); NFC
The Function can never be nullptr, so we can return a reference.
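
For illustration, the typical caller-side migration looks like the sketch below. This is a minimal, hypothetical example (the helper shouldSkip is not part of this commit); only the getFunction() signature change itself comes from this patch, and callers that still need a Function pointer now take the address of the returned reference, e.g. getSymbol(&MF.getFunction()).

  // Minimal sketch, assuming the usual LLVM CodeGen headers of this era.
  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/IR/Function.h"
  using namespace llvm;

  static bool shouldSkip(const MachineFunction &MF) {
    // Before this change: const Function *F = MF.getFunction();
    //                     return F->optForSize();
    // After: getFunction() returns a reference, so member access uses '.'.
    const Function &F = MF.getFunction();
    return F.optForSize() || F.hasFnAttribute(Attribute::OptimizeNone);
  }
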
Modified:
llvm/trunk/include/llvm/Analysis/BlockFrequencyInfoImpl.h
llvm/trunk/include/llvm/CodeGen/MachineFunction.h
llvm/trunk/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h
llvm/trunk/include/llvm/CodeGen/TargetFrameLowering.h
llvm/trunk/include/llvm/IR/Function.h
llvm/trunk/lib/CodeGen/Analysis.cpp
llvm/trunk/lib/CodeGen/AsmPrinter/ARMException.cpp
llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
llvm/trunk/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
llvm/trunk/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
llvm/trunk/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
llvm/trunk/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
llvm/trunk/lib/CodeGen/AsmPrinter/WinException.cpp
llvm/trunk/lib/CodeGen/BranchFolding.cpp
llvm/trunk/lib/CodeGen/DeadMachineInstructionElim.cpp
llvm/trunk/lib/CodeGen/EarlyIfConversion.cpp
llvm/trunk/lib/CodeGen/ExecutionDepsFix.cpp
llvm/trunk/lib/CodeGen/FEntryInserter.cpp
llvm/trunk/lib/CodeGen/GCRootLowering.cpp
llvm/trunk/lib/CodeGen/GlobalISel/CallLowering.cpp
llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp
llvm/trunk/lib/CodeGen/GlobalISel/InstructionSelect.cpp
llvm/trunk/lib/CodeGen/GlobalISel/Legalizer.cpp
llvm/trunk/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
llvm/trunk/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
llvm/trunk/lib/CodeGen/GlobalISel/RegBankSelect.cpp
llvm/trunk/lib/CodeGen/IfConversion.cpp
llvm/trunk/lib/CodeGen/LexicalScopes.cpp
llvm/trunk/lib/CodeGen/LiveDebugValues.cpp
llvm/trunk/lib/CodeGen/LiveDebugVariables.cpp
llvm/trunk/lib/CodeGen/LiveRangeShrink.cpp
llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp
llvm/trunk/lib/CodeGen/MIRParser/MIRParser.cpp
llvm/trunk/lib/CodeGen/MIRPrinter.cpp
llvm/trunk/lib/CodeGen/MachineBasicBlock.cpp
llvm/trunk/lib/CodeGen/MachineBlockFrequencyInfo.cpp
llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp
llvm/trunk/lib/CodeGen/MachineCSE.cpp
llvm/trunk/lib/CodeGen/MachineCombiner.cpp
llvm/trunk/lib/CodeGen/MachineCopyPropagation.cpp
llvm/trunk/lib/CodeGen/MachineFunction.cpp
llvm/trunk/lib/CodeGen/MachineInstr.cpp
llvm/trunk/lib/CodeGen/MachineLICM.cpp
llvm/trunk/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp
llvm/trunk/lib/CodeGen/MachinePipeliner.cpp
llvm/trunk/lib/CodeGen/MachineRegisterInfo.cpp
llvm/trunk/lib/CodeGen/MachineScheduler.cpp
llvm/trunk/lib/CodeGen/MachineSink.cpp
llvm/trunk/lib/CodeGen/MachineVerifier.cpp
llvm/trunk/lib/CodeGen/OptimizePHIs.cpp
llvm/trunk/lib/CodeGen/PatchableFunction.cpp
llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp
llvm/trunk/lib/CodeGen/PostRASchedulerList.cpp
llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp
llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp
llvm/trunk/lib/CodeGen/RegAllocPBQP.cpp
llvm/trunk/lib/CodeGen/RegUsageInfoCollector.cpp
llvm/trunk/lib/CodeGen/RegUsageInfoPropagate.cpp
llvm/trunk/lib/CodeGen/ResetMachineFunctionPass.cpp
llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp
llvm/trunk/lib/CodeGen/ShrinkWrap.cpp
llvm/trunk/lib/CodeGen/StackColoring.cpp
llvm/trunk/lib/CodeGen/TailDuplication.cpp
llvm/trunk/lib/CodeGen/TailDuplicator.cpp
llvm/trunk/lib/CodeGen/TargetFrameLoweringImpl.cpp
llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp
llvm/trunk/lib/CodeGen/TargetOptionsImpl.cpp
llvm/trunk/lib/CodeGen/TargetRegisterInfo.cpp
llvm/trunk/lib/CodeGen/TwoAddressInstructionPass.cpp
llvm/trunk/lib/CodeGen/XRayInstrumentation.cpp
llvm/trunk/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
llvm/trunk/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp
llvm/trunk/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
llvm/trunk/lib/Target/AArch64/AArch64CollectLOH.cpp
llvm/trunk/lib/Target/AArch64/AArch64CondBrTuning.cpp
llvm/trunk/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp
llvm/trunk/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
llvm/trunk/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp
llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp
llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td
llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
llvm/trunk/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp
llvm/trunk/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp
llvm/trunk/lib/Target/AArch64/AArch64StorePairSuppress.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h
llvm/trunk/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
llvm/trunk/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.cpp
llvm/trunk/lib/Target/AMDGPU/R600ClauseMergePass.cpp
llvm/trunk/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
llvm/trunk/lib/Target/AMDGPU/R600InstrInfo.cpp
llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp
llvm/trunk/lib/Target/AMDGPU/SIFrameLowering.cpp
llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
llvm/trunk/lib/Target/AMDGPU/SIInsertSkips.cpp
llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
llvm/trunk/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
llvm/trunk/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp
llvm/trunk/lib/Target/AMDGPU/SIShrinkInstructions.cpp
llvm/trunk/lib/Target/AMDGPU/SIWholeQuadMode.cpp
llvm/trunk/lib/Target/ARC/ARCBranchFinalize.cpp
llvm/trunk/lib/Target/ARC/ARCFrameLowering.cpp
llvm/trunk/lib/Target/ARC/ARCRegisterInfo.cpp
llvm/trunk/lib/Target/ARM/A15SDOptimizer.cpp
llvm/trunk/lib/Target/ARM/ARMAsmPrinter.cpp
llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp
llvm/trunk/lib/Target/ARM/ARMBaseRegisterInfo.cpp
llvm/trunk/lib/Target/ARM/ARMCallLowering.cpp
llvm/trunk/lib/Target/ARM/ARMExpandPseudoInsts.cpp
llvm/trunk/lib/Target/ARM/ARMFastISel.cpp
llvm/trunk/lib/Target/ARM/ARMFrameLowering.cpp
llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
llvm/trunk/lib/Target/ARM/ARMISelLowering.h
llvm/trunk/lib/Target/ARM/ARMLegalizerInfo.cpp
llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
llvm/trunk/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
llvm/trunk/lib/Target/ARM/ARMSelectionDAGInfo.cpp
llvm/trunk/lib/Target/ARM/ARMSubtarget.cpp
llvm/trunk/lib/Target/ARM/MLxExpansionPass.cpp
llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp
llvm/trunk/lib/Target/ARM/ThumbRegisterInfo.cpp
llvm/trunk/lib/Target/AVR/AVRFrameLowering.cpp
llvm/trunk/lib/Target/AVR/AVRISelLowering.cpp
llvm/trunk/lib/Target/AVR/AVRRegisterInfo.cpp
llvm/trunk/lib/Target/BPF/BPFISelLowering.cpp
llvm/trunk/lib/Target/BPF/BPFRegisterInfo.cpp
llvm/trunk/lib/Target/Hexagon/HexagonBitSimplify.cpp
llvm/trunk/lib/Target/Hexagon/HexagonBitTracker.cpp
llvm/trunk/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
llvm/trunk/lib/Target/Hexagon/HexagonConstExtenders.cpp
llvm/trunk/lib/Target/Hexagon/HexagonConstPropagation.cpp
llvm/trunk/lib/Target/Hexagon/HexagonCopyToCombine.cpp
llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp
llvm/trunk/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp
llvm/trunk/lib/Target/Hexagon/HexagonGenInsert.cpp
llvm/trunk/lib/Target/Hexagon/HexagonGenMux.cpp
llvm/trunk/lib/Target/Hexagon/HexagonGenPredicate.cpp
llvm/trunk/lib/Target/Hexagon/HexagonHardwareLoops.cpp
llvm/trunk/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp
llvm/trunk/lib/Target/Hexagon/HexagonMachineScheduler.cpp
llvm/trunk/lib/Target/Hexagon/HexagonNewValueJump.cpp
llvm/trunk/lib/Target/Hexagon/HexagonOptAddrMode.cpp
llvm/trunk/lib/Target/Hexagon/HexagonPeephole.cpp
llvm/trunk/lib/Target/Hexagon/HexagonRDFOpt.cpp
llvm/trunk/lib/Target/Hexagon/HexagonSplitDouble.cpp
llvm/trunk/lib/Target/Hexagon/HexagonStoreWidening.cpp
llvm/trunk/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
llvm/trunk/lib/Target/Hexagon/RDFGraph.cpp
llvm/trunk/lib/Target/Lanai/LanaiISelLowering.cpp
llvm/trunk/lib/Target/MSP430/MSP430ISelLowering.cpp
llvm/trunk/lib/Target/MSP430/MSP430RegisterInfo.cpp
llvm/trunk/lib/Target/Mips/MipsAsmPrinter.cpp
llvm/trunk/lib/Target/Mips/MipsCCState.cpp
llvm/trunk/lib/Target/Mips/MipsConstantIslandPass.cpp
llvm/trunk/lib/Target/Mips/MipsISelLowering.cpp
llvm/trunk/lib/Target/Mips/MipsRegisterInfo.cpp
llvm/trunk/lib/Target/Mips/MipsSEFrameLowering.cpp
llvm/trunk/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
llvm/trunk/lib/Target/Mips/MipsSEInstrInfo.cpp
llvm/trunk/lib/Target/Mips/MipsTargetMachine.cpp
llvm/trunk/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
llvm/trunk/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
llvm/trunk/lib/Target/NVPTX/NVPTXISelLowering.cpp
llvm/trunk/lib/Target/NVPTX/NVPTXPeephole.cpp
llvm/trunk/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
llvm/trunk/lib/Target/PowerPC/PPCAsmPrinter.cpp
llvm/trunk/lib/Target/PowerPC/PPCBranchCoalescing.cpp
llvm/trunk/lib/Target/PowerPC/PPCEarlyReturn.cpp
llvm/trunk/lib/Target/PowerPC/PPCFrameLowering.cpp
llvm/trunk/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h
llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.cpp
llvm/trunk/lib/Target/PowerPC/PPCMIPeephole.cpp
llvm/trunk/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
llvm/trunk/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
llvm/trunk/lib/Target/PowerPC/PPCReduceCRLogicals.cpp
llvm/trunk/lib/Target/PowerPC/PPCRegisterInfo.cpp
llvm/trunk/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
llvm/trunk/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
llvm/trunk/lib/Target/Sparc/SparcISelLowering.cpp
llvm/trunk/lib/Target/SystemZ/SystemZElimCompare.cpp
llvm/trunk/lib/Target/SystemZ/SystemZFrameLowering.cpp
llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp
llvm/trunk/lib/Target/SystemZ/SystemZLDCleanup.cpp
llvm/trunk/lib/Target/SystemZ/SystemZRegisterInfo.cpp
llvm/trunk/lib/Target/SystemZ/SystemZShortenInst.cpp
llvm/trunk/lib/Target/X86/X86AsmPrinter.cpp
llvm/trunk/lib/Target/X86/X86CallFrameOptimization.cpp
llvm/trunk/lib/Target/X86/X86CallLowering.cpp
llvm/trunk/lib/Target/X86/X86CmovConversion.cpp
llvm/trunk/lib/Target/X86/X86DomainReassignment.cpp
llvm/trunk/lib/Target/X86/X86ExpandPseudo.cpp
llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp
llvm/trunk/lib/Target/X86/X86FixupLEAs.cpp
llvm/trunk/lib/Target/X86/X86FloatingPoint.cpp
llvm/trunk/lib/Target/X86/X86FrameLowering.cpp
llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/lib/Target/X86/X86ISelLowering.h
llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
llvm/trunk/lib/Target/X86/X86InstrInfo.td
llvm/trunk/lib/Target/X86/X86OptimizeLEAs.cpp
llvm/trunk/lib/Target/X86/X86PadShortFunction.cpp
llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp
llvm/trunk/lib/Target/X86/X86SelectionDAGInfo.cpp
llvm/trunk/lib/Target/X86/X86VZeroUpper.cpp
llvm/trunk/lib/Target/X86/X86WinAllocaExpander.cpp
llvm/trunk/lib/Target/XCore/XCoreFrameLowering.cpp
llvm/trunk/lib/Target/XCore/XCoreInstrInfo.cpp
llvm/trunk/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
llvm/trunk/lib/Target/XCore/XCoreRegisterInfo.cpp
Modified: llvm/trunk/include/llvm/Analysis/BlockFrequencyInfoImpl.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/BlockFrequencyInfoImpl.h?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/BlockFrequencyInfoImpl.h (original)
+++ llvm/trunk/include/llvm/Analysis/BlockFrequencyInfoImpl.h Fri Dec 15 14:22:58 2017
@@ -1341,7 +1341,7 @@ raw_ostream &BlockFrequencyInfoImpl<BT>:
<< ", int = " << getBlockFreq(&BB).getFrequency();
if (Optional<uint64_t> ProfileCount =
BlockFrequencyInfoImplBase::getBlockProfileCount(
- *F->getFunction(), getNode(&BB)))
+ F->getFunction(), getNode(&BB)))
OS << ", count = " << ProfileCount.getValue();
if (Optional<uint64_t> IrrLoopHeaderWeight =
BB.getIrrLoopHeaderWeight())
Modified: llvm/trunk/include/llvm/CodeGen/MachineFunction.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/MachineFunction.h?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/MachineFunction.h (original)
+++ llvm/trunk/include/llvm/CodeGen/MachineFunction.h Fri Dec 15 14:22:58 2017
@@ -380,8 +380,8 @@ public:
/// Return the DataLayout attached to the Module associated to this MF.
const DataLayout &getDataLayout() const;
- /// getFunction - Return the LLVM function that this machine code represents
- const Function *getFunction() const { return &F; }
+ /// Return the LLVM function that this machine code represents
+ const Function &getFunction() const { return F; }
/// getName - Return the name of the corresponding LLVM function.
StringRef getName() const;
Modified: llvm/trunk/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h (original)
+++ llvm/trunk/include/llvm/CodeGen/MachineOptimizationRemarkEmitter.h Fri Dec 15 14:22:58 2017
@@ -33,7 +33,7 @@ public:
const DiagnosticLocation &Loc,
const MachineBasicBlock *MBB)
: DiagnosticInfoOptimizationBase(Kind, DS_Remark, PassName, RemarkName,
- *MBB->getParent()->getFunction(), Loc),
+ MBB->getParent()->getFunction(), Loc),
MBB(MBB) {}
/// MI-specific kinds of diagnostic Arguments.
@@ -159,8 +159,8 @@ public:
/// (1) to filter trivial false positives or (2) to provide more context so
/// that non-trivial false positives can be quickly detected by the user.
bool allowExtraAnalysis(StringRef PassName) const {
- return (MF.getFunction()->getContext().getDiagnosticsOutputFile() ||
- MF.getFunction()->getContext()
+ return (MF.getFunction().getContext().getDiagnosticsOutputFile() ||
+ MF.getFunction().getContext()
.getDiagHandlerPtr()->isAnyRemarkEnabled(PassName));
}
@@ -172,8 +172,8 @@ public:
// remarks enabled. We can't currently check whether remarks are requested
// for the calling pass since that requires actually building the remark.
- if (MF.getFunction()->getContext().getDiagnosticsOutputFile() ||
- MF.getFunction()->getContext().getDiagHandlerPtr()->isAnyRemarkEnabled()) {
+ if (MF.getFunction().getContext().getDiagnosticsOutputFile() ||
+ MF.getFunction().getContext().getDiagHandlerPtr()->isAnyRemarkEnabled()) {
auto R = RemarkBuilder();
emit((DiagnosticInfoOptimizationBase &)R);
}
Modified: llvm/trunk/include/llvm/CodeGen/TargetFrameLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/TargetFrameLowering.h?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/TargetFrameLowering.h (original)
+++ llvm/trunk/include/llvm/CodeGen/TargetFrameLowering.h Fri Dec 15 14:22:58 2017
@@ -330,12 +330,12 @@ public:
/// Check if given function is safe for not having callee saved registers.
/// This is used when interprocedural register allocation is enabled.
- static bool isSafeForNoCSROpt(const Function *F) {
- if (!F->hasLocalLinkage() || F->hasAddressTaken() ||
- !F->hasFnAttribute(Attribute::NoRecurse))
+ static bool isSafeForNoCSROpt(const Function &F) {
+ if (!F.hasLocalLinkage() || F.hasAddressTaken() ||
+ !F.hasFnAttribute(Attribute::NoRecurse))
return false;
// Function should not be optimized as tail call.
- for (const User *U : F->users())
+ for (const User *U : F.users())
if (auto CS = ImmutableCallSite(U))
if (CS.isTailCall())
return false;
Modified: llvm/trunk/include/llvm/IR/Function.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/Function.h?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/Function.h (original)
+++ llvm/trunk/include/llvm/IR/Function.h Fri Dec 15 14:22:58 2017
@@ -131,7 +131,7 @@ public:
// This is here to help easily convert from FunctionT * (Function * or
// MachineFunction *) in BlockFrequencyInfoImpl to Function * by calling
// FunctionT->getFunction().
- const Function *getFunction() const { return this; }
+ const Function &getFunction() const { return *this; }
static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
const Twine &N = "", Module *M = nullptr) {
Modified: llvm/trunk/lib/CodeGen/Analysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/Analysis.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/Analysis.cpp (original)
+++ llvm/trunk/lib/CodeGen/Analysis.cpp Fri Dec 15 14:22:58 2017
@@ -668,7 +668,7 @@ llvm::getFuncletMembership(const Machine
int EntryBBNumber = MF.front().getNumber();
bool IsSEH = isAsynchronousEHPersonality(
- classifyEHPersonality(MF.getFunction()->getPersonalityFn()));
+ classifyEHPersonality(MF.getFunction().getPersonalityFn()));
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
SmallVector<const MachineBasicBlock *, 16> FuncletBlocks;
Modified: llvm/trunk/lib/CodeGen/AsmPrinter/ARMException.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AsmPrinter/ARMException.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/ARMException.cpp (original)
+++ llvm/trunk/lib/CodeGen/AsmPrinter/ARMException.cpp Fri Dec 15 14:22:58 2017
@@ -60,16 +60,16 @@ void ARMException::beginFunction(const M
///
void ARMException::endFunction(const MachineFunction *MF) {
ARMTargetStreamer &ATS = getTargetStreamer();
- const Function *F = MF->getFunction();
+ const Function &F = MF->getFunction();
const Function *Per = nullptr;
- if (F->hasPersonalityFn())
- Per = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
+ if (F.hasPersonalityFn())
+ Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
bool forceEmitPersonality =
- F->hasPersonalityFn() && !isNoOpWithoutInvoke(classifyEHPersonality(Per)) &&
- F->needsUnwindTableEntry();
+ F.hasPersonalityFn() && !isNoOpWithoutInvoke(classifyEHPersonality(Per)) &&
+ F.needsUnwindTableEntry();
bool shouldEmitPersonality = forceEmitPersonality ||
!MF->getLandingPads().empty();
- if (!Asm->MF->getFunction()->needsUnwindTableEntry() &&
+ if (!Asm->MF->getFunction().needsUnwindTableEntry() &&
!shouldEmitPersonality)
ATS.emitCantUnwind();
else if (shouldEmitPersonality) {
Modified: llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp (original)
+++ llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp Fri Dec 15 14:22:58 2017
@@ -621,35 +621,35 @@ void AsmPrinter::EmitDebugThreadLocal(co
/// EmitFunctionHeader - This method emits the header for the current
/// function.
void AsmPrinter::EmitFunctionHeader() {
- const Function *F = MF->getFunction();
+ const Function &F = MF->getFunction();
if (isVerbose())
OutStreamer->GetCommentOS()
<< "-- Begin function "
- << GlobalValue::dropLLVMManglingEscape(F->getName()) << '\n';
+ << GlobalValue::dropLLVMManglingEscape(F.getName()) << '\n';
// Print out constants referenced by the function
EmitConstantPool();
// Print the 'header' of function.
- OutStreamer->SwitchSection(getObjFileLowering().SectionForGlobal(F, TM));
- EmitVisibility(CurrentFnSym, F->getVisibility());
+ OutStreamer->SwitchSection(getObjFileLowering().SectionForGlobal(&F, TM));
+ EmitVisibility(CurrentFnSym, F.getVisibility());
- EmitLinkage(F, CurrentFnSym);
+ EmitLinkage(&F, CurrentFnSym);
if (MAI->hasFunctionAlignment())
- EmitAlignment(MF->getAlignment(), F);
+ EmitAlignment(MF->getAlignment(), &F);
if (MAI->hasDotTypeDotSizeDirective())
OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_ELF_TypeFunction);
if (isVerbose()) {
- F->printAsOperand(OutStreamer->GetCommentOS(),
- /*PrintType=*/false, F->getParent());
+ F.printAsOperand(OutStreamer->GetCommentOS(),
+ /*PrintType=*/false, F.getParent());
OutStreamer->GetCommentOS() << '\n';
}
// Emit the prefix data.
- if (F->hasPrefixData()) {
+ if (F.hasPrefixData()) {
if (MAI->hasSubsectionsViaSymbols()) {
// Preserving prefix data on platforms which use subsections-via-symbols
// is a bit tricky. Here we introduce a symbol for the prefix data
@@ -658,12 +658,12 @@ void AsmPrinter::EmitFunctionHeader() {
MCSymbol *PrefixSym = OutContext.createLinkerPrivateTempSymbol();
OutStreamer->EmitLabel(PrefixSym);
- EmitGlobalConstant(F->getParent()->getDataLayout(), F->getPrefixData());
+ EmitGlobalConstant(F.getParent()->getDataLayout(), F.getPrefixData());
// Emit an .alt_entry directive for the actual function symbol.
OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_AltEntry);
} else {
- EmitGlobalConstant(F->getParent()->getDataLayout(), F->getPrefixData());
+ EmitGlobalConstant(F.getParent()->getDataLayout(), F.getPrefixData());
}
}
@@ -675,7 +675,7 @@ void AsmPrinter::EmitFunctionHeader() {
// references to the dangling symbols. Emit them at the start of the function
// so that we don't get references to undefined symbols.
std::vector<MCSymbol*> DeadBlockSyms;
- MMI->takeDeletedSymbolsForFunction(F, DeadBlockSyms);
+ MMI->takeDeletedSymbolsForFunction(&F, DeadBlockSyms);
for (unsigned i = 0, e = DeadBlockSyms.size(); i != e; ++i) {
OutStreamer->AddComment("Address taken block that was later removed");
OutStreamer->EmitLabel(DeadBlockSyms[i]);
@@ -700,8 +700,8 @@ void AsmPrinter::EmitFunctionHeader() {
}
// Emit the prologue data.
- if (F->hasPrologueData())
- EmitGlobalConstant(F->getParent()->getDataLayout(), F->getPrologueData());
+ if (F.hasPrologueData())
+ EmitGlobalConstant(F.getParent()->getDataLayout(), F.getPrologueData());
}
/// EmitFunctionEntryLabel - Emit the label that is the entrypoint for the
@@ -900,7 +900,7 @@ static bool emitDebugValueComment(const
AsmPrinter::CFIMoveType AsmPrinter::needsCFIMoves() const {
if (MAI->getExceptionHandlingType() == ExceptionHandling::DwarfCFI &&
- MF->getFunction()->needsUnwindTableEntry())
+ MF->getFunction().needsUnwindTableEntry())
return CFI_M_EH;
if (MMI->hasDebugInfo())
@@ -910,7 +910,7 @@ AsmPrinter::CFIMoveType AsmPrinter::need
}
bool AsmPrinter::needsSEHMoves() {
- return MAI->usesWindowsCFI() && MF->getFunction()->needsUnwindTableEntry();
+ return MAI->usesWindowsCFI() && MF->getFunction().needsUnwindTableEntry();
}
void AsmPrinter::emitCFIInstruction(const MachineInstr &MI) {
@@ -964,7 +964,7 @@ void AsmPrinter::emitStackSizeSection(co
OutStreamer->PushSection();
OutStreamer->SwitchSection(StackSizeSection);
- const MCSymbol *FunctionSymbol = getSymbol(MF.getFunction());
+ const MCSymbol *FunctionSymbol = getSymbol(&MF.getFunction());
uint64_t StackSize = FrameInfo.getStackSize();
OutStreamer->EmitValue(MCSymbolRefExpr::create(FunctionSymbol, OutContext),
/* size = */ 8);
@@ -980,10 +980,10 @@ static bool needFuncLabelsForEHOrDebugIn
// We might emit an EH table that uses function begin and end labels even if
// we don't have any landingpads.
- if (!MF.getFunction()->hasPersonalityFn())
+ if (!MF.getFunction().hasPersonalityFn())
return false;
return !isNoOpWithoutInvoke(
- classifyEHPersonality(MF.getFunction()->getPersonalityFn()));
+ classifyEHPersonality(MF.getFunction().getPersonalityFn()));
}
/// EmitFunctionBody - This method emits the body and trailer for a
@@ -1070,7 +1070,7 @@ void AsmPrinter::EmitFunctionBody() {
EmittedInsts += NumInstsInFunction;
MachineOptimizationRemarkAnalysis R(DEBUG_TYPE, "InstructionCount",
- MF->getFunction()->getSubprogram(),
+ MF->getFunction().getSubprogram(),
&MF->front());
R << ore::NV("NumInstructions", NumInstsInFunction)
<< " instructions in function";
@@ -1098,8 +1098,8 @@ void AsmPrinter::EmitFunctionBody() {
}
}
- const Function *F = MF->getFunction();
- for (const auto &BB : *F) {
+ const Function &F = MF->getFunction();
+ for (const auto &BB : F) {
if (!BB.hasAddressTaken())
continue;
MCSymbol *Sym = GetBlockAddressSymbol(&BB);
@@ -1442,7 +1442,7 @@ MCSymbol *AsmPrinter::getCurExceptionSym
void AsmPrinter::SetupMachineFunction(MachineFunction &MF) {
this->MF = &MF;
// Get the function symbol.
- CurrentFnSym = getSymbol(MF.getFunction());
+ CurrentFnSym = getSymbol(&MF.getFunction());
CurrentFnSymForSize = CurrentFnSym;
CurrentFnBegin = nullptr;
CurExceptionSym = nullptr;
@@ -1568,14 +1568,14 @@ void AsmPrinter::EmitJumpTableInfo() {
// Pick the directive to use to print the jump table entries, and switch to
// the appropriate section.
- const Function *F = MF->getFunction();
+ const Function &F = MF->getFunction();
const TargetLoweringObjectFile &TLOF = getObjFileLowering();
bool JTInDiffSection = !TLOF.shouldPutJumpTableInFunctionSection(
MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32,
- *F);
+ F);
if (JTInDiffSection) {
// Drop it in the readonly section.
- MCSection *ReadOnlySection = TLOF.getSectionForJumpTable(*F, TM);
+ MCSection *ReadOnlySection = TLOF.getSectionForJumpTable(F, TM);
OutStreamer->SwitchSection(ReadOnlySection);
}
@@ -1949,7 +1949,7 @@ const MCExpr *AsmPrinter::lowerConstant(
raw_string_ostream OS(S);
OS << "Unsupported expression in static initializer: ";
CE->printAsOperand(OS, /*PrintType=*/false,
- !MF ? nullptr : MF->getFunction()->getParent());
+ !MF ? nullptr : MF->getFunction().getParent());
report_fatal_error(OS.str());
}
case Instruction::GetElementPtr: {
@@ -2632,7 +2632,7 @@ void AsmPrinter::setupCodePaddingContext
assert(MF != nullptr && "Machine function must be valid");
assert(LI != nullptr && "Loop info must be valid");
Context.IsPaddingActive = !MF->hasInlineAsm() &&
- !MF->getFunction()->optForSize() &&
+ !MF->getFunction().optForSize() &&
TM.getOptLevel() != CodeGenOpt::None;
const MachineLoop *CurrentLoop = LI->getLoopFor(&MBB);
Context.IsBasicBlockInsideInnermostLoop =
@@ -2830,7 +2830,7 @@ void AsmPrinter::emitXRayTable() {
return;
auto PrevSection = OutStreamer->getCurrentSectionOnly();
- auto Fn = MF->getFunction();
+ const Function &F = MF->getFunction();
MCSection *InstMap = nullptr;
MCSection *FnSledIndex = nullptr;
if (MF->getSubtarget().getTargetTriple().isOSBinFormatELF()) {
@@ -2838,9 +2838,9 @@ void AsmPrinter::emitXRayTable() {
assert(Associated != nullptr);
auto Flags = ELF::SHF_WRITE | ELF::SHF_ALLOC | ELF::SHF_LINK_ORDER;
std::string GroupName;
- if (Fn->hasComdat()) {
+ if (F.hasComdat()) {
Flags |= ELF::SHF_GROUP;
- GroupName = Fn->getComdat()->getName();
+ GroupName = F.getComdat()->getName();
}
auto UniqueID = ++XRayFnUniqueID;
@@ -2886,15 +2886,15 @@ void AsmPrinter::emitXRayTable() {
void AsmPrinter::recordSled(MCSymbol *Sled, const MachineInstr &MI,
SledKind Kind, uint8_t Version) {
- auto Fn = MI.getMF()->getFunction();
- auto Attr = Fn->getFnAttribute("function-instrument");
- bool LogArgs = Fn->hasFnAttribute("xray-log-args");
+ const Function &F = MI.getMF()->getFunction();
+ auto Attr = F.getFnAttribute("function-instrument");
+ bool LogArgs = F.hasFnAttribute("xray-log-args");
bool AlwaysInstrument =
Attr.isStringAttribute() && Attr.getValueAsString() == "xray-always";
if (Kind == SledKind::FUNCTION_ENTER && LogArgs)
Kind = SledKind::LOG_ARGS_ENTER;
Sleds.emplace_back(XRayFunctionEntry{Sled, CurrentFnSym, Kind,
- AlwaysInstrument, Fn, Version});
+ AlwaysInstrument, &F, Version});
}
uint16_t AsmPrinter::getDwarfVersion() const {
Modified: llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp (original)
+++ llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp Fri Dec 15 14:22:58 2017
@@ -514,7 +514,7 @@ void AsmPrinter::EmitInlineAsm(const Mac
// Reset SanitizeAddress based on the function's attribute.
MCTargetOptions MCOptions = TM.Options.MCOptions;
MCOptions.SanitizeAddress =
- MF->getFunction()->hasFnAttribute(Attribute::SanitizeAddress);
+ MF->getFunction().hasFnAttribute(Attribute::SanitizeAddress);
EmitInlineAsm(OS.str(), getSubtargetInfo(), MCOptions, LocMD,
MI->getInlineAsmDialect());
Modified: llvm/trunk/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp (original)
+++ llvm/trunk/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp Fri Dec 15 14:22:58 2017
@@ -1154,9 +1154,9 @@ void CodeViewDebug::collectVariableInfo(
}
void CodeViewDebug::beginFunctionImpl(const MachineFunction *MF) {
- const Function *GV = MF->getFunction();
- assert(FnDebugInfo.count(GV) == false);
- CurFn = &FnDebugInfo[GV];
+ const Function &GV = MF->getFunction();
+ assert(FnDebugInfo.count(&GV) == false);
+ CurFn = &FnDebugInfo[&GV];
CurFn->FuncId = NextFuncId++;
CurFn->Begin = Asm->getFunctionBegin();
@@ -2273,15 +2273,15 @@ void CodeViewDebug::emitLocalVariable(co
}
void CodeViewDebug::endFunctionImpl(const MachineFunction *MF) {
- const Function *GV = MF->getFunction();
- assert(FnDebugInfo.count(GV));
- assert(CurFn == &FnDebugInfo[GV]);
+ const Function &GV = MF->getFunction();
+ assert(FnDebugInfo.count(&GV));
+ assert(CurFn == &FnDebugInfo[&GV]);
- collectVariableInfo(GV->getSubprogram());
+ collectVariableInfo(GV.getSubprogram());
// Don't emit anything if we don't have any line tables.
if (!CurFn->HaveLineInfo) {
- FnDebugInfo.erase(GV);
+ FnDebugInfo.erase(&GV);
CurFn = nullptr;
return;
}
Modified: llvm/trunk/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp (original)
+++ llvm/trunk/lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp Fri Dec 15 14:22:58 2017
@@ -179,7 +179,7 @@ static bool hasDebugInfo(const MachineMo
const MachineFunction *MF) {
if (!MMI->hasDebugInfo())
return false;
- auto *SP = MF->getFunction()->getSubprogram();
+ auto *SP = MF->getFunction().getSubprogram();
if (!SP)
return false;
assert(SP->getUnit());
@@ -223,7 +223,7 @@ void DebugHandlerBase::beginFunction(con
// label, so arguments are visible when breaking at function entry.
const DILocalVariable *DIVar = Ranges.front().first->getDebugVariable();
if (DIVar->isParameter() &&
- getDISubprogram(DIVar->getScope())->describes(MF->getFunction())) {
+ getDISubprogram(DIVar->getScope())->describes(&MF->getFunction())) {
LabelsBeforeInsn[Ranges.front().first] = Asm->getFunctionBegin();
if (Ranges.front().first->getDebugExpression()->isFragment()) {
// Mark all non-overlapping initial fragments.
Modified: llvm/trunk/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp (original)
+++ llvm/trunk/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp Fri Dec 15 14:22:58 2017
@@ -87,7 +87,7 @@ static MCSymbol *getExceptionSym(AsmPrin
void DwarfCFIException::beginFunction(const MachineFunction *MF) {
shouldEmitMoves = shouldEmitPersonality = shouldEmitLSDA = false;
- const Function *F = MF->getFunction();
+ const Function &F = MF->getFunction();
// If any landing pads survive, we need an EH table.
bool hasLandingPads = !MF->getLandingPads().empty();
@@ -100,17 +100,17 @@ void DwarfCFIException::beginFunction(co
const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
unsigned PerEncoding = TLOF.getPersonalityEncoding();
const Function *Per = nullptr;
- if (F->hasPersonalityFn())
- Per = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
+ if (F.hasPersonalityFn())
+ Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
// Emit a personality function even when there are no landing pads
forceEmitPersonality =
// ...if a personality function is explicitly specified
- F->hasPersonalityFn() &&
+ F.hasPersonalityFn() &&
// ... and it's not known to be a noop in the absence of invokes
!isNoOpWithoutInvoke(classifyEHPersonality(Per)) &&
// ... and we're not explicitly asked not to emit it
- F->needsUnwindTableEntry();
+ F.needsUnwindTableEntry();
shouldEmitPersonality =
(forceEmitPersonality ||
@@ -143,8 +143,8 @@ void DwarfCFIException::beginFragment(co
if (!shouldEmitPersonality)
return;
- auto *F = MBB->getParent()->getFunction();
- auto *P = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
+ auto &F = MBB->getParent()->getFunction();
+ auto *P = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
assert(P && "Expected personality function");
// If we are forced to emit this personality, make sure to record
Modified: llvm/trunk/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AsmPrinter/DwarfDebug.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/DwarfDebug.cpp (original)
+++ llvm/trunk/lib/CodeGen/AsmPrinter/DwarfDebug.cpp Fri Dec 15 14:22:58 2017
@@ -1163,7 +1163,7 @@ void DwarfDebug::beginInstruction(const
DebugHandlerBase::beginInstruction(MI);
assert(CurMI);
- const auto *SP = MI->getMF()->getFunction()->getSubprogram();
+ const auto *SP = MI->getMF()->getFunction().getSubprogram();
if (!SP || SP->getUnit()->getEmissionKind() == DICompileUnit::NoDebug)
return;
@@ -1261,7 +1261,7 @@ static DebugLoc findPrologueEndLoc(const
void DwarfDebug::beginFunctionImpl(const MachineFunction *MF) {
CurFn = MF;
- auto *SP = MF->getFunction()->getSubprogram();
+ auto *SP = MF->getFunction().getSubprogram();
assert(LScopes.empty() || SP == LScopes.getCurrentFunctionScope()->getScopeNode());
if (SP->getUnit()->getEmissionKind() == DICompileUnit::NoDebug)
return;
@@ -1297,7 +1297,7 @@ void DwarfDebug::skippedNonDebugFunction
// Gather and emit post-function debug information.
void DwarfDebug::endFunctionImpl(const MachineFunction *MF) {
- const DISubprogram *SP = MF->getFunction()->getSubprogram();
+ const DISubprogram *SP = MF->getFunction().getSubprogram();
assert(CurFn == MF &&
"endFunction should be called with the same function as beginFunction");
Modified: llvm/trunk/lib/CodeGen/AsmPrinter/WinException.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AsmPrinter/WinException.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/WinException.cpp (original)
+++ llvm/trunk/lib/CodeGen/AsmPrinter/WinException.cpp Fri Dec 15 14:22:58 2017
@@ -63,7 +63,7 @@ void WinException::beginFunction(const M
bool hasLandingPads = !MF->getLandingPads().empty();
bool hasEHFunclets = MF->hasEHFunclets();
- const Function *F = MF->getFunction();
+ const Function &F = MF->getFunction();
shouldEmitMoves = Asm->needsSEHMoves() && MF->hasWinCFI();
@@ -72,14 +72,14 @@ void WinException::beginFunction(const M
EHPersonality Per = EHPersonality::Unknown;
const Function *PerFn = nullptr;
- if (F->hasPersonalityFn()) {
- PerFn = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
+ if (F.hasPersonalityFn()) {
+ PerFn = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
Per = classifyEHPersonality(PerFn);
}
- bool forceEmitPersonality = F->hasPersonalityFn() &&
+ bool forceEmitPersonality = F.hasPersonalityFn() &&
!isNoOpWithoutInvoke(Per) &&
- F->needsUnwindTableEntry();
+ F.needsUnwindTableEntry();
shouldEmitPersonality =
forceEmitPersonality || ((hasLandingPads || hasEHFunclets) &&
@@ -98,7 +98,7 @@ void WinException::beginFunction(const M
// functions may still refer to it.
const WinEHFuncInfo &FuncInfo = *MF->getWinEHFuncInfo();
StringRef FLinkageName =
- GlobalValue::dropLLVMManglingEscape(MF->getFunction()->getName());
+ GlobalValue::dropLLVMManglingEscape(MF->getFunction().getName());
emitEHRegistrationOffsetLabel(FuncInfo, FLinkageName);
}
shouldEmitLSDA = hasEHFunclets;
@@ -115,10 +115,10 @@ void WinException::endFunction(const Mac
if (!shouldEmitPersonality && !shouldEmitMoves && !shouldEmitLSDA)
return;
- const Function *F = MF->getFunction();
+ const Function &F = MF->getFunction();
EHPersonality Per = EHPersonality::Unknown;
- if (F->hasPersonalityFn())
- Per = classifyEHPersonality(F->getPersonalityFn()->stripPointerCasts());
+ if (F.hasPersonalityFn())
+ Per = classifyEHPersonality(F.getPersonalityFn()->stripPointerCasts());
// Get rid of any dead landing pads if we're not using funclets. In funclet
// schemes, the landing pad is not actually reachable. It only exists so
@@ -170,8 +170,8 @@ static MCSymbol *getMCSymbolForMBB(AsmPr
// Give catches and cleanups a name based off of their parent function and
// their funclet entry block's number.
const MachineFunction *MF = MBB->getParent();
- const Function *F = MF->getFunction();
- StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F->getName());
+ const Function &F = MF->getFunction();
+ StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F.getName());
MCContext &Ctx = MF->getContext();
StringRef HandlerPrefix = MBB->isCleanupFuncletEntry() ? "dtor" : "catch";
return Ctx.getOrCreateSymbol("?" + HandlerPrefix + "$" +
@@ -183,7 +183,7 @@ void WinException::beginFunclet(const Ma
MCSymbol *Sym) {
CurrentFuncletEntry = &MBB;
- const Function *F = Asm->MF->getFunction();
+ const Function &F = Asm->MF->getFunction();
// If a symbol was not provided for the funclet, invent one.
if (!Sym) {
Sym = getMCSymbolForMBB(Asm, &MBB);
@@ -198,7 +198,7 @@ void WinException::beginFunclet(const Ma
// We want our funclet's entry point to be aligned such that no nops will be
// present after the label.
Asm->EmitAlignment(std::max(Asm->MF->getAlignment(), MBB.getAlignment()),
- F);
+ &F);
// Now that we've emitted the alignment directive, point at our funclet.
Asm->OutStreamer->EmitLabel(Sym);
@@ -215,8 +215,8 @@ void WinException::beginFunclet(const Ma
const Function *PerFn = nullptr;
// Determine which personality routine we are using for this funclet.
- if (F->hasPersonalityFn())
- PerFn = dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
+ if (F.hasPersonalityFn())
+ PerFn = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
const MCSymbol *PersHandlerSym =
TLOF.getCFIPersonalitySymbol(PerFn, Asm->TM, MMI);
@@ -237,10 +237,10 @@ void WinException::endFunclet() {
const MachineFunction *MF = Asm->MF;
if (shouldEmitMoves || shouldEmitPersonality) {
- const Function *F = MF->getFunction();
+ const Function &F = MF->getFunction();
EHPersonality Per = EHPersonality::Unknown;
- if (F->hasPersonalityFn())
- Per = classifyEHPersonality(F->getPersonalityFn()->stripPointerCasts());
+ if (F.hasPersonalityFn())
+ Per = classifyEHPersonality(F.getPersonalityFn()->stripPointerCasts());
// Emit an UNWIND_INFO struct describing the prologue.
Asm->OutStreamer->EmitWinEHHandlerData();
@@ -249,7 +249,7 @@ void WinException::endFunclet() {
!CurrentFuncletEntry->isCleanupFuncletEntry()) {
// If this is a C++ catch funclet (or the parent function),
// emit a reference to the LSDA for the parent function.
- StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F->getName());
+ StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F.getName());
MCSymbol *FuncInfoXData = Asm->OutContext.getOrCreateSymbol(
Twine("$cppxdata$", FuncLinkageName));
Asm->OutStreamer->EmitValue(create32bitRef(FuncInfoXData), 4);
@@ -533,7 +533,7 @@ void WinException::emitCSpecificHandlerT
// Emit a label assignment with the SEH frame offset so we can use it for
// llvm.x86.seh.recoverfp.
StringRef FLinkageName =
- GlobalValue::dropLLVMManglingEscape(MF->getFunction()->getName());
+ GlobalValue::dropLLVMManglingEscape(MF->getFunction().getName());
MCSymbol *ParentFrameOffset =
Ctx.getOrCreateParentFrameOffsetSymbol(FLinkageName);
const MCExpr *MCOffset =
@@ -628,11 +628,11 @@ void WinException::emitSEHActionsForRang
}
void WinException::emitCXXFrameHandler3Table(const MachineFunction *MF) {
- const Function *F = MF->getFunction();
+ const Function &F = MF->getFunction();
auto &OS = *Asm->OutStreamer;
const WinEHFuncInfo &FuncInfo = *MF->getWinEHFuncInfo();
- StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F->getName());
+ StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F.getName());
SmallVector<std::pair<const MCExpr *, int>, 4> IPToStateTable;
MCSymbol *FuncInfoXData = nullptr;
@@ -938,8 +938,8 @@ void WinException::emitEHRegistrationOff
/// indexed by state number instead of IP.
void WinException::emitExceptHandlerTable(const MachineFunction *MF) {
MCStreamer &OS = *Asm->OutStreamer;
- const Function *F = MF->getFunction();
- StringRef FLinkageName = GlobalValue::dropLLVMManglingEscape(F->getName());
+ const Function &F = MF->getFunction();
+ StringRef FLinkageName = GlobalValue::dropLLVMManglingEscape(F.getName());
bool VerboseAsm = OS.isVerboseAsm();
auto AddComment = [&](const Twine &Comment) {
@@ -956,7 +956,7 @@ void WinException::emitExceptHandlerTabl
OS.EmitLabel(LSDALabel);
const Function *Per =
- dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
+ dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
StringRef PerName = Per->getName();
int BaseState = -1;
if (PerName == "_except_handler4") {
Modified: llvm/trunk/lib/CodeGen/BranchFolding.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/BranchFolding.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/BranchFolding.cpp (original)
+++ llvm/trunk/lib/CodeGen/BranchFolding.cpp Fri Dec 15 14:22:58 2017
@@ -118,7 +118,7 @@ INITIALIZE_PASS(BranchFolderPass, DEBUG_
"Control Flow Optimizer", false, false)
bool BranchFolderPass::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
@@ -685,7 +685,7 @@ ProfitableToMerge(MachineBasicBlock *MBB
// branch instruction, which is likely to be smaller than the 2
// instructions that would be deleted in the merge.
MachineFunction *MF = MBB1->getParent();
- return EffectiveTailLen >= 2 && MF->getFunction()->optForSize() &&
+ return EffectiveTailLen >= 2 && MF->getFunction().optForSize() &&
(I1 == MBB1->begin() || I2 == MBB2->begin());
}
@@ -1511,7 +1511,7 @@ ReoptimizeBlock:
}
if (!IsEmptyBlock(MBB) && MBB->pred_size() == 1 &&
- MF.getFunction()->optForSize()) {
+ MF.getFunction().optForSize()) {
// Changing "Jcc foo; foo: jmp bar;" into "Jcc bar;" might change the branch
// direction, thereby defeating careful block placement and regressing
// performance. Therefore, only consider this for optsize functions.
Modified: llvm/trunk/lib/CodeGen/DeadMachineInstructionElim.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/DeadMachineInstructionElim.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/DeadMachineInstructionElim.cpp (original)
+++ llvm/trunk/lib/CodeGen/DeadMachineInstructionElim.cpp Fri Dec 15 14:22:58 2017
@@ -94,7 +94,7 @@ bool DeadMachineInstructionElim::isDead(
}
bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
bool AnyChanges = false;
Modified: llvm/trunk/lib/CodeGen/EarlyIfConversion.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/EarlyIfConversion.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/EarlyIfConversion.cpp (original)
+++ llvm/trunk/lib/CodeGen/EarlyIfConversion.cpp Fri Dec 15 14:22:58 2017
@@ -785,7 +785,7 @@ bool EarlyIfConverter::tryConvertIf(Mach
bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** EARLY IF-CONVERSION **********\n"
<< "********** Function: " << MF.getName() << '\n');
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
// Only run if conversion if the target wants it.
Modified: llvm/trunk/lib/CodeGen/ExecutionDepsFix.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/ExecutionDepsFix.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/ExecutionDepsFix.cpp (original)
+++ llvm/trunk/lib/CodeGen/ExecutionDepsFix.cpp Fri Dec 15 14:22:58 2017
@@ -617,7 +617,7 @@ bool ExecutionDepsFix::isBlockDone(Machi
}
bool ExecutionDepsFix::runOnMachineFunction(MachineFunction &mf) {
- if (skipFunction(*mf.getFunction()))
+ if (skipFunction(mf.getFunction()))
return false;
MF = &mf;
TII = MF->getSubtarget().getInstrInfo();
Modified: llvm/trunk/lib/CodeGen/FEntryInserter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/FEntryInserter.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/FEntryInserter.cpp (original)
+++ llvm/trunk/lib/CodeGen/FEntryInserter.cpp Fri Dec 15 14:22:58 2017
@@ -36,7 +36,7 @@ struct FEntryInserter : public MachineFu
bool FEntryInserter::runOnMachineFunction(MachineFunction &MF) {
const std::string FEntryName =
- MF.getFunction()->getFnAttribute("fentry-call").getValueAsString();
+ MF.getFunction().getFnAttribute("fentry-call").getValueAsString();
if (FEntryName != "true")
return false;
Modified: llvm/trunk/lib/CodeGen/GCRootLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GCRootLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GCRootLowering.cpp (original)
+++ llvm/trunk/lib/CodeGen/GCRootLowering.cpp Fri Dec 15 14:22:58 2017
@@ -328,10 +328,10 @@ void GCMachineCodeAnalysis::FindStackOff
bool GCMachineCodeAnalysis::runOnMachineFunction(MachineFunction &MF) {
// Quick exit for functions that do not use GC.
- if (!MF.getFunction()->hasGC())
+ if (!MF.getFunction().hasGC())
return false;
- FI = &getAnalysis<GCModuleInfo>().getFunctionInfo(*MF.getFunction());
+ FI = &getAnalysis<GCModuleInfo>().getFunctionInfo(MF.getFunction());
MMI = &getAnalysis<MachineModuleInfo>();
TII = MF.getSubtarget().getInstrInfo();
Modified: llvm/trunk/lib/CodeGen/GlobalISel/CallLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/CallLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/CallLowering.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/CallLowering.cpp Fri Dec 15 14:22:58 2017
@@ -108,7 +108,7 @@ bool CallLowering::handleAssignments(Mac
ArrayRef<ArgInfo> Args,
ValueHandler &Handler) const {
MachineFunction &MF = MIRBuilder.getMF();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
const DataLayout &DL = F.getParent()->getDataLayout();
SmallVector<CCValAssign, 16> ArgLocs;
Modified: llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp Fri Dec 15 14:22:58 2017
@@ -124,8 +124,8 @@ unsigned IRTranslator::getOrCreateVReg(c
bool Success = translate(*CV, VReg);
if (!Success) {
OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
- MF->getFunction()->getSubprogram(),
- &MF->getFunction()->getEntryBlock());
+ MF->getFunction().getSubprogram(),
+ &MF->getFunction().getEntryBlock());
R << "unable to translate constant: " << ore::NV("Type", Val.getType());
reportTranslationError(*MF, *TPC, *ORE, R);
return VReg;
@@ -591,7 +591,7 @@ void IRTranslator::getStackGuard(unsigne
MIB.addDef(DstReg);
auto &TLI = *MF->getSubtarget().getTargetLowering();
- Value *Global = TLI.getSDagStackGuard(*MF->getFunction()->getParent());
+ Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
if (!Global)
return;
@@ -925,7 +925,7 @@ bool IRTranslator::translateLandingPad(c
// If there aren't registers to copy the values into (e.g., during SjLj
// exceptions), then don't bother.
auto &TLI = *MF->getSubtarget().getTargetLowering();
- const Constant *PersonalityFn = MF->getFunction()->getPersonalityFn();
+ const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
return true;
@@ -1236,7 +1236,7 @@ void IRTranslator::finalizeFunction() {
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
MF = &CurMF;
- const Function &F = *MF->getFunction();
+ const Function &F = MF->getFunction();
if (F.empty())
return false;
CLI = MF->getSubtarget().getCallLowering();
@@ -1252,8 +1252,7 @@ bool IRTranslator::runOnMachineFunction(
if (!DL->isLittleEndian()) {
// Currently we don't properly handle big endian code.
OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
- MF->getFunction()->getSubprogram(),
- &MF->getFunction()->getEntryBlock());
+ F.getSubprogram(), &F.getEntryBlock());
R << "unable to translate in big endian mode";
reportTranslationError(*MF, *TPC, *ORE, R);
}
@@ -1289,8 +1288,7 @@ bool IRTranslator::runOnMachineFunction(
}
if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
- MF->getFunction()->getSubprogram(),
- &MF->getFunction()->getEntryBlock());
+ F.getSubprogram(), &F.getEntryBlock());
R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
reportTranslationError(*MF, *TPC, *ORE, R);
return false;
Modified: llvm/trunk/lib/CodeGen/GlobalISel/InstructionSelect.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/InstructionSelect.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/InstructionSelect.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/InstructionSelect.cpp Fri Dec 15 14:22:58 2017
@@ -189,7 +189,7 @@ bool InstructionSelect::runOnMachineFunc
if (MF.size() != NumBlocks) {
MachineOptimizationRemarkMissed R("gisel-select", "GISelFailure",
- MF.getFunction()->getSubprogram(),
+ MF.getFunction().getSubprogram(),
/*MBB=*/nullptr);
R << "inserting blocks is not supported yet";
reportGISelFailure(MF, TPC, MORE, R);
Modified: llvm/trunk/lib/CodeGen/GlobalISel/Legalizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/Legalizer.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/Legalizer.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/Legalizer.cpp Fri Dec 15 14:22:58 2017
@@ -175,7 +175,7 @@ bool Legalizer::runOnMachineFunction(Mac
// outerloop for that.
if (MF.size() != NumBlocks) {
MachineOptimizationRemarkMissed R("gisel-legalize", "GISelFailure",
- MF.getFunction()->getSubprogram(),
+ MF.getFunction().getSubprogram(),
/*MBB=*/nullptr);
R << "inserting blocks is not supported yet";
reportGISelFailure(MF, TPC, MORE, R);
Modified: llvm/trunk/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/LegalizerHelper.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/LegalizerHelper.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/LegalizerHelper.cpp Fri Dec 15 14:22:58 2017
@@ -136,7 +136,7 @@ LegalizerHelper::LegalizeResult
LegalizerHelper::libcall(MachineInstr &MI) {
LLT LLTy = MRI.getType(MI.getOperand(0).getReg());
unsigned Size = LLTy.getSizeInBits();
- auto &Ctx = MIRBuilder.getMF().getFunction()->getContext();
+ auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
MIRBuilder.setInstr(MI);
@@ -410,7 +410,7 @@ LegalizerHelper::LegalizeResult Legalize
return UnableToLegalize;
int NumParts = SizeOp0 / NarrowSize;
const APInt &Cst = MI.getOperand(1).getCImm()->getValue();
- LLVMContext &Ctx = MIRBuilder.getMF().getFunction()->getContext();
+ LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
SmallVector<unsigned, 2> DstRegs;
for (int i = 0; i < NumParts; ++i) {
@@ -824,7 +824,7 @@ LegalizerHelper::lower(MachineInstr &MI,
return UnableToLegalize;
unsigned Res = MI.getOperand(0).getReg();
Type *ZeroTy;
- LLVMContext &Ctx = MIRBuilder.getMF().getFunction()->getContext();
+ LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
switch (Ty.getSizeInBits()) {
case 16:
ZeroTy = Type::getHalfTy(Ctx);
Modified: llvm/trunk/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp Fri Dec 15 14:22:58 2017
@@ -263,7 +263,7 @@ MachineInstrBuilder MachineIRBuilder::bu
const ConstantInt *NewVal = &Val;
if (Ty.getSizeInBits() != Val.getBitWidth())
- NewVal = ConstantInt::get(MF->getFunction()->getContext(),
+ NewVal = ConstantInt::get(MF->getFunction().getContext(),
Val.getValue().sextOrTrunc(Ty.getSizeInBits()));
return buildInstr(TargetOpcode::G_CONSTANT).addDef(Res).addCImm(NewVal);
@@ -271,7 +271,7 @@ MachineInstrBuilder MachineIRBuilder::bu
MachineInstrBuilder MachineIRBuilder::buildConstant(unsigned Res,
int64_t Val) {
- auto IntN = IntegerType::get(MF->getFunction()->getContext(),
+ auto IntN = IntegerType::get(MF->getFunction().getContext(),
MRI->getType(Res).getSizeInBits());
ConstantInt *CI = ConstantInt::get(IntN, Val, true);
return buildConstant(Res, *CI);
Modified: llvm/trunk/lib/CodeGen/GlobalISel/RegBankSelect.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/RegBankSelect.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/RegBankSelect.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/RegBankSelect.cpp Fri Dec 15 14:22:58 2017
@@ -601,9 +601,9 @@ bool RegBankSelect::runOnMachineFunction
return false;
DEBUG(dbgs() << "Assign register banks for: " << MF.getName() << '\n');
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
Mode SaveOptMode = OptMode;
- if (F->hasFnAttribute(Attribute::OptimizeNone))
+ if (F.hasFnAttribute(Attribute::OptimizeNone))
OptMode = Mode::Fast;
init(MF);
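
[A minimal sketch of the pointer-to-reference migration shown in RegBankSelect
above: bind a 'const Function &' and use '.' for member access. The helper
name is illustrative.]

    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    // Returns true if the function carries the optnone attribute.
    static bool hasOptNone(const MachineFunction &MF) {
      const Function &F = MF.getFunction(); // never null, so a reference fits
      return F.hasFnAttribute(Attribute::OptimizeNone);
    }
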
Modified: llvm/trunk/lib/CodeGen/IfConversion.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/IfConversion.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/IfConversion.cpp (original)
+++ llvm/trunk/lib/CodeGen/IfConversion.cpp Fri Dec 15 14:22:58 2017
@@ -337,7 +337,7 @@ INITIALIZE_PASS_DEPENDENCY(MachineBranch
INITIALIZE_PASS_END(IfConverter, DEBUG_TYPE, "If Converter", false, false)
bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()) || (PredicateFtor && !PredicateFtor(MF)))
+ if (skipFunction(MF.getFunction()) || (PredicateFtor && !PredicateFtor(MF)))
return false;
const TargetSubtargetInfo &ST = MF.getSubtarget();
Modified: llvm/trunk/lib/CodeGen/LexicalScopes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/LexicalScopes.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/LexicalScopes.cpp (original)
+++ llvm/trunk/lib/CodeGen/LexicalScopes.cpp Fri Dec 15 14:22:58 2017
@@ -49,7 +49,7 @@ void LexicalScopes::reset() {
void LexicalScopes::initialize(const MachineFunction &Fn) {
reset();
// Don't attempt any lexical scope creation for a NoDebug compile unit.
- if (Fn.getFunction()->getSubprogram()->getUnit()->getEmissionKind() ==
+ if (Fn.getFunction().getSubprogram()->getUnit()->getEmissionKind() ==
DICompileUnit::NoDebug)
return;
MF = &Fn;
@@ -173,7 +173,7 @@ LexicalScopes::getOrCreateRegularScope(c
false)).first;
if (!Parent) {
- assert(cast<DISubprogram>(Scope)->describes(MF->getFunction()));
+ assert(cast<DISubprogram>(Scope)->describes(&MF->getFunction()));
assert(!CurrentFnLexicalScope);
CurrentFnLexicalScope = &I->second;
}
Modified: llvm/trunk/lib/CodeGen/LiveDebugValues.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/LiveDebugValues.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/LiveDebugValues.cpp (original)
+++ llvm/trunk/lib/CodeGen/LiveDebugValues.cpp Fri Dec 15 14:22:58 2017
@@ -703,12 +703,12 @@ bool LiveDebugValues::ExtendRanges(Machi
}
bool LiveDebugValues::runOnMachineFunction(MachineFunction &MF) {
- if (!MF.getFunction()->getSubprogram())
+ if (!MF.getFunction().getSubprogram())
// LiveDebugValues will already have removed all DBG_VALUEs.
return false;
// Skip functions from NoDebug compilation units.
- if (MF.getFunction()->getSubprogram()->getUnit()->getEmissionKind() ==
+ if (MF.getFunction().getSubprogram()->getUnit()->getEmissionKind() ==
DICompileUnit::NoDebug)
return false;
Modified: llvm/trunk/lib/CodeGen/LiveDebugVariables.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/LiveDebugVariables.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/LiveDebugVariables.cpp (original)
+++ llvm/trunk/lib/CodeGen/LiveDebugVariables.cpp Fri Dec 15 14:22:58 2017
@@ -833,7 +833,7 @@ static void removeDebugValues(MachineFun
bool LiveDebugVariables::runOnMachineFunction(MachineFunction &mf) {
if (!EnableLDV)
return false;
- if (!mf.getFunction()->getSubprogram()) {
+ if (!mf.getFunction().getSubprogram()) {
removeDebugValues(mf);
return false;
}
Modified: llvm/trunk/lib/CodeGen/LiveRangeShrink.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/LiveRangeShrink.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/LiveRangeShrink.cpp (original)
+++ llvm/trunk/lib/CodeGen/LiveRangeShrink.cpp Fri Dec 15 14:22:58 2017
@@ -106,7 +106,7 @@ static void BuildInstOrderMap(MachineBas
}
bool LiveRangeShrink::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
MachineRegisterInfo &MRI = MF.getRegInfo();
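
[The same one-character change recurs across many passes: the explicit
dereference in skipFunction(*MF.getFunction()) disappears. A sketch with a
hypothetical pass name:]

    // ExamplePass stands in for any MachineFunctionPass.
    bool ExamplePass::runOnMachineFunction(MachineFunction &MF) {
      if (skipFunction(MF.getFunction())) // was: skipFunction(*MF.getFunction())
        return false;
      // ... the pass body would follow here ...
      return false;
    }
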
Modified: llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp (original)
+++ llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp Fri Dec 15 14:22:58 2017
@@ -431,7 +431,7 @@ bool MIParser::parseBasicBlockDefinition
break;
case MIToken::IRBlock:
// TODO: Report an error when both name and ir block are specified.
- if (parseIRBlock(BB, *MF.getFunction()))
+ if (parseIRBlock(BB, MF.getFunction()))
return true;
lex();
break;
@@ -447,7 +447,7 @@ bool MIParser::parseBasicBlockDefinition
if (!Name.empty()) {
BB = dyn_cast_or_null<BasicBlock>(
- MF.getFunction()->getValueSymbolTable()->lookup(Name));
+ MF.getFunction().getValueSymbolTable()->lookup(Name));
if (!BB)
return error(Loc, Twine("basic block '") + Name +
"' is not defined in the function '" +
@@ -1234,7 +1234,7 @@ bool MIParser::parseIRConstant(StringRef
const Constant *&C) {
auto Source = StringValue.str(); // The source has to be null terminated.
SMDiagnostic Err;
- C = parseConstantValue(Source, Err, *MF.getFunction()->getParent(),
+ C = parseConstantValue(Source, Err, *MF.getFunction().getParent(),
&PFS.IRSlots);
if (!C)
return error(Loc + Err.getColumnNo(), Err.getMessage());
@@ -1254,7 +1254,7 @@ bool MIParser::parseLowLevelType(StringR
lex();
return false;
} else if (Token.is(MIToken::PointerType)) {
- const DataLayout &DL = MF.getFunction()->getParent()->getDataLayout();
+ const DataLayout &DL = MF.getDataLayout();
unsigned AS = APSInt(Token.range().drop_front()).getZExtValue();
Ty = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
lex();
@@ -1419,7 +1419,7 @@ bool MIParser::parseFixedStackObjectOper
bool MIParser::parseGlobalValue(GlobalValue *&GV) {
switch (Token.kind()) {
case MIToken::NamedGlobalValue: {
- const Module *M = MF.getFunction()->getParent();
+ const Module *M = MF.getFunction().getParent();
GV = M->getNamedValue(Token.stringValue());
if (!GV)
return error(Twine("use of undefined global value '") + Token.range() +
@@ -1557,7 +1557,7 @@ bool MIParser::parseDIExpression(MDNode
if (expectAndConsume(MIToken::rparen))
return true;
- Expr = DIExpression::get(MF.getFunction()->getContext(), Elements);
+ Expr = DIExpression::get(MF.getFunction().getContext(), Elements);
return false;
}
@@ -2102,7 +2102,7 @@ bool MIParser::parseOperandsOffset(Machi
bool MIParser::parseIRValue(const Value *&V) {
switch (Token.kind()) {
case MIToken::NamedIRValue: {
- V = MF.getFunction()->getValueSymbolTable()->lookup(Token.stringValue());
+ V = MF.getFunction().getValueSymbolTable()->lookup(Token.stringValue());
break;
}
case MIToken::IRValue: {
@@ -2361,7 +2361,7 @@ bool MIParser::parseMachineMemoryOperand
// Optional synchronization scope.
SyncScope::ID SSID;
- if (parseOptionalScope(MF.getFunction()->getContext(), SSID))
+ if (parseOptionalScope(MF.getFunction().getContext(), SSID))
return true;
// Up to two atomic orderings (cmpxchg provides guarantees on failure).
@@ -2542,12 +2542,12 @@ static const BasicBlock *getIRBlockFromS
const BasicBlock *MIParser::getIRBlock(unsigned Slot) {
if (Slots2BasicBlocks.empty())
- initSlots2BasicBlocks(*MF.getFunction(), Slots2BasicBlocks);
+ initSlots2BasicBlocks(MF.getFunction(), Slots2BasicBlocks);
return getIRBlockFromSlot(Slot, Slots2BasicBlocks);
}
const BasicBlock *MIParser::getIRBlock(unsigned Slot, const Function &F) {
- if (&F == MF.getFunction())
+ if (&F == &MF.getFunction())
return getIRBlock(Slot);
DenseMap<unsigned, const BasicBlock *> CustomSlots2BasicBlocks;
initSlots2BasicBlocks(F, CustomSlots2BasicBlocks);
@@ -2578,7 +2578,7 @@ static void initSlots2Values(const Funct
const Value *MIParser::getIRValue(unsigned Slot) {
if (Slots2Values.empty())
- initSlots2Values(*MF.getFunction(), Slots2Values);
+ initSlots2Values(MF.getFunction(), Slots2Values);
auto ValueInfo = Slots2Values.find(Slot);
if (ValueInfo == Slots2Values.end())
return nullptr;
Modified: llvm/trunk/lib/CodeGen/MIRParser/MIRParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MIRParser/MIRParser.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MIRParser/MIRParser.cpp (original)
+++ llvm/trunk/lib/CodeGen/MIRParser/MIRParser.cpp Fri Dec 15 14:22:58 2017
@@ -551,7 +551,7 @@ bool MIRParserImpl::initializeFrameInfo(
const yaml::MachineFunction &YamlMF) {
MachineFunction &MF = PFS.MF;
MachineFrameInfo &MFI = MF.getFrameInfo();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
const yaml::MachineFrameInfo &YamlMFI = YamlMF.FrameInfo;
MFI.setFrameAddressIsTaken(YamlMFI.IsFrameAddressTaken);
MFI.setReturnAddressIsTaken(YamlMFI.IsReturnAddressTaken);
@@ -722,7 +722,7 @@ bool MIRParserImpl::initializeConstantPo
MachineConstantPool &ConstantPool, const yaml::MachineFunction &YamlMF) {
DenseMap<unsigned, unsigned> &ConstantPoolSlots = PFS.ConstantPoolSlots;
const MachineFunction &MF = PFS.MF;
- const auto &M = *MF.getFunction()->getParent();
+ const auto &M = *MF.getFunction().getParent();
SMDiagnostic Error;
for (const auto &YamlConstant : YamlMF.Constants) {
if (YamlConstant.IsTargetSpecific)
Modified: llvm/trunk/lib/CodeGen/MIRPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MIRPrinter.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MIRPrinter.cpp (original)
+++ llvm/trunk/lib/CodeGen/MIRPrinter.cpp Fri Dec 15 14:22:58 2017
@@ -213,8 +213,8 @@ void MIRPrinter::print(const MachineFunc
MachineFunctionProperties::Property::Selected);
convert(YamlMF, MF.getRegInfo(), MF.getSubtarget().getRegisterInfo());
- ModuleSlotTracker MST(MF.getFunction()->getParent());
- MST.incorporateFunction(*MF.getFunction());
+ ModuleSlotTracker MST(MF.getFunction().getParent());
+ MST.incorporateFunction(MF.getFunction());
convert(MST, YamlMF.FrameInfo, MF.getFrameInfo());
convertStackObjects(YamlMF, MF, MST);
if (const auto *ConstantPool = MF.getConstantPool())
@@ -696,7 +696,7 @@ void MIPrinter::print(const MachineInstr
if (!MI.memoperands_empty()) {
OS << " :: ";
- const LLVMContext &Context = MF->getFunction()->getContext();
+ const LLVMContext &Context = MF->getFunction().getContext();
bool NeedComma = false;
for (const auto *Op : MI.memoperands()) {
if (NeedComma)
Modified: llvm/trunk/lib/CodeGen/MachineBasicBlock.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineBasicBlock.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineBasicBlock.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineBasicBlock.cpp Fri Dec 15 14:22:58 2017
@@ -267,8 +267,8 @@ void MachineBasicBlock::print(raw_ostrea
<< " is null\n";
return;
}
- const Function *F = MF->getFunction();
- const Module *M = F ? F->getParent() : nullptr;
+ const Function &F = MF->getFunction();
+ const Module *M = F.getParent();
ModuleSlotTracker MST(M);
print(OS, MST, Indexes);
}
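
[Because the Function can no longer be null, the parent Module is always
reachable and the old 'F ? F->getParent() : nullptr' guard goes away. A
sketch of the resulting printing pattern; the helper name is illustrative.]

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/IR/ModuleSlotTracker.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    static void printBlocks(const MachineFunction &MF, raw_ostream &OS) {
      const Function &F = MF.getFunction();
      ModuleSlotTracker MST(F.getParent()); // Module is always available
      MST.incorporateFunction(F);
      for (const MachineBasicBlock &MBB : MF)
        MBB.print(OS, MST);
    }
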
Modified: llvm/trunk/lib/CodeGen/MachineBlockFrequencyInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineBlockFrequencyInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineBlockFrequencyInfo.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineBlockFrequencyInfo.cpp Fri Dec 15 14:22:58 2017
@@ -224,14 +224,14 @@ MachineBlockFrequencyInfo::getBlockFreq(
Optional<uint64_t> MachineBlockFrequencyInfo::getBlockProfileCount(
const MachineBasicBlock *MBB) const {
- const Function *F = MBFI->getFunction()->getFunction();
- return MBFI ? MBFI->getBlockProfileCount(*F, MBB) : None;
+ const Function &F = MBFI->getFunction()->getFunction();
+ return MBFI ? MBFI->getBlockProfileCount(F, MBB) : None;
}
Optional<uint64_t>
MachineBlockFrequencyInfo::getProfileCountFromFreq(uint64_t Freq) const {
- const Function *F = MBFI->getFunction()->getFunction();
- return MBFI ? MBFI->getProfileCountFromFreq(*F, Freq) : None;
+ const Function &F = MBFI->getFunction()->getFunction();
+ return MBFI ? MBFI->getProfileCountFromFreq(F, Freq) : None;
}
bool
Modified: llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp Fri Dec 15 14:22:58 2017
@@ -1235,7 +1235,7 @@ void MachineBlockPlacement::precomputeTr
// When profile is available, we need to handle the triangle-shape CFG.
static BranchProbability getLayoutSuccessorProbThreshold(
const MachineBasicBlock *BB) {
- if (!BB->getParent()->getFunction()->getEntryCount())
+ if (!BB->getParent()->getFunction().getEntryCount())
return BranchProbability(StaticLikelyProb, 100);
if (BB->succ_size() == 2) {
const MachineBasicBlock *Succ1 = *BB->succ_begin();
@@ -1769,7 +1769,7 @@ MachineBlockPlacement::findBestLoopTop(c
// i.e. when the layout predecessor does not fallthrough to the loop header.
// In practice this never happens though: there always seems to be a preheader
// that can fallthrough and that is also placed before the header.
- if (F->getFunction()->optForSize())
+ if (F->getFunction().optForSize())
return L.getHeader();
// Check that the header hasn't been fused with a preheader block due to
@@ -2178,7 +2178,7 @@ MachineBlockPlacement::collectLoopBlockS
// will be merged into the first outer loop chain for which this block is not
// cold anymore. This needs precise profile data and we only do this when
// profile data is available.
- if (F->getFunction()->getEntryCount() || ForceLoopColdBlock) {
+ if (F->getFunction().getEntryCount() || ForceLoopColdBlock) {
BlockFrequency LoopFreq(0);
for (auto LoopPred : L.getHeader()->predecessors())
if (!L.contains(LoopPred))
@@ -2220,7 +2220,7 @@ void MachineBlockPlacement::buildLoopCha
// for better layout.
bool RotateLoopWithProfile =
ForcePreciseRotationCost ||
- (PreciseRotationCost && F->getFunction()->getEntryCount());
+ (PreciseRotationCost && F->getFunction().getEntryCount());
// First check to see if there is an obviously preferable top block for the
// loop. This will default to the header, but may end up as one of the
@@ -2485,7 +2485,7 @@ void MachineBlockPlacement::alignBlocks(
// exclusively on the loop info here so that we can align backedges in
// unnatural CFGs and backedges that were introduced purely because of the
// loop rotations done during this layout pass.
- if (F->getFunction()->optForSize())
+ if (F->getFunction().optForSize())
return;
BlockChain &FunctionChain = *BlockToChain[&F->front()];
if (FunctionChain.begin() == FunctionChain.end())
@@ -2715,7 +2715,7 @@ bool MachineBlockPlacement::maybeTailDup
}
bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
// Check for single-block functions and skip them.
@@ -2760,7 +2760,7 @@ bool MachineBlockPlacement::runOnMachine
if (TailDupPlacement) {
MPDT = &getAnalysis<MachinePostDominatorTree>();
- if (MF.getFunction()->optForSize())
+ if (MF.getFunction().optForSize())
TailDupSize = 1;
bool PreRegAlloc = false;
TailDup.initMF(MF, PreRegAlloc, MBPI, /* LayoutMode */ true, TailDupSize);
@@ -2817,7 +2817,7 @@ bool MachineBlockPlacement::runOnMachine
}
if (ViewBlockLayoutWithBFI != GVDT_None &&
(ViewBlockFreqFuncName.empty() ||
- F->getFunction()->getName().equals(ViewBlockFreqFuncName))) {
+ F->getFunction().getName().equals(ViewBlockFreqFuncName))) {
MBFI->view("MBP." + MF.getName(), false);
}
Modified: llvm/trunk/lib/CodeGen/MachineCSE.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineCSE.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineCSE.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineCSE.cpp Fri Dec 15 14:22:58 2017
@@ -727,7 +727,7 @@ bool MachineCSE::PerformCSE(MachineDomTr
}
bool MachineCSE::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TII = MF.getSubtarget().getInstrInfo();
Modified: llvm/trunk/lib/CodeGen/MachineCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineCombiner.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineCombiner.cpp Fri Dec 15 14:22:58 2017
@@ -548,7 +548,7 @@ bool MachineCombiner::runOnMachineFuncti
MLI = &getAnalysis<MachineLoopInfo>();
Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = nullptr;
- OptSize = MF.getFunction()->optForSize();
+ OptSize = MF.getFunction().optForSize();
DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
if (!TII->useMachineCombiner()) {
Modified: llvm/trunk/lib/CodeGen/MachineCopyPropagation.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineCopyPropagation.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineCopyPropagation.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineCopyPropagation.cpp Fri Dec 15 14:22:58 2017
@@ -378,7 +378,7 @@ void MachineCopyPropagation::CopyPropaga
}
bool MachineCopyPropagation::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
Changed = false;
Modified: llvm/trunk/lib/CodeGen/MachineFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineFunction.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineFunction.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineFunction.cpp Fri Dec 15 14:22:58 2017
@@ -244,7 +244,7 @@ getOrCreateJumpTableInfo(unsigned EntryK
/// Should we be emitting segmented stack stuff for the function
bool MachineFunction::shouldSplitStack() const {
- return getFunction()->hasFnAttribute("split-stack");
+ return getFunction().hasFnAttribute("split-stack");
}
/// This discards all of the MachineBasicBlock numbers and recomputes them.
@@ -485,8 +485,7 @@ LLVM_DUMP_METHOD void MachineFunction::d
#endif
StringRef MachineFunction::getName() const {
- assert(getFunction() && "No function!");
- return getFunction()->getName();
+ return getFunction().getName();
}
void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
@@ -519,8 +518,8 @@ void MachineFunction::print(raw_ostream
OS << '\n';
}
- ModuleSlotTracker MST(getFunction()->getParent());
- MST.incorporateFunction(*getFunction());
+ ModuleSlotTracker MST(getFunction().getParent());
+ MST.incorporateFunction(getFunction());
for (const auto &BB : *this) {
OS << '\n';
BB.print(OS, MST, Indexes);
Modified: llvm/trunk/lib/CodeGen/MachineInstr.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineInstr.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineInstr.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineInstr.cpp Fri Dec 15 14:22:58 2017
@@ -1211,7 +1211,7 @@ void MachineInstr::print(raw_ostream &OS
const Module *M = nullptr;
if (const MachineBasicBlock *MBB = getParent())
if (const MachineFunction *MF = MBB->getParent())
- M = MF->getFunction()->getParent();
+ M = MF->getFunction().getParent();
ModuleSlotTracker MST(M);
print(OS, MST, SkipOpers, SkipDebugLoc, TII);
Modified: llvm/trunk/lib/CodeGen/MachineLICM.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineLICM.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineLICM.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineLICM.cpp Fri Dec 15 14:22:58 2017
@@ -280,7 +280,7 @@ static bool LoopIsOuterMostWithPredecess
}
bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
Changed = FirstInLoop = false;
Modified: llvm/trunk/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineOptimizationRemarkEmitter.cpp Fri Dec 15 14:22:58 2017
@@ -50,7 +50,7 @@ void MachineOptimizationRemarkEmitter::e
auto &OptDiag = cast<DiagnosticInfoMIROptimization>(OptDiagCommon);
computeHotness(OptDiag);
- LLVMContext &Ctx = MF.getFunction()->getContext();
+ LLVMContext &Ctx = MF.getFunction().getContext();
// Only emit it if its hotness meets the threshold.
if (OptDiag.getHotness().getValueOr(0) <
@@ -71,7 +71,7 @@ bool MachineOptimizationRemarkEmitterPas
MachineFunction &MF) {
MachineBlockFrequencyInfo *MBFI;
- if (MF.getFunction()->getContext().getDiagnosticsHotnessRequested())
+ if (MF.getFunction().getContext().getDiagnosticsHotnessRequested())
MBFI = &getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI();
else
MBFI = nullptr;
Modified: llvm/trunk/lib/CodeGen/MachinePipeliner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachinePipeliner.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachinePipeliner.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachinePipeliner.cpp Fri Dec 15 14:22:58 2017
@@ -729,13 +729,13 @@ INITIALIZE_PASS_END(MachinePipeliner, DE
/// The "main" function for implementing Swing Modulo Scheduling.
bool MachinePipeliner::runOnMachineFunction(MachineFunction &mf) {
- if (skipFunction(*mf.getFunction()))
+ if (skipFunction(mf.getFunction()))
return false;
if (!EnableSWP)
return false;
- if (mf.getFunction()->getAttributes().hasAttribute(
+ if (mf.getFunction().getAttributes().hasAttribute(
AttributeList::FunctionIndex, Attribute::OptimizeForSize) &&
!EnableSWPOptSize.getPosition())
return false;
Modified: llvm/trunk/lib/CodeGen/MachineRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineRegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineRegisterInfo.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineRegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -531,7 +531,7 @@ static bool isNoReturnDef(const MachineO
const MachineFunction &MF = *MBB.getParent();
// We need to keep correct unwind information even if the function will
// not return, since the runtime may need it.
- if (MF.getFunction()->hasFnAttribute(Attribute::UWTable))
+ if (MF.getFunction().hasFnAttribute(Attribute::UWTable))
return false;
const Function *Called = getCalledFunction(MI);
return !(Called == nullptr || !Called->hasFnAttribute(Attribute::NoReturn) ||
Modified: llvm/trunk/lib/CodeGen/MachineScheduler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineScheduler.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineScheduler.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineScheduler.cpp Fri Dec 15 14:22:58 2017
@@ -351,7 +351,7 @@ ScheduleDAGInstrs *PostMachineScheduler:
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
- if (skipFunction(*mf.getFunction()))
+ if (skipFunction(mf.getFunction()))
return false;
if (EnableMachineSched.getNumOccurrences()) {
@@ -389,7 +389,7 @@ bool MachineScheduler::runOnMachineFunct
}
bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
- if (skipFunction(*mf.getFunction()))
+ if (skipFunction(mf.getFunction()))
return false;
if (EnablePostRAMachineSched.getNumOccurrences()) {
Modified: llvm/trunk/lib/CodeGen/MachineSink.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineSink.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineSink.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineSink.cpp Fri Dec 15 14:22:58 2017
@@ -292,7 +292,7 @@ MachineSinking::AllUsesDominatedByBlock(
}
bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
DEBUG(dbgs() << "******** Machine Sinking ********\n");
Modified: llvm/trunk/lib/CodeGen/MachineVerifier.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineVerifier.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineVerifier.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineVerifier.cpp Fri Dec 15 14:22:58 2017
@@ -637,12 +637,12 @@ MachineVerifier::visitMachineBasicBlockB
const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
const BasicBlock *BB = MBB->getBasicBlock();
- const Function *Fn = MF->getFunction();
+ const Function &F = MF->getFunction();
if (LandingPadSuccs.size() > 1 &&
!(AsmInfo &&
AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
BB && isa<SwitchInst>(BB->getTerminator())) &&
- !isFuncletEHPersonality(classifyEHPersonality(Fn->getPersonalityFn())))
+ !isFuncletEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
report("MBB has more than one landing pad successor", MBB);
// Call AnalyzeBranch. If it succeeds, there several more conditions to check.
Modified: llvm/trunk/lib/CodeGen/OptimizePHIs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/OptimizePHIs.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/OptimizePHIs.cpp (original)
+++ llvm/trunk/lib/CodeGen/OptimizePHIs.cpp Fri Dec 15 14:22:58 2017
@@ -72,7 +72,7 @@ INITIALIZE_PASS(OptimizePHIs, DEBUG_TYPE
"Optimize machine instruction PHIs", false, false)
bool OptimizePHIs::runOnMachineFunction(MachineFunction &Fn) {
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
MRI = &Fn.getRegInfo();
Modified: llvm/trunk/lib/CodeGen/PatchableFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/PatchableFunction.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/PatchableFunction.cpp (original)
+++ llvm/trunk/lib/CodeGen/PatchableFunction.cpp Fri Dec 15 14:22:58 2017
@@ -54,11 +54,11 @@ static bool doesNotGeneratecode(const Ma
}
bool PatchableFunction::runOnMachineFunction(MachineFunction &MF) {
- if (!MF.getFunction()->hasFnAttribute("patchable-function"))
+ if (!MF.getFunction().hasFnAttribute("patchable-function"))
return false;
#ifndef NDEBUG
- Attribute PatchAttr = MF.getFunction()->getFnAttribute("patchable-function");
+ Attribute PatchAttr = MF.getFunction().getFnAttribute("patchable-function");
StringRef PatchType = PatchAttr.getValueAsString();
assert(PatchType == "prologue-short-redirect" && "Only possibility today!");
#endif
Modified: llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp (original)
+++ llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp Fri Dec 15 14:22:58 2017
@@ -1662,7 +1662,7 @@ bool PeepholeOptimizer::optimizeRecurren
}
bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n");
Modified: llvm/trunk/lib/CodeGen/PostRASchedulerList.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/PostRASchedulerList.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/PostRASchedulerList.cpp (original)
+++ llvm/trunk/lib/CodeGen/PostRASchedulerList.cpp Fri Dec 15 14:22:58 2017
@@ -279,7 +279,7 @@ bool PostRAScheduler::enablePostRASchedu
}
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
TII = Fn.getSubtarget().getInstrInfo();
Modified: llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp (original)
+++ llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp Fri Dec 15 14:22:58 2017
@@ -171,7 +171,7 @@ using StackObjSet = SmallSetVector<int,
/// runOnMachineFunction - Insert prolog/epilog code and replace abstract
/// frame indexes with appropriate references.
bool PEI::runOnMachineFunction(MachineFunction &Fn) {
- const Function* F = Fn.getFunction();
+ const Function &F = Fn.getFunction();
const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
@@ -206,7 +206,7 @@ bool PEI::runOnMachineFunction(MachineFu
// called functions. Because of this, calculateCalleeSavedRegisters()
// must be called before this function in order to set the AdjustsStack
// and MaxCallFrameSize variables.
- if (!F->hasFnAttribute(Attribute::Naked))
+ if (!F.hasFnAttribute(Attribute::Naked))
insertPrologEpilogCode(Fn);
// Replace all MO_FrameIndex operands with physical register references
@@ -224,8 +224,8 @@ bool PEI::runOnMachineFunction(MachineFu
MachineFrameInfo &MFI = Fn.getFrameInfo();
uint64_t StackSize = MFI.getStackSize();
if (WarnStackSize.getNumOccurrences() > 0 && WarnStackSize < StackSize) {
- DiagnosticInfoStackSize DiagStackSize(*F, StackSize);
- F->getContext().diagnose(DiagStackSize);
+ DiagnosticInfoStackSize DiagStackSize(F, StackSize);
+ F.getContext().diagnose(DiagStackSize);
}
delete RS;
@@ -508,7 +508,7 @@ void PEI::spillCalleeSavedRegs(MachineFu
assert(Fn.getProperties().hasProperty(
MachineFunctionProperties::Property::NoVRegs));
- const Function *F = Fn.getFunction();
+ const Function &F = Fn.getFunction();
const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
MachineFrameInfo &MFI = Fn.getFrameInfo();
MinCSFrameIndex = std::numeric_limits<unsigned>::max();
@@ -522,7 +522,7 @@ void PEI::spillCalleeSavedRegs(MachineFu
assignCalleeSavedSpillSlots(Fn, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);
// Add the code to save and restore the callee saved registers.
- if (!F->hasFnAttribute(Attribute::Naked)) {
+ if (!F.hasFnAttribute(Attribute::Naked)) {
MFI.setCalleeSavedInfoValid(true);
std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
@@ -952,7 +952,7 @@ void PEI::calculateFrameObjectOffsets(Ma
ORE->emit([&]() {
return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
- Fn.getFunction()->getSubprogram(),
+ Fn.getFunction().getSubprogram(),
&Fn.front())
<< ore::NV("NumStackBytes", StackSize) << " stack bytes in function";
});
@@ -993,7 +993,7 @@ void PEI::insertPrologEpilogCode(Machine
// approach is rather similar to that of Segmented Stacks, but it uses a
// different conditional check and another BIF for allocating more stack
// space.
- if (Fn.getFunction()->getCallingConv() == CallingConv::HiPE)
+ if (Fn.getFunction().getCallingConv() == CallingConv::HiPE)
for (MachineBasicBlock *SaveBlock : SaveBlocks)
TFI.adjustForHiPEPrologue(Fn, *SaveBlock);
}
Modified: llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp (original)
+++ llvm/trunk/lib/CodeGen/RegAllocGreedy.cpp Fri Dec 15 14:22:58 2017
@@ -2642,7 +2642,7 @@ bool RAGreedy::tryRecoloringCandidates(P
unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
SmallVectorImpl<unsigned> &NewVRegs) {
CutOffInfo = CO_None;
- LLVMContext &Ctx = MF->getFunction()->getContext();
+ LLVMContext &Ctx = MF->getFunction().getContext();
SmallVirtRegSet FixedRegisters;
unsigned Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
if (Reg == ~0U && (CutOffInfo != CO_None)) {
Modified: llvm/trunk/lib/CodeGen/RegAllocPBQP.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegAllocPBQP.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/RegAllocPBQP.cpp (original)
+++ llvm/trunk/lib/CodeGen/RegAllocPBQP.cpp Fri Dec 15 14:22:58 2017
@@ -799,7 +799,7 @@ bool RegAllocPBQP::runOnMachineFunction(
findVRegIntervalsToAlloc(MF, LIS);
#ifndef NDEBUG
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
std::string FullyQualifiedName =
F.getParent()->getModuleIdentifier() + "." + F.getName().str();
#endif
Modified: llvm/trunk/lib/CodeGen/RegUsageInfoCollector.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegUsageInfoCollector.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/RegUsageInfoCollector.cpp (original)
+++ llvm/trunk/lib/CodeGen/RegUsageInfoCollector.cpp Fri Dec 15 14:22:58 2017
@@ -95,7 +95,7 @@ bool RegUsageInfoCollector::runOnMachine
unsigned RegMaskSize = (TRI->getNumRegs() + 31) / 32;
RegMask.resize(RegMaskSize, 0xFFFFFFFF);
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
PhysicalRegisterUsageInfo *PRUI = &getAnalysis<PhysicalRegisterUsageInfo>();
@@ -127,7 +127,7 @@ bool RegUsageInfoCollector::runOnMachine
if (!TargetFrameLowering::isSafeForNoCSROpt(F)) {
const uint32_t *CallPreservedMask =
- TRI->getCallPreservedMask(MF, F->getCallingConv());
+ TRI->getCallPreservedMask(MF, F.getCallingConv());
if (CallPreservedMask) {
// Set callee saved register as preserved.
for (unsigned i = 0; i < RegMaskSize; ++i)
@@ -145,7 +145,7 @@ bool RegUsageInfoCollector::runOnMachine
DEBUG(dbgs() << " \n----------------------------------------\n");
- PRUI->storeUpdateRegUsageInfo(F, std::move(RegMask));
+ PRUI->storeUpdateRegUsageInfo(&F, std::move(RegMask));
return false;
}
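
[Where an interface still takes 'const Function *' (as storeUpdateRegUsageInfo
does above), callers now pass the address of the reference. A sketch using a
made-up pointer-based API:]

    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    // consumeFunctionInfo is hypothetical; it stands for any legacy API that
    // still expects a pointer.
    void consumeFunctionInfo(const Function *F);

    static void forward(const MachineFunction &MF) {
      const Function &F = MF.getFunction();
      consumeFunctionInfo(&F); // take the address explicitly
    }
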
Modified: llvm/trunk/lib/CodeGen/RegUsageInfoPropagate.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/RegUsageInfoPropagate.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/RegUsageInfoPropagate.cpp (original)
+++ llvm/trunk/lib/CodeGen/RegUsageInfoPropagate.cpp Fri Dec 15 14:22:58 2017
@@ -102,7 +102,7 @@ static const Function *findCalledFunctio
}
bool RegUsageInfoPropagationPass::runOnMachineFunction(MachineFunction &MF) {
- const Module *M = MF.getFunction()->getParent();
+ const Module *M = MF.getFunction().getParent();
PhysicalRegisterUsageInfo *PRUI = &getAnalysis<PhysicalRegisterUsageInfo>();
DEBUG(dbgs() << " ++++++++++++++++++++ " << getPassName()
Modified: llvm/trunk/lib/CodeGen/ResetMachineFunctionPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/ResetMachineFunctionPass.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/ResetMachineFunctionPass.cpp (original)
+++ llvm/trunk/lib/CodeGen/ResetMachineFunctionPass.cpp Fri Dec 15 14:22:58 2017
@@ -51,7 +51,7 @@ namespace {
++NumFunctionsReset;
MF.reset();
if (EmitFallbackDiag) {
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
DiagnosticInfoISelFallback DiagFallback(F);
F.getContext().diagnose(DiagFallback);
}
Modified: llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp (original)
+++ llvm/trunk/lib/CodeGen/ScheduleDAGInstrs.cpp Fri Dec 15 14:22:58 2017
@@ -114,7 +114,7 @@ ScheduleDAGInstrs::ScheduleDAGInstrs(Mac
: ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()),
RemoveKillFlags(RemoveKillFlags),
UnknownValue(UndefValue::get(
- Type::getVoidTy(mf.getFunction()->getContext()))) {
+ Type::getVoidTy(mf.getFunction().getContext()))) {
DbgValues.clear();
const TargetSubtargetInfo &ST = mf.getSubtarget();
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Fri Dec 15 14:22:58 2017
@@ -161,7 +161,7 @@ namespace {
DAGCombiner(SelectionDAG &D, AliasAnalysis *AA, CodeGenOpt::Level OL)
: DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
OptLevel(OL), AA(AA) {
- ForCodeSize = DAG.getMachineFunction().getFunction()->optForSize();
+ ForCodeSize = DAG.getMachineFunction().getFunction().optForSize();
MaximumLegalStoreInBits = 0;
for (MVT VT : MVT::all_valuetypes())
@@ -2933,7 +2933,7 @@ SDValue DAGCombiner::visitSDIV(SDNode *N
// If integer divide is expensive and we satisfy the requirements, emit an
// alternate sequence. Targets may check function attributes for size/speed
// trade-offs.
- AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes();
+ AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
if (N1C && !TLI.isIntDivCheap(N->getValueType(0), Attr))
if (SDValue Op = BuildSDIV(N))
return Op;
@@ -3004,7 +3004,7 @@ SDValue DAGCombiner::visitUDIV(SDNode *N
}
// fold (udiv x, c) -> alternate
- AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes();
+ AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
if (N1C && !TLI.isIntDivCheap(N->getValueType(0), Attr))
if (SDValue Op = BuildUDIV(N))
return Op;
@@ -3063,7 +3063,7 @@ SDValue DAGCombiner::visitREM(SDNode *N)
}
}
- AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes();
+ AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
// If X/C can be simplified by the division-by-constant logic, lower
// X%C to the equivalent of X-X/C*C.
@@ -12940,7 +12940,7 @@ bool DAGCombiner::MergeConsecutiveStores
if (MemVT.getSizeInBits() * 2 > MaximumLegalStoreInBits)
return false;
- bool NoVectors = DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ bool NoVectors = DAG.getMachineFunction().getFunction().hasFnAttribute(
Attribute::NoImplicitFloat);
// This function cannot currently deal with non-byte-sized memory sizes.
@@ -16986,7 +16986,7 @@ SDValue DAGCombiner::SimplifySetCC(EVT V
SDValue DAGCombiner::BuildSDIV(SDNode *N) {
// when optimising for minimum size, we don't want to expand a div to a mul
// and a shift.
- if (DAG.getMachineFunction().getFunction()->optForMinSize())
+ if (DAG.getMachineFunction().getFunction().optForMinSize())
return SDValue();
ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
@@ -17032,7 +17032,7 @@ SDValue DAGCombiner::BuildSDIVPow2(SDNod
SDValue DAGCombiner::BuildUDIV(SDNode *N) {
// when optimising for minimum size, we don't want to expand a div to a mul
// and a shift.
- if (DAG.getMachineFunction().getFunction()->optForMinSize())
+ if (DAG.getMachineFunction().getFunction().optForMinSize())
return SDValue();
ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp Fri Dec 15 14:22:58 2017
@@ -2014,10 +2014,10 @@ SDValue SelectionDAGLegalize::ExpandLibC
// isTailCall may be true since the callee does not reference caller stack
// frame. Check if it's in the right position and that the return types match.
SDValue TCChain = InChain;
- const Function *F = DAG.getMachineFunction().getFunction();
+ const Function &F = DAG.getMachineFunction().getFunction();
bool isTailCall =
TLI.isInTailCallPosition(DAG, Node, TCChain) &&
- (RetTy == F->getReturnType() || F->getReturnType()->isVoidTy());
+ (RetTy == F.getReturnType() || F.getReturnType()->isVoidTy());
if (isTailCall)
InChain = TCChain;
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Fri Dec 15 14:22:58 2017
@@ -909,7 +909,7 @@ void SelectionDAG::init(MachineFunction
ORE = &NewORE;
TLI = getSubtarget().getTargetLowering();
TSI = getSubtarget().getSelectionDAGInfo();
- Context = &MF->getFunction()->getContext();
+ Context = &MF->getFunction().getContext();
}
SelectionDAG::~SelectionDAG() {
@@ -1331,7 +1331,7 @@ SDValue SelectionDAG::getConstantPool(co
assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent globals");
if (Alignment == 0)
- Alignment = MF->getFunction()->optForSize()
+ Alignment = MF->getFunction().optForSize()
? getDataLayout().getABITypeAlignment(C->getType())
: getDataLayout().getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
@@ -5100,8 +5100,8 @@ static bool shouldLowerMemFuncForSize(co
// On Darwin, -Os means optimize for size without hurting performance, so
// only really optimize for size when -Oz (MinSize) is used.
if (MF.getTarget().getTargetTriple().isOSDarwin())
- return MF.getFunction()->optForMinSize();
- return MF.getFunction()->optForSize();
+ return MF.getFunction().optForMinSize();
+ return MF.getFunction().optForSize();
}
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp Fri Dec 15 14:22:58 2017
@@ -1573,9 +1573,9 @@ void SelectionDAGBuilder::visitRet(const
EVT(TLI.getPointerTy(DL))));
}
- bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
+ bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
CallingConv::ID CallConv =
- DAG.getMachineFunction().getFunction()->getCallingConv();
+ DAG.getMachineFunction().getFunction().getCallingConv();
Chain = DAG.getTargetLoweringInfo().LowerReturn(
Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
@@ -2110,7 +2110,7 @@ static SDValue getLoadStackGuard(Selecti
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
MachineFunction &MF = DAG.getMachineFunction();
- Value *Global = TLI.getSDagStackGuard(*MF.getFunction()->getParent());
+ Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
MachineSDNode *Node =
DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
if (Global) {
@@ -2144,7 +2144,7 @@ void SelectionDAGBuilder::visitSPDescrip
SDValue Guard;
SDLoc dl = getCurSDLoc();
SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
- const Module &M = *ParentBB->getParent()->getFunction()->getParent();
+ const Module &M = *ParentBB->getParent()->getFunction().getParent();
unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));
// Generate code to load the content of the guard slot.
@@ -4766,8 +4766,8 @@ static SDValue ExpandPowI(const SDLoc &D
if (Val == 0)
return DAG.getConstantFP(1.0, DL, LHS.getValueType());
- const Function *F = DAG.getMachineFunction().getFunction();
- if (!F->optForSize() ||
+ const Function &F = DAG.getMachineFunction().getFunction();
+ if (!F.optForSize() ||
// If optimizing for size, don't insert too many multiplies.
// This inserts up to 5 multiplies.
countPopulation(Val) + Log2_32(Val) < 7) {
@@ -5640,7 +5640,7 @@ SelectionDAGBuilder::visitIntrinsicCall(
case Intrinsic::stackguard: {
EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
MachineFunction &MF = DAG.getMachineFunction();
- const Module &M = *MF.getFunction()->getParent();
+ const Module &M = *MF.getFunction().getParent();
SDValue Chain = getRoot();
if (TLI.useLoadStackGuardNode()) {
Res = getLoadStackGuard(DAG, sdl, Chain);
@@ -5748,9 +5748,7 @@ SelectionDAGBuilder::visitIntrinsicCall(
return nullptr;
case Intrinsic::gcroot: {
MachineFunction &MF = DAG.getMachineFunction();
- const Function *F = MF.getFunction();
- (void)F;
- assert(F->hasGC() &&
+ assert(MF.getFunction().hasGC() &&
"only valid in functions with gc specified, enforced by Verifier");
assert(GFI && "implied by previous");
const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
@@ -9869,7 +9867,7 @@ MachineBasicBlock *SelectionDAGBuilder::
// Don't perform if there is only one cluster or optimizing for size.
if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
TM.getOptLevel() == CodeGenOpt::None ||
- SwitchMBB->getParent()->getFunction()->optForMinSize())
+ SwitchMBB->getParent()->getFunction().optForMinSize())
return SwitchMBB;
BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
@@ -10021,7 +10019,7 @@ void SelectionDAGBuilder::visitSwitch(co
unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
- !DefaultMBB->getParent()->getFunction()->optForMinSize()) {
+ !DefaultMBB->getParent()->getFunction().optForMinSize()) {
// For optimized builds, lower large range as a balanced binary tree.
splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
continue;
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp Fri Dec 15 14:22:58 2017
@@ -212,7 +212,7 @@ namespace llvm {
IS.OptLevel = NewOptLevel;
IS.TM.setOptLevel(NewOptLevel);
DEBUG(dbgs() << "\nChanging optimization level for Function "
- << IS.MF->getFunction()->getName() << "\n");
+ << IS.MF->getFunction().getName() << "\n");
DEBUG(dbgs() << "\tBefore: -O" << SavedOptLevel
<< " ; After: -O" << NewOptLevel << "\n");
SavedFastISel = IS.TM.Options.EnableFastISel;
@@ -228,7 +228,7 @@ namespace llvm {
if (IS.OptLevel == SavedOptLevel)
return;
DEBUG(dbgs() << "\nRestoring optimization level for Function "
- << IS.MF->getFunction()->getName() << "\n");
+ << IS.MF->getFunction().getName() << "\n");
DEBUG(dbgs() << "\tBefore: -O" << IS.OptLevel
<< " ; After: -O" << SavedOptLevel << "\n");
IS.OptLevel = SavedOptLevel;
@@ -384,7 +384,7 @@ bool SelectionDAGISel::runOnMachineFunct
assert((!EnableFastISelAbort || TM.Options.EnableFastISel) &&
"-fast-isel-abort > 0 requires -fast-isel");
- const Function &Fn = *mf.getFunction();
+ const Function &Fn = mf.getFunction();
MF = &mf;
// Reset the target options before resetting the optimization
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp Fri Dec 15 14:22:58 2017
@@ -52,11 +52,11 @@ bool TargetLowering::isPositionIndepende
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
SDValue &Chain) const {
- const Function *F = DAG.getMachineFunction().getFunction();
+ const Function &F = DAG.getMachineFunction().getFunction();
// Conservatively require the attributes of the call to match those of
// the return. Ignore noalias because it doesn't affect the call sequence.
- AttributeList CallerAttrs = F->getAttributes();
+ AttributeList CallerAttrs = F.getAttributes();
if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex)
.removeAttribute(Attribute::NoAlias)
.hasAttributes())
@@ -2963,7 +2963,7 @@ static SDValue BuildExactSDIV(const Targ
SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG,
std::vector<SDNode *> *Created) const {
- AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes();
+ AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.isIntDivCheap(N->getValueType(0), Attr))
return SDValue(N,0); // Lower SDIV as SDIV
Modified: llvm/trunk/lib/CodeGen/ShrinkWrap.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/ShrinkWrap.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/ShrinkWrap.cpp (original)
+++ llvm/trunk/lib/CodeGen/ShrinkWrap.cpp Fri Dec 15 14:22:58 2017
@@ -449,7 +449,7 @@ static bool isIrreducibleCFG(const Machi
}
bool ShrinkWrap::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()) || MF.empty() || !isShrinkWrapEnabled(MF))
+ if (skipFunction(MF.getFunction()) || MF.empty() || !isShrinkWrapEnabled(MF))
return false;
DEBUG(dbgs() << "**** Analysing " << MF.getName() << '\n');
@@ -569,10 +569,10 @@ bool ShrinkWrap::isShrinkWrapEnabled(con
// of the crash. Since a crash can happen anywhere, the
// frame must be lowered before anything else happen for the
// sanitizers to be able to get a correct stack frame.
- !(MF.getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
- MF.getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
- MF.getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
- MF.getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress));
+ !(MF.getFunction().hasFnAttribute(Attribute::SanitizeAddress) ||
+ MF.getFunction().hasFnAttribute(Attribute::SanitizeThread) ||
+ MF.getFunction().hasFnAttribute(Attribute::SanitizeMemory) ||
+ MF.getFunction().hasFnAttribute(Attribute::SanitizeHWAddress));
// If EnableShrinkWrap is set, it takes precedence on whatever the
// target sets. The rational is that we assume we want to test
// something related to shrink-wrapping.
Modified: llvm/trunk/lib/CodeGen/StackColoring.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/StackColoring.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/StackColoring.cpp (original)
+++ llvm/trunk/lib/CodeGen/StackColoring.cpp Fri Dec 15 14:22:58 2017
@@ -1129,8 +1129,7 @@ void StackColoring::expungeSlotMap(Dense
bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
DEBUG(dbgs() << "********** Stack Coloring **********\n"
- << "********** Function: "
- << ((const Value*)Func.getFunction())->getName() << '\n');
+ << "********** Function: " << Func.getName() << '\n');
MF = &Func;
MFI = &MF->getFrameInfo();
Indexes = &getAnalysis<SlotIndexes>();
@@ -1170,7 +1169,7 @@ bool StackColoring::runOnMachineFunction
// Don't continue because there are not enough lifetime markers, or the
// stack is too small, or we are told not to optimize the slots.
if (NumMarkers < 2 || TotalSize < 16 || DisableColoring ||
- skipFunction(*Func.getFunction())) {
+ skipFunction(Func.getFunction())) {
DEBUG(dbgs()<<"Will not try to merge slots.\n");
return removeAllMarkers();
}
Modified: llvm/trunk/lib/CodeGen/TailDuplication.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TailDuplication.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TailDuplication.cpp (original)
+++ llvm/trunk/lib/CodeGen/TailDuplication.cpp Fri Dec 15 14:22:58 2017
@@ -49,7 +49,7 @@ char &llvm::TailDuplicateID = TailDuplic
INITIALIZE_PASS(TailDuplicatePass, DEBUG_TYPE, "Tail Duplication", false, false)
bool TailDuplicatePass::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
auto MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
Modified: llvm/trunk/lib/CodeGen/TailDuplicator.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TailDuplicator.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TailDuplicator.cpp (original)
+++ llvm/trunk/lib/CodeGen/TailDuplicator.cpp Fri Dec 15 14:22:58 2017
@@ -550,7 +550,7 @@ bool TailDuplicator::shouldTailDuplicate
unsigned MaxDuplicateCount;
if (TailDupSize == 0 &&
TailDuplicateSize.getNumOccurrences() == 0 &&
- MF->getFunction()->optForSize())
+ MF->getFunction().optForSize())
MaxDuplicateCount = 1;
else if (TailDupSize == 0)
MaxDuplicateCount = TailDuplicateSize;
Modified: llvm/trunk/lib/CodeGen/TargetFrameLoweringImpl.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TargetFrameLoweringImpl.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TargetFrameLoweringImpl.cpp (original)
+++ llvm/trunk/lib/CodeGen/TargetFrameLoweringImpl.cpp Fri Dec 15 14:22:58 2017
@@ -32,7 +32,7 @@ TargetFrameLowering::~TargetFrameLowerin
/// The default implementation just looks at attribute "no-frame-pointer-elim".
bool TargetFrameLowering::noFramePointerElim(const MachineFunction &MF) const {
- auto Attr = MF.getFunction()->getFnAttribute("no-frame-pointer-elim");
+ auto Attr = MF.getFunction().getFnAttribute("no-frame-pointer-elim");
return Attr.getValueAsString() == "true";
}
@@ -82,7 +82,7 @@ void TargetFrameLowering::determineCalle
return;
// In Naked functions we aren't going to save any registers.
- if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
+ if (MF.getFunction().hasFnAttribute(Attribute::Naked))
return;
// Functions which call __builtin_unwind_init get all their registers saved.
@@ -99,7 +99,7 @@ unsigned TargetFrameLowering::getStackAl
const MachineFunction &MF) const {
// When HHVM function is called, the stack is skewed as the return address
// is removed from the stack before we enter the function.
- if (LLVM_UNLIKELY(MF.getFunction()->getCallingConv() == CallingConv::HHVM))
+ if (LLVM_UNLIKELY(MF.getFunction().getCallingConv() == CallingConv::HHVM))
return MF.getTarget().getPointerSize();
return 0;
Modified: llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp (original)
+++ llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp Fri Dec 15 14:22:58 2017
@@ -1592,8 +1592,8 @@ void TargetLoweringBase::setMaximumJumpT
/// Get the reciprocal estimate attribute string for a function that will
/// override the target defaults.
static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
- const Function *F = MF.getFunction();
- return F->getFnAttribute("reciprocal-estimates").getValueAsString();
+ const Function &F = MF.getFunction();
+ return F.getFnAttribute("reciprocal-estimates").getValueAsString();
}
/// Construct a string for the given reciprocal operation of the given type.
Modified: llvm/trunk/lib/CodeGen/TargetOptionsImpl.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TargetOptionsImpl.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TargetOptionsImpl.cpp (original)
+++ llvm/trunk/lib/CodeGen/TargetOptionsImpl.cpp Fri Dec 15 14:22:58 2017
@@ -28,7 +28,7 @@ bool TargetOptions::DisableFramePointerE
return true;
// Check to see if we should eliminate non-leaf frame pointers.
- if (MF.getFunction()->hasFnAttribute("no-frame-pointer-elim-non-leaf"))
+ if (MF.getFunction().hasFnAttribute("no-frame-pointer-elim-non-leaf"))
return MF.getFrameInfo().hasCalls();
return false;
Modified: llvm/trunk/lib/CodeGen/TargetRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TargetRegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TargetRegisterInfo.cpp (original)
+++ llvm/trunk/lib/CodeGen/TargetRegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -422,21 +422,21 @@ TargetRegisterInfo::getRegAllocationHint
}
bool TargetRegisterInfo::canRealignStack(const MachineFunction &MF) const {
- return !MF.getFunction()->hasFnAttribute("no-realign-stack");
+ return !MF.getFunction().hasFnAttribute("no-realign-stack");
}
bool TargetRegisterInfo::needsStackRealignment(
const MachineFunction &MF) const {
const MachineFrameInfo &MFI = MF.getFrameInfo();
const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
unsigned StackAlign = TFI->getStackAlignment();
bool requiresRealignment = ((MFI.getMaxAlignment() > StackAlign) ||
- F->hasFnAttribute(Attribute::StackAlignment));
- if (MF.getFunction()->hasFnAttribute("stackrealign") || requiresRealignment) {
+ F.hasFnAttribute(Attribute::StackAlignment));
+ if (F.hasFnAttribute("stackrealign") || requiresRealignment) {
if (canRealignStack(MF))
return true;
- DEBUG(dbgs() << "Can't realign function's stack: " << F->getName() << "\n");
+ DEBUG(dbgs() << "Can't realign function's stack: " << F.getName() << "\n");
}
return false;
}
Modified: llvm/trunk/lib/CodeGen/TwoAddressInstructionPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TwoAddressInstructionPass.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TwoAddressInstructionPass.cpp (original)
+++ llvm/trunk/lib/CodeGen/TwoAddressInstructionPass.cpp Fri Dec 15 14:22:58 2017
@@ -1663,7 +1663,7 @@ bool TwoAddressInstructionPass::runOnMac
OptLevel = TM.getOptLevel();
// Disable optimizations if requested. We cannot skip the whole pass as some
// fixups are necessary for correctness.
- if (skipFunction(*Func.getFunction()))
+ if (skipFunction(Func.getFunction()))
OptLevel = CodeGenOpt::None;
bool MadeChange = false;
Modified: llvm/trunk/lib/CodeGen/XRayInstrumentation.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/XRayInstrumentation.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/XRayInstrumentation.cpp (original)
+++ llvm/trunk/lib/CodeGen/XRayInstrumentation.cpp Fri Dec 15 14:22:58 2017
@@ -142,7 +142,7 @@ void XRayInstrumentation::prependRetWith
}
bool XRayInstrumentation::runOnMachineFunction(MachineFunction &MF) {
- auto &F = *MF.getFunction();
+ auto &F = MF.getFunction();
auto InstrAttr = F.getFnAttribute("function-instrument");
bool AlwaysInstrument = !InstrAttr.hasAttribute(Attribute::None) &&
InstrAttr.isStringAttribute() &&
Modified: llvm/trunk/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp Fri Dec 15 14:22:58 2017
@@ -308,7 +308,7 @@ public:
//===----------------------------------------------------------------------===//
bool AArch64A57FPLoadBalancing::runOnMachineFunction(MachineFunction &F) {
- if (skipFunction(*F.getFunction()))
+ if (skipFunction(F.getFunction()))
return false;
if (!F.getSubtarget<AArch64Subtarget>().balanceFPOps())
Modified: llvm/trunk/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp Fri Dec 15 14:22:58 2017
@@ -393,7 +393,7 @@ bool AArch64AdvSIMDScalar::runOnMachineF
bool Changed = false;
DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n");
- if (skipFunction(*mf.getFunction()))
+ if (skipFunction(mf.getFunction()))
return false;
MRI = &mf.getRegInfo();
Modified: llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp Fri Dec 15 14:22:58 2017
@@ -220,7 +220,7 @@ void AArch64CallLowering::splitToValueTy
bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const Value *Val, unsigned VReg) const {
MachineFunction &MF = MIRBuilder.getMF();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
assert(((Val && VReg) || (!Val && !VReg)) && "Return value without a vreg");
@@ -322,7 +322,7 @@ bool AArch64CallLowering::lowerCall(Mach
const ArgInfo &OrigRet,
ArrayRef<ArgInfo> OrigArgs) const {
MachineFunction &MF = MIRBuilder.getMF();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
MachineRegisterInfo &MRI = MF.getRegInfo();
auto &DL = F.getParent()->getDataLayout();
Modified: llvm/trunk/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp Fri Dec 15 14:22:58 2017
@@ -42,7 +42,7 @@ struct LDTLSCleanup : public MachineFunc
}
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
Modified: llvm/trunk/lib/Target/AArch64/AArch64CollectLOH.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64CollectLOH.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64CollectLOH.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64CollectLOH.cpp Fri Dec 15 14:22:58 2017
@@ -482,7 +482,7 @@ static void handleNormalInst(const Machi
}
bool AArch64CollectLOH::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
DEBUG(dbgs() << "********** AArch64 Collect LOH **********\n"
Modified: llvm/trunk/lib/Target/AArch64/AArch64CondBrTuning.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64CondBrTuning.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64CondBrTuning.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64CondBrTuning.cpp Fri Dec 15 14:22:58 2017
@@ -290,7 +290,7 @@ bool AArch64CondBrTuning::tryToTuneBranc
}
bool AArch64CondBrTuning::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
DEBUG(dbgs() << "********** AArch64 Conditional Branch Tuning **********\n"
Modified: llvm/trunk/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ConditionOptimizer.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ConditionOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ConditionOptimizer.cpp Fri Dec 15 14:22:58 2017
@@ -327,7 +327,7 @@ bool AArch64ConditionOptimizer::adjustTo
bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
<< "********** Function: " << MF.getName() << '\n');
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TII = MF.getSubtarget().getInstrInfo();
Modified: llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp Fri Dec 15 14:22:58 2017
@@ -924,7 +924,7 @@ bool AArch64ConditionalCompares::tryConv
bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
<< "********** Function: " << MF.getName() << '\n');
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TII = MF.getSubtarget().getInstrInfo();
@@ -936,7 +936,7 @@ bool AArch64ConditionalCompares::runOnMa
MBPI = &getAnalysis<MachineBranchProbabilityInfo>();
Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = nullptr;
- MinSize = MF.getFunction()->optForMinSize();
+ MinSize = MF.getFunction().optForMinSize();
bool Changed = false;
CmpConv.runOnMachineFunction(MF, MBPI);
Modified: llvm/trunk/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp Fri Dec 15 14:22:58 2017
@@ -198,7 +198,7 @@ void AArch64DeadRegisterDefinitions::pro
// Scan the function for instructions that have a dead definition of a
// register. Replace that register with the zero register when possible.
bool AArch64DeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TRI = MF.getSubtarget().getRegisterInfo();
Modified: llvm/trunk/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64FalkorHWPFFix.cpp Fri Dec 15 14:22:58 2017
@@ -798,7 +798,7 @@ bool FalkorHWPFFix::runOnMachineFunction
if (ST.getProcFamily() != AArch64Subtarget::Falkor)
return false;
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
TII = static_cast<const AArch64InstrInfo *>(ST.getInstrInfo());
Modified: llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp Fri Dec 15 14:22:58 2017
@@ -174,7 +174,7 @@ bool AArch64FrameLowering::canUseRedZone
return false;
// Don't use the red zone if the function explicitly asks us not to.
// This is typically used for kernel code.
- if (MF.getFunction()->hasFnAttribute(Attribute::NoRedZone))
+ if (MF.getFunction().hasFnAttribute(Attribute::NoRedZone))
return false;
const MachineFrameInfo &MFI = MF.getFrameInfo();
@@ -459,13 +459,13 @@ void AArch64FrameLowering::emitPrologue(
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
const MachineFrameInfo &MFI = MF.getFrameInfo();
- const Function *Fn = MF.getFunction();
+ const Function &F = MF.getFunction();
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineModuleInfo &MMI = MF.getMMI();
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
- bool needsFrameMoves = MMI.hasDebugInfo() || Fn->needsUnwindTableEntry();
+ bool needsFrameMoves = MMI.hasDebugInfo() || F.needsUnwindTableEntry();
bool HasFP = hasFP(MF);
// Debug location must be unknown since the first debug location is used
@@ -474,7 +474,7 @@ void AArch64FrameLowering::emitPrologue(
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
int NumBytes = (int)MFI.getStackSize();
@@ -507,7 +507,7 @@ void AArch64FrameLowering::emitPrologue(
}
bool IsWin64 =
- Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv());
+ Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
@@ -716,7 +716,7 @@ void AArch64FrameLowering::emitEpilogue(
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
// Initial and residual are named for consistency with the prologue. Note that
@@ -765,7 +765,7 @@ void AArch64FrameLowering::emitEpilogue(
// it as the 2nd argument of AArch64ISD::TC_RETURN.
bool IsWin64 =
- Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv());
+ Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
@@ -857,7 +857,7 @@ int AArch64FrameLowering::resolveFrameIn
const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
bool IsWin64 =
- Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv());
+ Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
int FPOffset = MFI.getObjectOffset(FI) + FixedObject + 16;
int Offset = MFI.getObjectOffset(FI) + MFI.getStackSize();
@@ -928,7 +928,7 @@ static unsigned getPrologueDeath(Machine
static bool produceCompactUnwindFrame(MachineFunction &MF) {
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
- AttributeList Attrs = MF.getFunction()->getAttributes();
+ AttributeList Attrs = MF.getFunction().getAttributes();
return Subtarget.isTargetMachO() &&
!(Subtarget.getTargetLowering()->supportSwiftError() &&
Attrs.hasAttrSomewhere(Attribute::SwiftError));
@@ -959,7 +959,7 @@ static void computeCalleeSaveRegisterPai
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
MachineFrameInfo &MFI = MF.getFrameInfo();
- CallingConv::ID CC = MF.getFunction()->getCallingConv();
+ CallingConv::ID CC = MF.getFunction().getCallingConv();
unsigned Count = CSI.size();
(void)CC;
// MachO's compact unwind format relies on all registers being stored in
@@ -1154,7 +1154,7 @@ void AArch64FrameLowering::determineCall
RegScavenger *RS) const {
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp Fri Dec 15 14:22:58 2017
@@ -53,7 +53,7 @@ public:
}
bool runOnMachineFunction(MachineFunction &MF) override {
- ForCodeSize = MF.getFunction()->optForSize();
+ ForCodeSize = MF.getFunction().optForSize();
Subtarget = &MF.getSubtarget<AArch64Subtarget>();
return SelectionDAGISel::runOnMachineFunction(MF);
}
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Fri Dec 15 14:22:58 2017
@@ -2731,7 +2731,7 @@ SDValue AArch64TargetLowering::LowerForm
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
- bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv());
+ bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
@@ -2745,7 +2745,7 @@ SDValue AArch64TargetLowering::LowerForm
// we use a special version of AnalyzeFormalArguments to pass in ValVT and
// LocVT.
unsigned NumArgs = Ins.size();
- Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
+ Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
unsigned CurArgIdx = 0;
for (unsigned i = 0; i != NumArgs; ++i) {
MVT ValVT = Ins[i].VT;
@@ -2935,7 +2935,7 @@ void AArch64TargetLowering::saveVarArgRe
MachineFrameInfo &MFI = MF.getFrameInfo();
AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
auto PtrVT = getPointerTy(DAG.getDataLayout());
- bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv());
+ bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());
SmallVector<SDValue, 8> MemOps;
@@ -3087,15 +3087,15 @@ bool AArch64TargetLowering::isEligibleFo
return false;
MachineFunction &MF = DAG.getMachineFunction();
- const Function *CallerF = MF.getFunction();
- CallingConv::ID CallerCC = CallerF->getCallingConv();
+ const Function &CallerF = MF.getFunction();
+ CallingConv::ID CallerCC = CallerF.getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
// Byval parameters hand the function a pointer directly into the stack area
// we want to reuse during a tail call. Working around this *is* possible (see
// X86) but less efficient and uglier in LowerCall.
- for (Function::const_arg_iterator i = CallerF->arg_begin(),
- e = CallerF->arg_end();
+ for (Function::const_arg_iterator i = CallerF.arg_begin(),
+ e = CallerF.arg_end();
i != e; ++i)
if (i->hasByValAttr())
return false;
@@ -4185,7 +4185,7 @@ SDValue AArch64TargetLowering::LowerFCOP
}
SDValue AArch64TargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const {
- if (DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ if (DAG.getMachineFunction().getFunction().hasFnAttribute(
Attribute::NoImplicitFloat))
return SDValue();
@@ -4668,7 +4668,7 @@ SDValue AArch64TargetLowering::LowerVAST
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
- if (Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv()))
+ if (Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()))
return LowerWin64_VASTART(Op, DAG);
else if (Subtarget->isTargetDarwin())
return LowerDarwin_VASTART(Op, DAG);
@@ -7909,9 +7909,9 @@ EVT AArch64TargetLowering::getOptimalMem
// instruction to materialize the v2i64 zero and one store (with restrictive
// addressing mode). Just do two i64 store of zero-registers.
bool Fast;
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
if (Subtarget->hasFPARMv8() && !IsMemset && Size >= 16 &&
- !F->hasFnAttribute(Attribute::NoImplicitFloat) &&
+ !F.hasFnAttribute(Attribute::NoImplicitFloat) &&
(memOpAlign(SrcAlign, DstAlign, 16) ||
(allowsMisalignedMemoryAccesses(MVT::f128, 0, 1, &Fast) && Fast)))
return MVT::f128;
@@ -8156,7 +8156,7 @@ SDValue
AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG,
std::vector<SDNode *> *Created) const {
- AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes();
+ AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
if (isIntDivCheap(N->getValueType(0), Attr))
return SDValue(N,0); // Lower SDIV as SDIV
@@ -9577,7 +9577,7 @@ static SDValue splitStores(SDNode *N, Ta
return SDValue();
// Don't split at -Oz.
- if (DAG.getMachineFunction().getFunction()->optForMinSize())
+ if (DAG.getMachineFunction().getFunction().optForMinSize())
return SDValue();
// Don't split v2i64 vectors. Memcpy lowering produces those and splitting
@@ -10939,7 +10939,7 @@ void AArch64TargetLowering::insertCopies
// fine for CXX_FAST_TLS since the C++-style TLS access functions should be
// nounwind. If we want to generalize this later, we may need to emit
// CFI pseudo-instructions.
- assert(Entry->getParent()->getFunction()->hasFnAttribute(
+ assert(Entry->getParent()->getFunction().hasFnAttribute(
Attribute::NoUnwind) &&
"Function should be nounwind in insertCopiesSplitCSR!");
Entry->addLiveIn(*I);
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h Fri Dec 15 14:22:58 2017
@@ -415,7 +415,7 @@ public:
// Do not merge to float value size (128 bytes) if no implicit
// float attribute is set.
- bool NoFloat = DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
Attribute::NoImplicitFloat);
if (NoFloat)
@@ -444,8 +444,8 @@ public:
}
bool supportSplitCSR(MachineFunction *MF) const override {
- return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
- MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
+ return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
}
void initializeSplitCSR(MachineBasicBlock *Entry) const override;
void insertCopiesSplitCSR(
Modified: llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp Fri Dec 15 14:22:58 2017
@@ -4753,21 +4753,21 @@ AArch64InstrInfo::getOutlininingCandidat
bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
bool OutlineFromLinkOnceODRs) const {
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
// If F uses a redzone, then don't outline from it because it might mess up
// the stack.
- if (!F->hasFnAttribute(Attribute::NoRedZone))
+ if (!F.hasFnAttribute(Attribute::NoRedZone))
return false;
// If anyone is using the address of this function, don't outline from it.
- if (F->hasAddressTaken())
+ if (F.hasAddressTaken())
return false;
// Can F be deduplicated by the linker? If it can, don't outline from it.
- if (!OutlineFromLinkOnceODRs && F->hasLinkOnceODRLinkage())
+ if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
return false;
-
+
return true;
}
Modified: llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.td Fri Dec 15 14:22:58 2017
@@ -328,10 +328,10 @@ def AArch64umaxv : SDNode<"AArch64ISD
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
- def ForCodeSize : Predicate<"MF->getFunction()->optForSize()">;
- def NotForCodeSize : Predicate<"!MF->getFunction()->optForSize()">;
+ def ForCodeSize : Predicate<"MF->getFunction().optForSize()">;
+ def NotForCodeSize : Predicate<"!MF->getFunction().optForSize()">;
// Avoid generating STRQro if it is slow, unless we're optimizing for code size.
- def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction()->optForSize()">;
+ def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction().optForSize()">;
}
include "AArch64InstrFormats.td"
Modified: llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp Fri Dec 15 14:22:58 2017
@@ -1759,7 +1759,7 @@ bool AArch64LoadStoreOpt::optimizeBlock(
}
bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
Modified: llvm/trunk/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64RedundantCopyElimination.cpp Fri Dec 15 14:22:58 2017
@@ -485,7 +485,7 @@ bool AArch64RedundantCopyElimination::op
bool AArch64RedundantCopyElimination::runOnMachineFunction(
MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TRI = MF.getSubtarget().getRegisterInfo();
MRI = &MF.getRegInfo();
Modified: llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -42,22 +42,22 @@ AArch64RegisterInfo::AArch64RegisterInfo
const MCPhysReg *
AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
- if (MF->getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF->getFunction().getCallingConv() == CallingConv::GHC)
// GHC set of callee saved regs is empty as all those regs are
// used for passing STG regs around
return CSR_AArch64_NoRegs_SaveList;
- if (MF->getFunction()->getCallingConv() == CallingConv::AnyReg)
+ if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
return CSR_AArch64_AllRegs_SaveList;
- if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS)
+ if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
return MF->getInfo<AArch64FunctionInfo>()->isSplitCSR() ?
CSR_AArch64_CXX_TLS_Darwin_PE_SaveList :
CSR_AArch64_CXX_TLS_Darwin_SaveList;
if (MF->getSubtarget<AArch64Subtarget>().getTargetLowering()
->supportSwiftError() &&
- MF->getFunction()->getAttributes().hasAttrSomewhere(
+ MF->getFunction().getAttributes().hasAttrSomewhere(
Attribute::SwiftError))
return CSR_AArch64_AAPCS_SwiftError_SaveList;
- if (MF->getFunction()->getCallingConv() == CallingConv::PreserveMost)
+ if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
return CSR_AArch64_RT_MostRegs_SaveList;
else
return CSR_AArch64_AAPCS_SaveList;
@@ -66,7 +66,7 @@ AArch64RegisterInfo::getCalleeSavedRegs(
const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
- if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
MF->getInfo<AArch64FunctionInfo>()->isSplitCSR())
return CSR_AArch64_CXX_TLS_Darwin_ViaCopy_SaveList;
return nullptr;
@@ -84,7 +84,7 @@ AArch64RegisterInfo::getCallPreservedMas
return CSR_AArch64_CXX_TLS_Darwin_RegMask;
if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
->supportSwiftError() &&
- MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
+ MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
return CSR_AArch64_AAPCS_SwiftError_RegMask;
if (CC == CallingConv::PreserveMost)
return CSR_AArch64_RT_MostRegs_RegMask;
Modified: llvm/trunk/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64SIMDInstrOpt.cpp Fri Dec 15 14:22:58 2017
@@ -690,7 +690,7 @@ unsigned AArch64SIMDInstrOpt::determineS
}
bool AArch64SIMDInstrOpt::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TII = MF.getSubtarget().getInstrInfo();
Modified: llvm/trunk/lib/Target/AArch64/AArch64StorePairSuppress.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64StorePairSuppress.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64StorePairSuppress.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64StorePairSuppress.cpp Fri Dec 15 14:22:58 2017
@@ -120,7 +120,7 @@ bool AArch64StorePairSuppress::isNarrowF
}
bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
const TargetSubtargetInfo &ST = MF.getSubtarget();
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp Fri Dec 15 14:22:58 2017
@@ -205,7 +205,7 @@ void AMDGPUAsmPrinter::EmitFunctionBodyS
if (TM.getTargetTriple().getOS() != Triple::AMDHSA)
return;
- HSAMetadataStream.emitKernel(*MF->getFunction(),
+ HSAMetadataStream.emitKernel(MF->getFunction(),
getHSACodeProps(*MF, CurrentProgramInfo),
getHSADebugProps(*MF, CurrentProgramInfo));
}
@@ -215,14 +215,14 @@ void AMDGPUAsmPrinter::EmitFunctionEntry
const AMDGPUSubtarget &STM = MF->getSubtarget<AMDGPUSubtarget>();
if (MFI->isEntryFunction() && STM.isAmdCodeObjectV2(*MF)) {
SmallString<128> SymbolName;
- getNameWithPrefix(SymbolName, MF->getFunction()),
+ getNameWithPrefix(SymbolName, &MF->getFunction()),
getTargetStreamer()->EmitAMDGPUSymbolType(
SymbolName, ELF::STT_AMDGPU_HSA_KERNEL);
}
const AMDGPUSubtarget &STI = MF->getSubtarget<AMDGPUSubtarget>();
if (STI.dumpCode()) {
// Disassemble function name label to text.
- DisasmLines.push_back(MF->getFunction()->getName().str() + ":");
+ DisasmLines.push_back(MF->getName().str() + ":");
DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLines.back().size());
HexLines.push_back("");
}
@@ -314,7 +314,7 @@ bool AMDGPUAsmPrinter::runOnMachineFunct
getSIProgramInfo(CurrentProgramInfo, MF);
} else {
auto I = CallGraphResourceInfo.insert(
- std::make_pair(MF.getFunction(), SIFunctionResourceInfo()));
+ std::make_pair(&MF.getFunction(), SIFunctionResourceInfo()));
SIFunctionResourceInfo &Info = I.first->second;
assert(I.second && "should only be called once per function");
Info = analyzeResourceUsage(MF);
@@ -343,7 +343,7 @@ bool AMDGPUAsmPrinter::runOnMachineFunct
if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
if (!MFI->isEntryFunction()) {
OutStreamer->emitRawComment(" Function info:", false);
- SIFunctionResourceInfo &Info = CallGraphResourceInfo[MF.getFunction()];
+ SIFunctionResourceInfo &Info = CallGraphResourceInfo[&MF.getFunction()];
emitCommonFunctionComments(
Info.NumVGPR,
Info.getTotalNumSGPRs(MF.getSubtarget<SISubtarget>()),
@@ -469,7 +469,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR6
unsigned RsrcReg;
if (STM.getGeneration() >= R600Subtarget::EVERGREEN) {
// Evergreen / Northern Islands
- switch (MF.getFunction()->getCallingConv()) {
+ switch (MF.getFunction().getCallingConv()) {
default: LLVM_FALLTHROUGH;
case CallingConv::AMDGPU_CS: RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break;
case CallingConv::AMDGPU_GS: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break;
@@ -478,7 +478,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR6
}
} else {
// R600 / R700
- switch (MF.getFunction()->getCallingConv()) {
+ switch (MF.getFunction().getCallingConv()) {
default: LLVM_FALLTHROUGH;
case CallingConv::AMDGPU_GS: LLVM_FALLTHROUGH;
case CallingConv::AMDGPU_CS: LLVM_FALLTHROUGH;
@@ -493,7 +493,7 @@ void AMDGPUAsmPrinter::EmitProgramInfoR6
OutStreamer->EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4);
OutStreamer->EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4);
- if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
+ if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
OutStreamer->EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
OutStreamer->EmitIntValue(alignTo(MFI->getLDSSize(), 4) >> 2, 4);
}
@@ -787,9 +787,9 @@ void AMDGPUAsmPrinter::getSIProgramInfo(
ProgInfo.DynamicCallStack = Info.HasDynamicallySizedStack || Info.HasRecursion;
if (!isUInt<32>(ProgInfo.ScratchSize)) {
- DiagnosticInfoStackSize DiagStackSize(*MF.getFunction(),
+ DiagnosticInfoStackSize DiagStackSize(MF.getFunction(),
ProgInfo.ScratchSize, DS_Error);
- MF.getFunction()->getContext().diagnose(DiagStackSize);
+ MF.getFunction().getContext().diagnose(DiagStackSize);
}
const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
@@ -808,8 +808,8 @@ void AMDGPUAsmPrinter::getSIProgramInfo(
unsigned MaxAddressableNumSGPRs = STM.getAddressableNumSGPRs();
if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) {
// This can happen due to a compiler bug or when using inline asm.
- LLVMContext &Ctx = MF.getFunction()->getContext();
- DiagnosticInfoResourceLimit Diag(*MF.getFunction(),
+ LLVMContext &Ctx = MF.getFunction().getContext();
+ DiagnosticInfoResourceLimit Diag(MF.getFunction(),
"addressable scalar registers",
ProgInfo.NumSGPR, DS_Error,
DK_ResourceLimit,
@@ -836,8 +836,8 @@ void AMDGPUAsmPrinter::getSIProgramInfo(
if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) {
// This can happen due to a compiler bug or when using inline asm to use
// the registers which are usually reserved for vcc etc.
- LLVMContext &Ctx = MF.getFunction()->getContext();
- DiagnosticInfoResourceLimit Diag(*MF.getFunction(),
+ LLVMContext &Ctx = MF.getFunction().getContext();
+ DiagnosticInfoResourceLimit Diag(MF.getFunction(),
"scalar registers",
ProgInfo.NumSGPR, DS_Error,
DK_ResourceLimit,
@@ -856,15 +856,15 @@ void AMDGPUAsmPrinter::getSIProgramInfo(
}
if (MFI->getNumUserSGPRs() > STM.getMaxNumUserSGPRs()) {
- LLVMContext &Ctx = MF.getFunction()->getContext();
- DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "user SGPRs",
+ LLVMContext &Ctx = MF.getFunction().getContext();
+ DiagnosticInfoResourceLimit Diag(MF.getFunction(), "user SGPRs",
MFI->getNumUserSGPRs(), DS_Error);
Ctx.diagnose(Diag);
}
if (MFI->getLDSSize() > static_cast<unsigned>(STM.getLocalMemorySize())) {
- LLVMContext &Ctx = MF.getFunction()->getContext();
- DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "local memory",
+ LLVMContext &Ctx = MF.getFunction().getContext();
+ DiagnosticInfoResourceLimit Diag(MF.getFunction(), "local memory",
MFI->getLDSSize(), DS_Error);
Ctx.diagnose(Diag);
}
@@ -977,9 +977,9 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI
const SIProgramInfo &CurrentProgramInfo) {
const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
- unsigned RsrcReg = getRsrcReg(MF.getFunction()->getCallingConv());
+ unsigned RsrcReg = getRsrcReg(MF.getFunction().getCallingConv());
- if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
+ if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
OutStreamer->EmitIntValue(R_00B848_COMPUTE_PGM_RSRC1, 4);
OutStreamer->EmitIntValue(CurrentProgramInfo.ComputePGMRSrc1, 4);
@@ -997,13 +997,13 @@ void AMDGPUAsmPrinter::EmitProgramInfoSI
OutStreamer->EmitIntValue(S_00B028_VGPRS(CurrentProgramInfo.VGPRBlocks) |
S_00B028_SGPRS(CurrentProgramInfo.SGPRBlocks), 4);
unsigned Rsrc2Val = 0;
- if (STM.isVGPRSpillingEnabled(*MF.getFunction())) {
+ if (STM.isVGPRSpillingEnabled(MF.getFunction())) {
OutStreamer->EmitIntValue(R_0286E8_SPI_TMPRING_SIZE, 4);
OutStreamer->EmitIntValue(S_0286E8_WAVESIZE(CurrentProgramInfo.ScratchBlocks), 4);
if (TM.getTargetTriple().getOS() == Triple::AMDPAL)
Rsrc2Val = S_00B84C_SCRATCH_EN(CurrentProgramInfo.ScratchBlocks > 0);
}
- if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_PS) {
+ if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) {
OutStreamer->EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4);
OutStreamer->EmitIntValue(MFI->getPSInputEnable(), 4);
OutStreamer->EmitIntValue(R_0286D0_SPI_PS_INPUT_ADDR, 4);
@@ -1036,13 +1036,13 @@ void AMDGPUAsmPrinter::EmitPALMetadata(c
// we can use the same fixed value that .AMDGPU.config has for Mesa. Note
// that we use a register number rather than a byte offset, so we need to
// divide by 4.
- unsigned Rsrc1Reg = getRsrcReg(MF.getFunction()->getCallingConv()) / 4;
+ unsigned Rsrc1Reg = getRsrcReg(MF.getFunction().getCallingConv()) / 4;
unsigned Rsrc2Reg = Rsrc1Reg + 1;
// Also calculate the PAL metadata key for *S_SCRATCH_SIZE. It can be used
// with a constant offset to access any non-register shader-specific PAL
// metadata key.
unsigned ScratchSizeKey = PALMD::Key::CS_SCRATCH_SIZE;
- switch (MF.getFunction()->getCallingConv()) {
+ switch (MF.getFunction().getCallingConv()) {
case CallingConv::AMDGPU_PS:
ScratchSizeKey = PALMD::Key::PS_SCRATCH_SIZE;
break;
@@ -1068,7 +1068,7 @@ void AMDGPUAsmPrinter::EmitPALMetadata(c
PALMD::Key::VS_NUM_USED_SGPRS - PALMD::Key::VS_SCRATCH_SIZE;
PALMetadataMap[NumUsedVgprsKey] = CurrentProgramInfo.NumVGPRsForWavesPerEU;
PALMetadataMap[NumUsedSgprsKey] = CurrentProgramInfo.NumSGPRsForWavesPerEU;
- if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
+ if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
PALMetadataMap[Rsrc1Reg] |= CurrentProgramInfo.ComputePGMRSrc1;
PALMetadataMap[Rsrc2Reg] |= CurrentProgramInfo.ComputePGMRSrc2;
// ScratchSize is in bytes, 16 aligned.
@@ -1083,7 +1083,7 @@ void AMDGPUAsmPrinter::EmitPALMetadata(c
PALMetadataMap[ScratchSizeKey] |=
alignTo(CurrentProgramInfo.ScratchSize, 16);
}
- if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_PS) {
+ if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) {
PALMetadataMap[Rsrc2Reg] |=
S_00B02C_EXTRA_LDS_SIZE(CurrentProgramInfo.LDSBlocks);
PALMetadataMap[R_0286CC_SPI_PS_INPUT_ENA / 4] |= MFI->getPSInputEnable();
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUCallLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUCallLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUCallLowering.cpp Fri Dec 15 14:22:58 2017
@@ -43,7 +43,7 @@ unsigned AMDGPUCallLowering::lowerParame
MachineFunction &MF = MIRBuilder.getMF();
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
MachineRegisterInfo &MRI = MF.getRegInfo();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
const DataLayout &DL = F.getParent()->getDataLayout();
PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUASI.CONSTANT_ADDRESS);
LLT PtrType = getLLTForType(*PtrTy, DL);
@@ -64,7 +64,7 @@ void AMDGPUCallLowering::lowerParameter(
Type *ParamTy, unsigned Offset,
unsigned DstReg) const {
MachineFunction &MF = MIRBuilder.getMF();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
const DataLayout &DL = F.getParent()->getDataLayout();
PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUASI.CONSTANT_ADDRESS);
MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp Fri Dec 15 14:22:58 2017
@@ -1069,7 +1069,7 @@ SDValue AMDGPUTargetLowering::lowerUnhan
SDValue Callee = CLI.Callee;
SelectionDAG &DAG = CLI.DAG;
- const Function &Fn = *DAG.getMachineFunction().getFunction();
+ const Function &Fn = DAG.getMachineFunction().getFunction();
StringRef FuncName("<unknown>");
@@ -1097,7 +1097,7 @@ SDValue AMDGPUTargetLowering::LowerCall(
SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
- const Function &Fn = *DAG.getMachineFunction().getFunction();
+ const Function &Fn = DAG.getMachineFunction().getFunction();
DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
SDLoc(Op).getDebugLoc());
@@ -1190,7 +1190,7 @@ SDValue AMDGPUTargetLowering::LowerGloba
}
}
- const Function &Fn = *DAG.getMachineFunction().getFunction();
+ const Function &Fn = DAG.getMachineFunction().getFunction();
DiagnosticInfoUnsupported BadInit(
Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
DAG.getContext()->diagnose(BadInit);
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp Fri Dec 15 14:22:58 2017
@@ -153,7 +153,7 @@ void AMDGPUMCInstLower::lower(const Mach
int MCOpcode = TII->pseudoToMCOpcode(Opcode);
if (MCOpcode == -1) {
- LLVMContext &C = MI->getParent()->getParent()->getFunction()->getContext();
+ LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
C.emitError("AMDGPUMCInstLower::lower - Pseudo instruction doesn't have "
"a target-specific version: " + Twine(MI->getOpcode()));
}
@@ -205,7 +205,7 @@ void AMDGPUAsmPrinter::EmitInstruction(c
StringRef Err;
if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) {
- LLVMContext &C = MI->getParent()->getParent()->getFunction()->getContext();
+ LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
C.emitError("Illegal instruction detected: " + Err);
MI->print(errs());
}
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp Fri Dec 15 14:22:58 2017
@@ -19,7 +19,7 @@ AMDGPUMachineFunction::AMDGPUMachineFunc
MaxKernArgAlign(0),
LDSSize(0),
ABIArgOffset(0),
- IsEntryFunction(AMDGPU::isEntryFunctionCC(MF.getFunction()->getCallingConv())),
+ IsEntryFunction(AMDGPU::isEntryFunctionCC(MF.getFunction().getCallingConv())),
NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath) {
// FIXME: Should initialize KernArgSize based on ExplicitKernelArgOffset,
// except reserved size is not correctly aligned.
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -43,7 +43,7 @@ unsigned AMDGPURegisterInfo::getSubRegFr
// Forced to be here by one .inc
const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs(
const MachineFunction *MF) const {
- CallingConv::ID CC = MF->getFunction()->getCallingConv();
+ CallingConv::ID CC = MF->getFunction().getCallingConv();
switch (CC) {
case CallingConv::C:
case CallingConv::Fast:
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.cpp Fri Dec 15 14:22:58 2017
@@ -468,7 +468,7 @@ unsigned SISubtarget::getReservedNumSGPR
}
unsigned SISubtarget::getMaxNumSGPRs(const MachineFunction &MF) const {
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
// Compute maximum number of SGPRs function can use using default/requested
@@ -518,7 +518,7 @@ unsigned SISubtarget::getMaxNumSGPRs(con
}
unsigned SISubtarget::getMaxNumVGPRs(const MachineFunction &MF) const {
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
// Compute maximum number of VGPRs function can use using default/requested
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h Fri Dec 15 14:22:58 2017
@@ -382,7 +382,7 @@ public:
unsigned getOccupancyWithLocalMemSize(const MachineFunction &MF) const {
const auto *MFI = MF.getInfo<SIMachineFunctionInfo>();
- return getOccupancyWithLocalMemSize(MFI->getLDSSize(), *MF.getFunction());
+ return getOccupancyWithLocalMemSize(MFI->getLDSSize(), MF.getFunction());
}
bool hasFP16Denormals() const {
@@ -410,7 +410,7 @@ public:
}
bool enableIEEEBit(const MachineFunction &MF) const {
- return AMDGPU::isCompute(MF.getFunction()->getCallingConv());
+ return AMDGPU::isCompute(MF.getFunction().getCallingConv());
}
bool useFlatForGlobal() const {
@@ -482,12 +482,12 @@ public:
}
bool isMesaKernel(const MachineFunction &MF) const {
- return isMesa3DOS() && !AMDGPU::isShader(MF.getFunction()->getCallingConv());
+ return isMesa3DOS() && !AMDGPU::isShader(MF.getFunction().getCallingConv());
}
// Covers VS/PS/CS graphics shaders
bool isMesaGfxShader(const MachineFunction &MF) const {
- return isMesa3DOS() && AMDGPU::isShader(MF.getFunction()->getCallingConv());
+ return isMesa3DOS() && AMDGPU::isShader(MF.getFunction().getCallingConv());
}
bool isAmdCodeObjectV2(const MachineFunction &MF) const {
Modified: llvm/trunk/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp Fri Dec 15 14:22:58 2017
@@ -1641,7 +1641,7 @@ AMDGPUCFGStructurizer::normalizeInfinite
FuncRep->push_back(DummyExitBlk); //insert to function
SHOWNEWBLK(DummyExitBlk, "DummyExitBlock to normalize infiniteLoop: ");
DEBUG(dbgs() << "Old branch instr: " << *BranchMI << "\n";);
- LLVMContext &Ctx = LoopHeader->getParent()->getFunction()->getContext();
+ LLVMContext &Ctx = LoopHeader->getParent()->getFunction().getContext();
Ctx.emitError("Extra register needed to handle CFG");
return nullptr;
}
Modified: llvm/trunk/lib/Target/AMDGPU/GCNIterativeScheduler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/GCNIterativeScheduler.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/GCNIterativeScheduler.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/GCNIterativeScheduler.cpp Fri Dec 15 14:22:58 2017
@@ -566,7 +566,7 @@ void GCNIterativeScheduler::scheduleILP(
bool TryMaximizeOccupancy) {
const auto &ST = MF.getSubtarget<SISubtarget>();
auto TgtOcc = std::min(ST.getOccupancyWithLocalMemSize(MF),
- ST.getWavesPerEU(*MF.getFunction()).second);
+ ST.getWavesPerEU(MF.getFunction()).second);
sortRegionsByPressure(TgtOcc);
auto Occ = Regions.front()->MaxPressure.getOccupancy(ST);
Modified: llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/GCNSchedStrategy.cpp Fri Dec 15 14:22:58 2017
@@ -37,7 +37,7 @@ static unsigned getMaxWaves(unsigned SGP
ST.getOccupancyWithNumVGPRs(VGPRs));
return std::min(MinRegOccupancy,
ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
- *MF.getFunction()));
+ MF.getFunction()));
}
void GCNMaxOccupancySchedStrategy::initialize(ScheduleDAGMI *DAG) {
@@ -315,7 +315,7 @@ GCNScheduleDAGMILive::GCNScheduleDAGMILi
ST(MF.getSubtarget<SISubtarget>()),
MFI(*MF.getInfo<SIMachineFunctionInfo>()),
StartingOccupancy(ST.getOccupancyWithLocalMemSize(MFI.getLDSSize(),
- *MF.getFunction())),
+ MF.getFunction())),
MinOccupancy(StartingOccupancy), Stage(0), RegionIdx(0) {
DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n");
Modified: llvm/trunk/lib/Target/AMDGPU/R600ClauseMergePass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600ClauseMergePass.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/R600ClauseMergePass.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/R600ClauseMergePass.cpp Fri Dec 15 14:22:58 2017
@@ -180,7 +180,7 @@ bool R600ClauseMergePass::mergeIfPossibl
}
bool R600ClauseMergePass::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
Modified: llvm/trunk/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp Fri Dec 15 14:22:58 2017
@@ -512,14 +512,14 @@ public:
R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
- CFStack CFStack(ST, MF.getFunction()->getCallingConv());
+ CFStack CFStack(ST, MF.getFunction().getCallingConv());
for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
++MB) {
MachineBasicBlock &MBB = *MB;
unsigned CfCount = 0;
std::vector<std::pair<unsigned, std::set<MachineInstr *>>> LoopStack;
std::vector<MachineInstr * > IfThenElseStack;
- if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_VS) {
+ if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_VS) {
BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
getHWInstrDesc(CF_CALL_FS));
CfCount++;
Modified: llvm/trunk/lib/Target/AMDGPU/R600InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600InstrInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/R600InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/R600InstrInfo.cpp Fri Dec 15 14:22:58 2017
@@ -197,7 +197,7 @@ bool R600InstrInfo::usesVertexCache(unsi
bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const {
const MachineFunction *MF = MI.getParent()->getParent();
- return !AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
+ return !AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
usesVertexCache(MI.getOpcode());
}
@@ -207,7 +207,7 @@ bool R600InstrInfo::usesTextureCache(uns
bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const {
const MachineFunction *MF = MI.getParent()->getParent();
- return (AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
+ return (AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
usesVertexCache(MI.getOpcode())) ||
usesTextureCache(MI.getOpcode());
}
Modified: llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp Fri Dec 15 14:22:58 2017
@@ -336,7 +336,7 @@ void R600VectorRegMerger::trackRSI(const
}
bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
const R600Subtarget &ST = Fn.getSubtarget<R600Subtarget>();
Modified: llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp Fri Dec 15 14:22:58 2017
@@ -926,7 +926,7 @@ bool SIFoldOperands::tryFoldOMod(Machine
}
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
MRI = &MF.getRegInfo();
Modified: llvm/trunk/lib/Target/AMDGPU/SIFrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIFrameLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIFrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIFrameLowering.cpp Fri Dec 15 14:22:58 2017
@@ -394,7 +394,7 @@ void SIFrameLowering::emitEntryFunctionS
// We now have the GIT ptr - now get the scratch descriptor from the entry
// at offset 0.
PointerType *PtrTy =
- PointerType::get(Type::getInt64Ty(MF.getFunction()->getContext()),
+ PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
AMDGPUAS::CONSTANT_ADDRESS);
MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
@@ -425,7 +425,7 @@ void SIFrameLowering::emitEntryFunctionS
if (MFI->hasImplicitBufferPtr()) {
unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
- if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
+ if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);
BuildMI(MBB, I, DL, Mov64, Rsrc01)
@@ -435,7 +435,7 @@ void SIFrameLowering::emitEntryFunctionS
const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);
PointerType *PtrTy =
- PointerType::get(Type::getInt64Ty(MF.getFunction()->getContext()),
+ PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
AMDGPUAS::CONSTANT_ADDRESS);
MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
auto MMO = MF.getMachineMemOperand(PtrInfo,
Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Fri Dec 15 14:22:58 2017
@@ -1460,14 +1460,14 @@ SDValue SITargetLowering::LowerFormalArg
const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
MachineFunction &MF = DAG.getMachineFunction();
- FunctionType *FType = MF.getFunction()->getFunctionType();
+ FunctionType *FType = MF.getFunction().getFunctionType();
SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
- const Function *Fn = MF.getFunction();
+ const Function &Fn = MF.getFunction();
DiagnosticInfoUnsupported NoGraphicsHSA(
- *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
+ Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
DAG.getContext()->diagnose(NoGraphicsHSA);
return DAG.getEntryNode();
}
@@ -1696,7 +1696,7 @@ SDValue SITargetLowering::LowerFormalArg
auto &ArgUsageInfo =
DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
- ArgUsageInfo.setFuncArgInfo(*MF.getFunction(), Info->getArgInfo());
+ ArgUsageInfo.setFuncArgInfo(MF.getFunction(), Info->getArgInfo());
unsigned StackArgSize = CCInfo.getNextStackOffset();
Info->setBytesInStackArgArea(StackArgSize);
@@ -2032,8 +2032,8 @@ bool SITargetLowering::isEligibleForTail
return false;
MachineFunction &MF = DAG.getMachineFunction();
- const Function *CallerF = MF.getFunction();
- CallingConv::ID CallerCC = CallerF->getCallingConv();
+ const Function &CallerF = MF.getFunction();
+ CallingConv::ID CallerCC = CallerF.getCallingConv();
const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
@@ -2054,7 +2054,7 @@ bool SITargetLowering::isEligibleForTail
if (IsVarArg)
return false;
- for (const Argument &Arg : CallerF->args()) {
+ for (const Argument &Arg : CallerF.args()) {
if (Arg.hasByValAttr())
return false;
}
@@ -3594,11 +3594,11 @@ SDValue SITargetLowering::lowerTRAP(SDVa
case SISubtarget::TrapIDLLVMTrap:
return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
case SISubtarget::TrapIDLLVMDebugTrap: {
- DiagnosticInfoUnsupported NoTrap(*MF.getFunction(),
+ DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
"debugtrap handler not supported",
Op.getDebugLoc(),
DS_Warning);
- LLVMContext &Ctx = MF.getFunction()->getContext();
+ LLVMContext &Ctx = MF.getFunction().getContext();
Ctx.diagnose(NoTrap);
return Chain;
}
@@ -3711,7 +3711,7 @@ SDValue SITargetLowering::lowerADDRSPACE
const MachineFunction &MF = DAG.getMachineFunction();
DiagnosticInfoUnsupported InvalidAddrSpaceCast(
- *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
+ MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
DAG.getContext()->diagnose(InvalidAddrSpaceCast);
return DAG.getUNDEF(ASC->getValueType(0));
@@ -3913,7 +3913,7 @@ SDValue SITargetLowering::lowerImplicitZ
static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
EVT VT) {
- DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
+ DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
"non-hsa intrinsic with hsa target",
DL.getDebugLoc());
DAG.getContext()->diagnose(BadIntrin);
@@ -3922,7 +3922,7 @@ static SDValue emitNonHSAIntrinsicError(
static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
EVT VT) {
- DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
+ DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
"intrinsic not supported on subtarget",
DL.getDebugLoc());
DAG.getContext()->diagnose(BadIntrin);
@@ -3951,7 +3951,7 @@ SDValue SITargetLowering::LowerINTRINSIC
case Intrinsic::amdgcn_queue_ptr: {
if (!Subtarget->isAmdCodeObjectV2(MF)) {
DiagnosticInfoUnsupported BadIntrin(
- *MF.getFunction(), "unsupported hsa intrinsic without hsa target",
+ MF.getFunction(), "unsupported hsa intrinsic without hsa target",
DL.getDebugLoc());
DAG.getContext()->diagnose(BadIntrin);
return DAG.getUNDEF(VT);
@@ -4129,7 +4129,7 @@ SDValue SITargetLowering::LowerINTRINSIC
return SDValue();
DiagnosticInfoUnsupported BadIntrin(
- *MF.getFunction(), "intrinsic not supported on subtarget",
+ MF.getFunction(), "intrinsic not supported on subtarget",
DL.getDebugLoc());
DAG.getContext()->diagnose(BadIntrin);
return DAG.getUNDEF(VT);
@@ -4559,7 +4559,7 @@ SDValue SITargetLowering::LowerINTRINSIC
case Intrinsic::amdgcn_s_barrier: {
if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
- unsigned WGSize = ST.getFlatWorkGroupSizes(*MF.getFunction()).second;
+ unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
if (WGSize <= ST.getWavefrontSize())
return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
Op.getOperand(0)), 0);
Modified: llvm/trunk/lib/Target/AMDGPU/SIInsertSkips.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInsertSkips.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInsertSkips.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInsertSkips.cpp Fri Dec 15 14:22:58 2017
@@ -166,7 +166,7 @@ bool SIInsertSkips::skipIfDead(MachineIn
MachineBasicBlock &MBB = *MI.getParent();
MachineFunction *MF = MBB.getParent();
- if (MF->getFunction()->getCallingConv() != CallingConv::AMDGPU_PS ||
+ if (MF->getFunction().getCallingConv() != CallingConv::AMDGPU_PS ||
!shouldSkip(MBB, MBB.getParent()->back()))
return false;
Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp Fri Dec 15 14:22:58 2017
@@ -375,7 +375,7 @@ static bool memOpsHaveSameBasePtr(const
if (!Base1 || !Base2)
return false;
const MachineFunction &MF = *MI1.getParent()->getParent();
- const DataLayout &DL = MF.getFunction()->getParent()->getDataLayout();
+ const DataLayout &DL = MF.getFunction().getParent()->getDataLayout();
Base1 = GetUnderlyingObject(Base1, DL);
Base2 = GetUnderlyingObject(Base1, DL);
@@ -442,10 +442,10 @@ static void reportIllegalCopy(const SIIn
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) {
MachineFunction *MF = MBB.getParent();
- DiagnosticInfoUnsupported IllegalCopy(*MF->getFunction(),
+ DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(),
"illegal SGPR to VGPR copy",
DL, DS_Error);
- LLVMContext &C = MF->getFunction()->getContext();
+ LLVMContext &C = MF->getFunction().getContext();
C.diagnose(IllegalCopy);
BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
@@ -873,8 +873,8 @@ void SIInstrInfo::storeRegToStackSlot(Ma
return;
}
- if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
- LLVMContext &Ctx = MF->getFunction()->getContext();
+ if (!ST.isVGPRSpillingEnabled(MF->getFunction())) {
+ LLVMContext &Ctx = MF->getFunction().getContext();
Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
" spill register");
BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
@@ -975,8 +975,8 @@ void SIInstrInfo::loadRegFromStackSlot(M
return;
}
- if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
- LLVMContext &Ctx = MF->getFunction()->getContext();
+ if (!ST.isVGPRSpillingEnabled(MF->getFunction())) {
+ LLVMContext &Ctx = MF->getFunction().getContext();
Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
" restore register");
BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);
@@ -1017,7 +1017,7 @@ unsigned SIInstrInfo::calculateLDSSpillA
if (TIDReg == AMDGPU::NoRegister)
return TIDReg;
- if (!AMDGPU::isShader(MF->getFunction()->getCallingConv()) &&
+ if (!AMDGPU::isShader(MF->getFunction().getCallingConv()) &&
WorkGroupSize > WavefrontSize) {
unsigned TIDIGXReg
= MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
@@ -3444,7 +3444,7 @@ void SIInstrInfo::legalizeOperands(Machi
// scratch memory access. In both cases, the legalization never involves
// conversion to the addr64 form.
if (isMIMG(MI) ||
- (AMDGPU::isShader(MF.getFunction()->getCallingConv()) &&
+ (AMDGPU::isShader(MF.getFunction().getCallingConv()) &&
(isMUBUF(MI) || isMTBUF(MI)))) {
MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
Modified: llvm/trunk/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp Fri Dec 15 14:22:58 2017
@@ -913,7 +913,7 @@ bool SILoadStoreOptimizer::optimizeBlock
}
bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
STM = &MF.getSubtarget<SISubtarget>();
Modified: llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp Fri Dec 15 14:22:58 2017
@@ -51,9 +51,9 @@ SIMachineFunctionInfo::SIMachineFunction
ImplicitArgPtr(false),
GITPtrHigh(0xffffffff) {
const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
- const Function *F = MF.getFunction();
- FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(*F);
- WavesPerEU = ST.getWavesPerEU(*F);
+ const Function &F = MF.getFunction();
+ FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(F);
+ WavesPerEU = ST.getWavesPerEU(F);
if (!isEntryFunction()) {
// Non-entry functions have no special inputs for now, other registers
@@ -68,21 +68,21 @@ SIMachineFunctionInfo::SIMachineFunction
ArgInfo.PrivateSegmentWaveByteOffset =
ArgDescriptor::createRegister(ScratchWaveOffsetReg);
- if (F->hasFnAttribute("amdgpu-implicitarg-ptr"))
+ if (F.hasFnAttribute("amdgpu-implicitarg-ptr"))
ImplicitArgPtr = true;
} else {
- if (F->hasFnAttribute("amdgpu-implicitarg-ptr"))
+ if (F.hasFnAttribute("amdgpu-implicitarg-ptr"))
KernargSegmentPtr = true;
}
- CallingConv::ID CC = F->getCallingConv();
+ CallingConv::ID CC = F.getCallingConv();
if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL) {
- if (!F->arg_empty())
+ if (!F.arg_empty())
KernargSegmentPtr = true;
WorkGroupIDX = true;
WorkItemIDX = true;
} else if (CC == CallingConv::AMDGPU_PS) {
- PSInputAddr = AMDGPU::getInitialPSInputAddr(*F);
+ PSInputAddr = AMDGPU::getInitialPSInputAddr(F);
}
if (ST.debuggerEmitPrologue()) {
@@ -94,27 +94,27 @@ SIMachineFunctionInfo::SIMachineFunction
WorkItemIDY = true;
WorkItemIDZ = true;
} else {
- if (F->hasFnAttribute("amdgpu-work-group-id-x"))
+ if (F.hasFnAttribute("amdgpu-work-group-id-x"))
WorkGroupIDX = true;
- if (F->hasFnAttribute("amdgpu-work-group-id-y"))
+ if (F.hasFnAttribute("amdgpu-work-group-id-y"))
WorkGroupIDY = true;
- if (F->hasFnAttribute("amdgpu-work-group-id-z"))
+ if (F.hasFnAttribute("amdgpu-work-group-id-z"))
WorkGroupIDZ = true;
- if (F->hasFnAttribute("amdgpu-work-item-id-x"))
+ if (F.hasFnAttribute("amdgpu-work-item-id-x"))
WorkItemIDX = true;
- if (F->hasFnAttribute("amdgpu-work-item-id-y"))
+ if (F.hasFnAttribute("amdgpu-work-item-id-y"))
WorkItemIDY = true;
- if (F->hasFnAttribute("amdgpu-work-item-id-z"))
+ if (F.hasFnAttribute("amdgpu-work-item-id-z"))
WorkItemIDZ = true;
}
const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
- bool MaySpill = ST.isVGPRSpillingEnabled(*F);
+ bool MaySpill = ST.isVGPRSpillingEnabled(F);
bool HasStackObjects = FrameInfo.hasStackObjects();
if (isEntryFunction()) {
@@ -139,30 +139,30 @@ SIMachineFunctionInfo::SIMachineFunction
if (HasStackObjects || MaySpill)
PrivateSegmentBuffer = true;
- if (F->hasFnAttribute("amdgpu-dispatch-ptr"))
+ if (F.hasFnAttribute("amdgpu-dispatch-ptr"))
DispatchPtr = true;
- if (F->hasFnAttribute("amdgpu-queue-ptr"))
+ if (F.hasFnAttribute("amdgpu-queue-ptr"))
QueuePtr = true;
- if (F->hasFnAttribute("amdgpu-dispatch-id"))
+ if (F.hasFnAttribute("amdgpu-dispatch-id"))
DispatchID = true;
} else if (ST.isMesaGfxShader(MF)) {
if (HasStackObjects || MaySpill)
ImplicitBufferPtr = true;
}
- if (F->hasFnAttribute("amdgpu-kernarg-segment-ptr"))
+ if (F.hasFnAttribute("amdgpu-kernarg-segment-ptr"))
KernargSegmentPtr = true;
if (ST.hasFlatAddressSpace() && isEntryFunction() && IsCOV2) {
// TODO: This could be refined a lot. The attribute is a poor way of
// detecting calls that may require it before argument lowering.
- if (HasStackObjects || F->hasFnAttribute("amdgpu-flat-scratch"))
+ if (HasStackObjects || F.hasFnAttribute("amdgpu-flat-scratch"))
FlatScratchInit = true;
}
- Attribute A = F->getFnAttribute("amdgpu-git-ptr-high");
+ Attribute A = F.getFnAttribute("amdgpu-git-ptr-high");
StringRef S = A.getValueAsString();
if (!S.empty())
S.consumeInteger(0, GITPtrHigh);
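(Editorial sketch, not part of the patch.) Files that query the Function repeatedly, like SIMachineFunctionInfo.cpp above, also switch the local binding from const Function *F to const Function &F, after which every access changes from -> to '.'. A short hedged sketch of that binding pattern, again with stand-in types rather than the real llvm::Function and llvm::MachineFunction:

    // Illustrative only: local pointer binding replaced by a reference binding.
    #include <iostream>
    #include <set>
    #include <string>

    struct Function {
      std::set<std::string> Attrs;
      bool hasFnAttribute(const std::string &A) const { return Attrs.count(A) != 0; }
    };

    struct MachineFunction {
      Function &F;
      Function &getFunction() const { return F; }
    };

    // Mirrors the SIMachineFunctionInfo constructor above: bind once, query often.
    // Before: const Function *F = MF.getFunction();  ...  F->hasFnAttribute(...)
    // After:  const Function &F = MF.getFunction();  ...  F.hasFnAttribute(...)
    bool wantsImplicitArgPtr(const MachineFunction &MF) {
      const Function &F = MF.getFunction();
      return F.hasFnAttribute("amdgpu-implicitarg-ptr");
    }

    int main() {
      Function Fn{{"amdgpu-implicitarg-ptr"}};
      MachineFunction MF{Fn};
      std::cout << std::boolalpha << wantsImplicitArgPtr(MF) << '\n';
    }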
Modified: llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp Fri Dec 15 14:22:58 2017
@@ -340,9 +340,9 @@ Optional<SIMemOpInfo> SIMemOpInfo::getAt
/* static */
void SIMemOpInfo::reportUnknownSyncScope(
const MachineBasicBlock::iterator &MI) {
- DiagnosticInfoUnsupported Diag(*MI->getParent()->getParent()->getFunction(),
+ DiagnosticInfoUnsupported Diag(MI->getParent()->getParent()->getFunction(),
"Unsupported synchronization scope");
- LLVMContext *CTX = &MI->getParent()->getParent()->getFunction()->getContext();
+ LLVMContext *CTX = &MI->getParent()->getParent()->getFunction().getContext();
CTX->diagnose(Diag);
}
Modified: llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp Fri Dec 15 14:22:58 2017
@@ -205,7 +205,7 @@ static bool isLiveOut(const MachineBasic
}
bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
Modified: llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp Fri Dec 15 14:22:58 2017
@@ -103,7 +103,7 @@ static MachineInstr* getOrExecSource(con
}
bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
Modified: llvm/trunk/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIPeepholeSDWA.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIPeepholeSDWA.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIPeepholeSDWA.cpp Fri Dec 15 14:22:58 2017
@@ -1050,7 +1050,7 @@ void SIPeepholeSDWA::legalizeScalarOpera
bool SIPeepholeSDWA::runOnMachineFunction(MachineFunction &MF) {
const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
- if (!ST.hasSDWA() || skipFunction(*MF.getFunction()))
+ if (!ST.hasSDWA() || skipFunction(MF.getFunction()))
return false;
MRI = &MF.getRegInfo();
Modified: llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -1514,7 +1514,7 @@ unsigned SIRegisterInfo::getRegPressureL
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
- *MF.getFunction());
+ MF.getFunction());
switch (RC->getID()) {
default:
return AMDGPURegisterInfo::getRegPressureLimit(RC, MF);
Modified: llvm/trunk/lib/Target/AMDGPU/SIShrinkInstructions.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIShrinkInstructions.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIShrinkInstructions.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIShrinkInstructions.cpp Fri Dec 15 14:22:58 2017
@@ -286,7 +286,7 @@ static void shrinkScalarCompare(const SI
}
bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
MachineRegisterInfo &MRI = MF.getRegInfo();
Modified: llvm/trunk/lib/Target/AMDGPU/SIWholeQuadMode.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIWholeQuadMode.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIWholeQuadMode.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIWholeQuadMode.cpp Fri Dec 15 14:22:58 2017
@@ -307,7 +307,7 @@ void SIWholeQuadMode::markInstructionUse
char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
std::vector<WorkItem> &Worklist) {
char GlobalFlags = 0;
- bool WQMOutputs = MF.getFunction()->hasFnAttribute("amdgpu-ps-wqm-outputs");
+ bool WQMOutputs = MF.getFunction().hasFnAttribute("amdgpu-ps-wqm-outputs");
SmallVector<MachineInstr *, 4> SetInactiveInstrs;
// We need to visit the basic blocks in reverse post-order so that we visit
@@ -842,7 +842,7 @@ bool SIWholeQuadMode::runOnMachineFuncti
Blocks.clear();
LiveMaskQueries.clear();
LowerToCopyInstrs.clear();
- CallingConv = MF.getFunction()->getCallingConv();
+ CallingConv = MF.getFunction().getCallingConv();
const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
Modified: llvm/trunk/lib/Target/ARC/ARCBranchFinalize.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARC/ARCBranchFinalize.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARC/ARCBranchFinalize.cpp (original)
+++ llvm/trunk/lib/Target/ARC/ARCBranchFinalize.cpp Fri Dec 15 14:22:58 2017
@@ -142,7 +142,7 @@ void ARCBranchFinalize::replaceWithCmpBc
bool ARCBranchFinalize::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "Running ARC Branch Finalize on "
- << MF.getFunction()->getName() << "\n");
+ << MF.getName() << "\n");
std::vector<MachineInstr *> Branches;
bool Changed = false;
unsigned MaxSize = 0;
@@ -172,7 +172,7 @@ bool ARCBranchFinalize::runOnMachineFunc
isInt<9>(MaxSize) ? replaceWithBRcc(P.first) : replaceWithCmpBcc(P.first);
}
- DEBUG(dbgs() << "Estimated function size for " << MF.getFunction()->getName()
+ DEBUG(dbgs() << "Estimated function size for " << MF.getName()
<< ": " << MaxSize << "\n");
return Changed;
Modified: llvm/trunk/lib/Target/ARC/ARCFrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARC/ARCFrameLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARC/ARCFrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARC/ARCFrameLowering.cpp Fri Dec 15 14:22:58 2017
@@ -88,7 +88,7 @@ determineLastCalleeSave(const std::vecto
void ARCFrameLowering::determineCalleeSaves(MachineFunction &MF,
BitVector &SavedRegs,
RegScavenger *RS) const {
- DEBUG(dbgs() << "Determine Callee Saves: " << MF.getFunction()->getName()
+ DEBUG(dbgs() << "Determine Callee Saves: " << MF.getName()
<< "\n");
TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
SavedRegs.set(ARC::BLINK);
@@ -115,7 +115,7 @@ void ARCFrameLowering::adjustStackToMatc
/// registers onto the stack, when enough callee saved registers are required.
void ARCFrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
- DEBUG(dbgs() << "Emit Prologue: " << MF.getFunction()->getName() << "\n");
+ DEBUG(dbgs() << "Emit Prologue: " << MF.getName() << "\n");
auto *AFI = MF.getInfo<ARCFunctionInfo>();
MachineModuleInfo &MMI = MF.getMMI();
MCContext &Context = MMI.getContext();
@@ -131,7 +131,7 @@ void ARCFrameLowering::emitPrologue(Mach
unsigned StackSlotsUsedByFunclet = 0;
bool SavedBlink = false;
unsigned AlreadyAdjusted = 0;
- if (MF.getFunction()->isVarArg()) {
+ if (MF.getFunction().isVarArg()) {
// Add in the varargs area here first.
DEBUG(dbgs() << "Varargs\n");
unsigned VarArgsBytes = MFI.getObjectSize(AFI->getVarArgsFrameIndex());
@@ -235,7 +235,7 @@ void ARCFrameLowering::emitPrologue(Mach
/// registers onto the stack, when enough callee saved registers are required.
void ARCFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
- DEBUG(dbgs() << "Emit Epilogue: " << MF.getFunction()->getName() << "\n");
+ DEBUG(dbgs() << "Emit Epilogue: " << MF.getName() << "\n");
auto *AFI = MF.getInfo<ARCFunctionInfo>();
const ARCInstrInfo *TII = MF.getSubtarget<ARCSubtarget>().getInstrInfo();
MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
@@ -302,7 +302,7 @@ void ARCFrameLowering::emitEpilogue(Mach
}
// Relieve the varargs area if necessary.
- if (MF.getFunction()->isVarArg()) {
+ if (MF.getFunction().isVarArg()) {
// Add in the varargs area here first.
DEBUG(dbgs() << "Varargs\n");
unsigned VarArgsBytes = MFI.getObjectSize(AFI->getVarArgsFrameIndex());
@@ -383,7 +383,7 @@ bool ARCFrameLowering::spillCalleeSavedR
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const {
DEBUG(dbgs() << "Spill callee saved registers: "
- << MBB.getParent()->getFunction()->getName() << "\n");
+ << MBB.getParent()->getName() << "\n");
// There are routines for saving at least 3 registers (r13 to r15, etc.)
unsigned Last = determineLastCalleeSave(CSI);
if (UseSaveRestoreFunclet && Last > ARC::R14) {
@@ -400,7 +400,7 @@ bool ARCFrameLowering::restoreCalleeSave
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
std::vector<CalleeSavedInfo> &CSI, const TargetRegisterInfo *TRI) const {
DEBUG(dbgs() << "Restore callee saved registers: "
- << MBB.getParent()->getFunction()->getName() << "\n");
+ << MBB.getParent()->getName() << "\n");
// There are routines for saving at least 3 registers (r13 to r15, etc.)
unsigned Last = determineLastCalleeSave(CSI);
if (UseSaveRestoreFunclet && Last > ARC::R14) {
@@ -415,7 +415,7 @@ void ARCFrameLowering::processFunctionBe
MachineFunction &MF, RegScavenger *RS) const {
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
DEBUG(dbgs() << "Process function before frame finalized: "
- << MF.getFunction()->getName() << "\n");
+ << MF.getName() << "\n");
MachineFrameInfo &MFI = MF.getFrameInfo();
DEBUG(dbgs() << "Current stack size: " << MFI.getStackSize() << "\n");
const TargetRegisterClass *RC = &ARC::GPR32RegClass;
@@ -440,8 +440,7 @@ static void emitRegUpdate(MachineBasicBl
MachineBasicBlock::iterator ARCFrameLowering::eliminateCallFramePseudoInstr(
MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- DEBUG(dbgs() << "EmitCallFramePseudo: " << MF.getFunction()->getName()
- << "\n");
+ DEBUG(dbgs() << "EmitCallFramePseudo: " << MF.getName() << "\n");
const ARCInstrInfo *TII = MF.getSubtarget<ARCSubtarget>().getInstrInfo();
MachineInstr &Old = *I;
DebugLoc dl = Old.getDebugLoc();
Modified: llvm/trunk/lib/Target/ARC/ARCRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARC/ARCRegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARC/ARCRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/ARC/ARCRegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -125,8 +125,7 @@ static void ReplaceFrameIndex(MachineBas
ARCRegisterInfo::ARCRegisterInfo() : ARCGenRegisterInfo(ARC::BLINK) {}
bool ARCRegisterInfo::needsFrameMoves(const MachineFunction &MF) {
- return MF.getMMI().hasDebugInfo() ||
- MF.getFunction()->needsUnwindTableEntry();
+ return MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry();
}
const MCPhysReg *
Modified: llvm/trunk/lib/Target/ARM/A15SDOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/A15SDOptimizer.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/A15SDOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/ARM/A15SDOptimizer.cpp Fri Dec 15 14:22:58 2017
@@ -655,7 +655,7 @@ bool A15SDOptimizer::runOnInstruction(Ma
}
bool A15SDOptimizer::runOnMachineFunction(MachineFunction &Fn) {
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
const ARMSubtarget &STI = Fn.getSubtarget<ARMSubtarget>();
Modified: llvm/trunk/lib/Target/ARM/ARMAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMAsmPrinter.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMAsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMAsmPrinter.cpp Fri Dec 15 14:22:58 2017
@@ -109,7 +109,7 @@ bool ARMAsmPrinter::runOnMachineFunction
Subtarget = &MF.getSubtarget<ARMSubtarget>();
SetupMachineFunction(MF);
- const Function* F = MF.getFunction();
+ const Function &F = MF.getFunction();
const TargetMachine& TM = MF.getTarget();
// Collect all globals that had their storage promoted to a constant pool.
@@ -120,13 +120,13 @@ bool ARMAsmPrinter::runOnMachineFunction
// Calculate this function's optimization goal.
unsigned OptimizationGoal;
- if (F->hasFnAttribute(Attribute::OptimizeNone))
+ if (F.hasFnAttribute(Attribute::OptimizeNone))
// For best debugging illusion, speed and small size sacrificed
OptimizationGoal = 6;
- else if (F->optForMinSize())
+ else if (F.optForMinSize())
// Aggressively for small size, speed and debug illusion sacrificed
OptimizationGoal = 4;
- else if (F->optForSize())
+ else if (F.optForSize())
// For small size, but speed and debugging illusion preserved
OptimizationGoal = 3;
else if (TM.getOptLevel() == CodeGenOpt::Aggressive)
@@ -146,7 +146,7 @@ bool ARMAsmPrinter::runOnMachineFunction
OptimizationGoals = 0;
if (Subtarget->isTargetCOFF()) {
- bool Internal = F->hasInternalLinkage();
+ bool Internal = F.hasInternalLinkage();
COFF::SymbolStorageClass Scl = Internal ? COFF::IMAGE_SYM_CLASS_STATIC
: COFF::IMAGE_SYM_CLASS_EXTERNAL;
int Type = COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT;
Modified: llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp Fri Dec 15 14:22:58 2017
@@ -1512,18 +1512,18 @@ static unsigned duplicateCPV(MachineFunc
4, ACPV->getModifier(), ACPV->mustAddCurrentAddress());
else if (ACPV->isExtSymbol())
NewCPV = ARMConstantPoolSymbol::
- Create(MF.getFunction()->getContext(),
+ Create(MF.getFunction().getContext(),
cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
else if (ACPV->isBlockAddress())
NewCPV = ARMConstantPoolConstant::
Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
ARMCP::CPBlockAddress, 4);
else if (ACPV->isLSDA())
- NewCPV = ARMConstantPoolConstant::Create(MF.getFunction(), PCLabelId,
+ NewCPV = ARMConstantPoolConstant::Create(&MF.getFunction(), PCLabelId,
ARMCP::CPLSDA, 4);
else if (ACPV->isMachineBasicBlock())
NewCPV = ARMConstantPoolMBB::
- Create(MF.getFunction()->getContext(),
+ Create(MF.getFunction().getContext(),
cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
else
llvm_unreachable("Unexpected ARM constantpool value type!!");
@@ -1843,7 +1843,7 @@ isProfitableToIfCvt(MachineBasicBlock &M
// If we are optimizing for size, see if the branch in the predecessor can be
// lowered to cbn?z by the constant island lowering pass, and return false if
// so. This results in a shorter instruction sequence.
- if (MBB.getParent()->getFunction()->optForSize()) {
+ if (MBB.getParent()->getFunction().optForSize()) {
MachineBasicBlock *Pred = *MBB.pred_begin();
if (!Pred->empty()) {
MachineInstr *LastMI = &*Pred->rbegin();
@@ -2210,7 +2210,7 @@ bool llvm::tryFoldSPUpdateIntoPushPop(co
unsigned NumBytes) {
// This optimisation potentially adds lots of load and store
// micro-operations, it's only really a great benefit to code-size.
- if (!MF.getFunction()->optForMinSize())
+ if (!MF.getFunction().optForMinSize())
return false;
// If only one register is pushed/popped, LLVM can use an LDR/STR
@@ -3982,7 +3982,7 @@ int ARMBaseInstrInfo::getOperandLatencyI
if (Latency > 0 && Subtarget.isThumb2()) {
const MachineFunction *MF = DefMI.getParent()->getParent();
// FIXME: Use Function::optForSize().
- if (MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize))
+ if (MF->getFunction().hasFnAttribute(Attribute::OptimizeForSize))
--Latency;
}
return Latency;
Modified: llvm/trunk/lib/Target/ARM/ARMBaseRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMBaseRegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMBaseRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMBaseRegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -71,17 +71,17 @@ ARMBaseRegisterInfo::getCalleeSavedRegs(
? CSR_iOS_SaveList
: (UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList);
- const Function *F = MF->getFunction();
- if (F->getCallingConv() == CallingConv::GHC) {
+ const Function &F = MF->getFunction();
+ if (F.getCallingConv() == CallingConv::GHC) {
// GHC set of callee saved regs is empty as all those regs are
// used for passing STG regs around
return CSR_NoRegs_SaveList;
- } else if (F->hasFnAttribute("interrupt")) {
+ } else if (F.hasFnAttribute("interrupt")) {
if (STI.isMClass()) {
// M-class CPUs have hardware which saves the registers needed to allow a
// function conforming to the AAPCS to function as a handler.
return UseSplitPush ? CSR_AAPCS_SplitPush_SaveList : CSR_AAPCS_SaveList;
- } else if (F->getFnAttribute("interrupt").getValueAsString() == "FIQ") {
+ } else if (F.getFnAttribute("interrupt").getValueAsString() == "FIQ") {
// Fast interrupt mode gives the handler a private copy of R8-R14, so less
// need to be saved to restore user-mode state.
return CSR_FIQ_SaveList;
@@ -93,7 +93,7 @@ ARMBaseRegisterInfo::getCalleeSavedRegs(
}
if (STI.getTargetLowering()->supportSwiftError() &&
- F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
+ F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
if (STI.isTargetDarwin())
return CSR_iOS_SwiftError_SaveList;
@@ -101,7 +101,7 @@ ARMBaseRegisterInfo::getCalleeSavedRegs(
CSR_AAPCS_SwiftError_SaveList;
}
- if (STI.isTargetDarwin() && F->getCallingConv() == CallingConv::CXX_FAST_TLS)
+ if (STI.isTargetDarwin() && F.getCallingConv() == CallingConv::CXX_FAST_TLS)
return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
? CSR_iOS_CXX_TLS_PE_SaveList
: CSR_iOS_CXX_TLS_SaveList;
@@ -111,7 +111,7 @@ ARMBaseRegisterInfo::getCalleeSavedRegs(
const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy(
const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
- if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
MF->getInfo<ARMFunctionInfo>()->isSplitCSR())
return CSR_iOS_CXX_TLS_ViaCopy_SaveList;
return nullptr;
@@ -126,7 +126,7 @@ ARMBaseRegisterInfo::getCallPreservedMas
return CSR_NoRegs_RegMask;
if (STI.getTargetLowering()->supportSwiftError() &&
- MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
+ MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
return STI.isTargetDarwin() ? CSR_iOS_SwiftError_RegMask
: CSR_AAPCS_SwiftError_RegMask;
@@ -440,7 +440,7 @@ void ARMBaseRegisterInfo::emitLoadConstP
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
MachineConstantPool *ConstantPool = MF.getConstantPool();
const Constant *C =
- ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
+ ConstantInt::get(Type::getInt32Ty(MF.getFunction().getContext()), Val);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
Modified: llvm/trunk/lib/Target/ARM/ARMCallLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMCallLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMCallLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMCallLowering.cpp Fri Dec 15 14:22:58 2017
@@ -190,7 +190,7 @@ void ARMCallLowering::splitToValueTypes(
LLVMContext &Ctx = OrigArg.Ty->getContext();
const DataLayout &DL = MF.getDataLayout();
MachineRegisterInfo &MRI = MF.getRegInfo();
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
SmallVector<EVT, 4> SplitVTs;
SmallVector<uint64_t, 4> Offsets;
@@ -218,7 +218,7 @@ void ARMCallLowering::splitToValueTypes(
bool NeedsConsecutiveRegisters =
TLI.functionArgumentNeedsConsecutiveRegisters(
- SplitTy, F->getCallingConv(), F->isVarArg());
+ SplitTy, F.getCallingConv(), F.isVarArg());
if (NeedsConsecutiveRegisters) {
Flags.setInConsecutiveRegs();
if (i == e - 1)
@@ -244,7 +244,7 @@ bool ARMCallLowering::lowerReturnVal(Mac
return true;
auto &MF = MIRBuilder.getMF();
- const auto &F = *MF.getFunction();
+ const auto &F = MF.getFunction();
auto DL = MF.getDataLayout();
auto &TLI = *getTLI<ARMTargetLowering>();
Modified: llvm/trunk/lib/Target/ARM/ARMExpandPseudoInsts.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMExpandPseudoInsts.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMExpandPseudoInsts.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMExpandPseudoInsts.cpp Fri Dec 15 14:22:58 2017
@@ -1259,7 +1259,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBa
MachineConstantPool *MCP = MF->getConstantPool();
unsigned PCLabelID = AFI->createPICLabelUId();
MachineConstantPoolValue *CPV =
- ARMConstantPoolSymbol::Create(MF->getFunction()->getContext(),
+ ARMConstantPoolSymbol::Create(MF->getFunction().getContext(),
"__aeabi_read_tp", PCLabelID, 0);
unsigned Reg = MI.getOperand(0).getReg();
MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
Modified: llvm/trunk/lib/Target/ARM/ARMFastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMFastISel.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMFastISel.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMFastISel.cpp Fri Dec 15 14:22:58 2017
@@ -2958,7 +2958,7 @@ unsigned ARMFastISel::ARMLowerPICELF(con
unsigned Align, MVT VT) {
bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
- LLVMContext *Context = &MF->getFunction()->getContext();
+ LLVMContext *Context = &MF->getFunction().getContext();
unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(
Modified: llvm/trunk/lib/Target/ARM/ARMFrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMFrameLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMFrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMFrameLowering.cpp Fri Dec 15 14:22:58 2017
@@ -203,10 +203,10 @@ static int sizeOfSPAdjustment(const Mach
static bool WindowsRequiresStackProbe(const MachineFunction &MF,
size_t StackSizeInBytes) {
const MachineFrameInfo &MFI = MF.getFrameInfo();
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
unsigned StackProbeSize = (MFI.getStackProtectorIndex() > 0) ? 4080 : 4096;
- if (F->hasFnAttribute("stack-probe-size"))
- F->getFnAttribute("stack-probe-size")
+ if (F.hasFnAttribute("stack-probe-size"))
+ F.getFnAttribute("stack-probe-size")
.getValueAsString()
.getAsInteger(0, StackProbeSize);
return StackSizeInBytes >= StackProbeSize;
@@ -370,7 +370,7 @@ void ARMFrameLowering::emitPrologue(Mach
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
StackAdjustingInsts DefCFAOffsetCandidates;
@@ -448,7 +448,7 @@ void ARMFrameLowering::emitPrologue(Mach
int FramePtrOffsetInPush = 0;
if (HasFP) {
int FPOffset = MFI.getObjectOffset(FramePtrSpillFI);
- assert(getMaxFPOffset(*MF.getFunction(), *AFI) <= FPOffset &&
+ assert(getMaxFPOffset(MF.getFunction(), *AFI) <= FPOffset &&
"Max FP estimation is wrong");
FramePtrOffsetInPush = FPOffset + ArgRegsSaveSize;
AFI->setFramePtrSpillOffset(MFI.getObjectOffset(FramePtrSpillFI) +
@@ -766,7 +766,7 @@ void ARMFrameLowering::emitEpilogue(Mach
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
- if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
+ if (MF.getFunction().getCallingConv() == CallingConv::GHC)
return;
// First put ourselves on the first (from top) terminator instructions.
@@ -1533,7 +1533,7 @@ checkNumAlignedDPRCS2Regs(MachineFunctio
return;
// Naked functions don't spill callee-saved registers.
- if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
+ if (MF.getFunction().hasFnAttribute(Attribute::Naked))
return;
// We are planning to use NEON instructions vst1 / vld1.
@@ -1744,7 +1744,7 @@ void ARMFrameLowering::determineCalleeSa
EstimatedStackSize += 16; // For possible paddings.
unsigned EstimatedRSStackSizeLimit = estimateRSStackSizeLimit(MF, this);
- int MaxFPOffset = getMaxFPOffset(*MF.getFunction(), *AFI);
+ int MaxFPOffset = getMaxFPOffset(MF.getFunction(), *AFI);
bool BigFrameOffsets = EstimatedStackSize >= EstimatedRSStackSizeLimit ||
MFI.hasVarSizedObjects() ||
(MFI.adjustsStack() && !canSimplifyCallFramePseudos(MF)) ||
@@ -2102,7 +2102,7 @@ void ARMFrameLowering::adjustForSegmente
// Sadly, this currently doesn't support varargs, platforms other than
// android/linux. Note that thumb1/thumb2 are support for android/linux.
- if (MF.getFunction()->isVarArg())
+ if (MF.getFunction().isVarArg())
report_fatal_error("Segmented stacks do not support vararg functions.");
if (!ST->isTargetAndroid() && !ST->isTargetLinux())
report_fatal_error("Segmented stacks not supported on this platform.");
@@ -2250,7 +2250,7 @@ void ARMFrameLowering::adjustForSegmente
if (Thumb && ST->isThumb1Only()) {
unsigned PCLabelId = ARMFI->createPICLabelUId();
ARMConstantPoolValue *NewCPV = ARMConstantPoolSymbol::Create(
- MF.getFunction()->getContext(), "__STACK_LIMIT", PCLabelId, 0);
+ MF.getFunction().getContext(), "__STACK_LIMIT", PCLabelId, 0);
MachineConstantPool *MCP = MF.getConstantPool();
unsigned CPI = MCP->getConstantPoolIndex(NewCPV, 4);
Modified: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp Fri Dec 15 14:22:58 2017
@@ -1773,7 +1773,7 @@ ARMTargetLowering::LowerCall(TargetLower
bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
bool isThisReturn = false;
bool isSibCall = false;
- auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
+ auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
// Disable tail calls if they're not supported.
if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true")
@@ -1782,7 +1782,7 @@ ARMTargetLowering::LowerCall(TargetLower
if (isTailCall) {
// Check if it's really possible to do a tail call.
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
- isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
+ isVarArg, isStructRet, MF.getFunction().hasStructRetAttr(),
Outs, OutVals, Ins, DAG);
if (!isTailCall && CLI.CS && CLI.CS.isMustTailCall())
report_fatal_error("failed to perform tail call elimination on a call "
@@ -1981,7 +1981,7 @@ ARMTargetLowering::LowerCall(TargetLower
bool isDirect = false;
const TargetMachine &TM = getTargetMachine();
- const Module *Mod = MF.getFunction()->getParent();
+ const Module *Mod = MF.getFunction().getParent();
const GlobalValue *GV = nullptr;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
GV = G->getGlobal();
@@ -2033,7 +2033,7 @@ ARMTargetLowering::LowerCall(TargetLower
auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
auto *BB = CLI.CS.getParent();
bool PreferIndirect =
- Subtarget->isThumb() && MF.getFunction()->optForMinSize() &&
+ Subtarget->isThumb() && MF.getFunction().optForMinSize() &&
count_if(GV->users(), [&BB](const User *U) {
return isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB;
}) > 2;
@@ -2105,7 +2105,7 @@ ARMTargetLowering::LowerCall(TargetLower
CallOpc = ARMISD::CALL_NOLINK;
else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
// Emit regular call when code size is the priority
- !MF.getFunction()->optForMinSize())
+ !MF.getFunction().optForMinSize())
// "mov lr, pc; b _foo" to avoid confusing the RSP
CallOpc = ARMISD::CALL_NOLINK;
else
@@ -2280,8 +2280,8 @@ ARMTargetLowering::IsEligibleForTailCall
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
- const Function *CallerF = MF.getFunction();
- CallingConv::ID CallerCC = CallerF->getCallingConv();
+ const Function &CallerF = MF.getFunction();
+ CallingConv::ID CallerCC = CallerF.getCallingConv();
assert(Subtarget->supportsTailCall());
@@ -2298,7 +2298,7 @@ ARMTargetLowering::IsEligibleForTailCall
// Exception-handling functions need a special set of instructions to indicate
// a return to the hardware. Tail-calling another function would probably
// break this.
- if (CallerF->hasFnAttribute("interrupt"))
+ if (CallerF.hasFnAttribute("interrupt"))
return false;
// Also avoid sibcall optimization if either caller or callee uses struct
@@ -2410,9 +2410,9 @@ ARMTargetLowering::CanLowerReturn(Callin
static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
const SDLoc &DL, SelectionDAG &DAG) {
const MachineFunction &MF = DAG.getMachineFunction();
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
- StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString();
+ StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();
// See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
// version of the "preferred return address". These offsets affect the return
@@ -2553,7 +2553,7 @@ ARMTargetLowering::LowerReturn(SDValue C
//
// M-class CPUs actually use a normal return sequence with a special
// (hardware-provided) value in LR, so the normal code path works.
- if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") &&
+ if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") &&
!Subtarget->isMClass()) {
if (Subtarget->isThumb1Only())
report_fatal_error("interrupt attribute is not supported in Thumb1");
@@ -2691,7 +2691,7 @@ SDValue ARMTargetLowering::LowerConstant
auto T = const_cast<Type*>(CP->getType());
auto C = const_cast<Constant*>(CP->getConstVal());
auto M = const_cast<Module*>(DAG.getMachineFunction().
- getFunction()->getParent());
+ getFunction().getParent());
auto GV = new GlobalVariable(
*M, T, /*isConst=*/true, GlobalVariable::InternalLinkage, C,
Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" +
@@ -2800,7 +2800,7 @@ ARMTargetLowering::LowerGlobalTLSAddress
// trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
// silly).
auto TRI =
- getTargetMachine().getSubtargetImpl(*F.getFunction())->getRegisterInfo();
+ getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo();
auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());
@@ -3055,7 +3055,7 @@ static SDValue promoteToConstantPool(con
// This is a win if the constant is only used in one function (so it doesn't
// need to be duplicated) or duplicating the constant wouldn't increase code
// size (implying the constant is no larger than 4 bytes).
- const Function *F = DAG.getMachineFunction().getFunction();
+ const Function &F = DAG.getMachineFunction().getFunction();
// We rely on this decision to inline being idemopotent and unrelated to the
// use-site. We know that if we inline a variable at one use site, we'll
@@ -3113,7 +3113,7 @@ static SDValue promoteToConstantPool(con
// in multiple functions but it no larger than a pointer. We also check if
// GVar has constant (non-ConstantExpr) users. If so, it essentially has its
// address taken.
- if (!allUsersAreInFunction(GVar, F) &&
+ if (!allUsersAreInFunction(GVar, &F) &&
!(Size <= 4 && allUsersAreInFunctions(GVar)))
return SDValue();
@@ -3322,7 +3322,7 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHA
bool IsPositionIndependent = isPositionIndependent();
unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
ARMConstantPoolValue *CPV =
- ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
+ ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex,
ARMCP::CPLSDA, PCAdj);
CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
@@ -3598,7 +3598,7 @@ SDValue ARMTargetLowering::LowerFormalAr
SmallVector<SDValue, 16> ArgValues;
SDValue ArgValue;
- Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
+ Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
unsigned CurArgIdx = 0;
// Initially ArgRegsSaveSize is zero.
@@ -7754,9 +7754,9 @@ static SDValue LowerFPOWI(SDValue Op, co
SDValue InChain = DAG.getEntryNode();
SDValue TCChain = InChain;
- const auto *F = DAG.getMachineFunction().getFunction();
+ const Function &F = DAG.getMachineFunction().getFunction();
bool IsTC = TLI.isInTailCallPosition(DAG, Op.getNode(), TCChain) &&
- F->getReturnType() == LCRTy;
+ F.getReturnType() == LCRTy;
if (IsTC)
InChain = TCChain;
@@ -7954,7 +7954,7 @@ void ARMTargetLowering::SetupEntryBlockF
MachineRegisterInfo *MRI = &MF->getRegInfo();
MachineConstantPool *MCP = MF->getConstantPool();
ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
- const Function *F = MF->getFunction();
+ const Function &F = MF->getFunction();
bool isThumb = Subtarget->isThumb();
bool isThumb2 = Subtarget->isThumb2();
@@ -7962,7 +7962,7 @@ void ARMTargetLowering::SetupEntryBlockF
unsigned PCLabelId = AFI->createPICLabelUId();
unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
ARMConstantPoolValue *CPV =
- ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj);
+ ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj);
unsigned CPI = MCP->getConstantPoolIndex(CPV, 4);
const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
@@ -8248,7 +8248,7 @@ void ARMTargetLowering::EmitSjLjDispatch
.add(predOps(ARMCC::AL));
} else {
MachineConstantPool *ConstantPool = MF->getConstantPool();
- Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
+ Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
// MachineConstantPool wants an explicit alignment.
@@ -8349,7 +8349,7 @@ void ARMTargetLowering::EmitSjLjDispatch
.add(predOps(ARMCC::AL));
} else {
MachineConstantPool *ConstantPool = MF->getConstantPool();
- Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
+ Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
// MachineConstantPool wants an explicit alignment.
@@ -8645,7 +8645,7 @@ ARMTargetLowering::EmitStructByval(Machi
UnitSize = 2;
} else {
// Check whether we can use NEON instructions.
- if (!MF->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat) &&
+ if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) &&
Subtarget->hasNEON()) {
if ((Align % 16 == 0) && SizeVal >= 16)
UnitSize = 16;
@@ -8751,7 +8751,7 @@ ARMTargetLowering::EmitStructByval(Machi
.add(predOps(ARMCC::AL));
} else {
MachineConstantPool *ConstantPool = MF->getConstantPool();
- Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
+ Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
// MachineConstantPool wants an explicit alignment.
@@ -12417,11 +12417,11 @@ EVT ARMTargetLowering::getOptimalMemOpTy
bool IsMemset, bool ZeroMemset,
bool MemcpyStrSrc,
MachineFunction &MF) const {
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
// See if we can use NEON instructions for this...
if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() &&
- !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
+ !F.hasFnAttribute(Attribute::NoImplicitFloat)) {
bool Fast;
if (Size >= 16 &&
(memOpAlign(SrcAlign, DstAlign, 16) ||
@@ -14364,7 +14364,7 @@ void ARMTargetLowering::insertCopiesSpli
// fine for CXX_FAST_TLS since the C++-style TLS access functions should be
// nounwind. If we want to generalize this later, we may need to emit
// CFI pseudo-instructions.
- assert(Entry->getParent()->getFunction()->hasFnAttribute(
+ assert(Entry->getParent()->getFunction().hasFnAttribute(
Attribute::NoUnwind) &&
"Function should be nounwind in insertCopiesSplitCSR!");
Entry->addLiveIn(*I);
Modified: llvm/trunk/lib/Target/ARM/ARMISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMISelLowering.h?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.h (original)
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.h Fri Dec 15 14:22:58 2017
@@ -692,8 +692,8 @@ class VectorType;
SDValue ThisVal) const;
bool supportSplitCSR(MachineFunction *MF) const override {
- return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
- MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
+ return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
}
void initializeSplitCSR(MachineBasicBlock *Entry) const override;
Modified: llvm/trunk/lib/Target/ARM/ARMLegalizerInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMLegalizerInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMLegalizerInfo.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMLegalizerInfo.cpp Fri Dec 15 14:22:58 2017
@@ -318,7 +318,7 @@ bool ARMLegalizerInfo::legalizeCustom(Ma
// Our divmod libcalls return a struct containing the quotient and the
// remainder. We need to create a virtual register for it.
- auto &Ctx = MIRBuilder.getMF().getFunction()->getContext();
+ auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
Type *ArgTy = Type::getInt32Ty(Ctx);
StructType *RetTy = StructType::get(Ctx, {ArgTy, ArgTy}, /* Packed */ true);
auto RetVal = MRI.createGenericVirtualRegister(
@@ -359,7 +359,7 @@ bool ARMLegalizerInfo::legalizeCustom(Ma
return true;
}
- auto &Ctx = MIRBuilder.getMF().getFunction()->getContext();
+ auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
assert((OpSize == 32 || OpSize == 64) && "Unsupported operand size");
auto *ArgTy = OpSize == 32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx);
auto *RetTy = Type::getInt32Ty(Ctx);
Modified: llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp Fri Dec 15 14:22:58 2017
@@ -1273,7 +1273,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSM
// can still change to a writeback form as that will save us 2 bytes
// of code size. It can create WAW hazards though, so only do it if
// we're minimizing code size.
- if (!MBB.getParent()->getFunction()->optForMinSize() || !BaseKill)
+ if (!MBB.getParent()->getFunction().optForMinSize() || !BaseKill)
return false;
bool HighRegsUsed = false;
@@ -1953,7 +1953,7 @@ bool ARMLoadStoreOpt::CombineMovBx(Machi
}
bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
MF = &Fn;
@@ -2035,7 +2035,7 @@ INITIALIZE_PASS(ARMPreAllocLoadStoreOpt,
ARM_PREALLOC_LOAD_STORE_OPT_NAME, false, false)
bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
- if (AssumeMisalignedLoadStores || skipFunction(*Fn.getFunction()))
+ if (AssumeMisalignedLoadStores || skipFunction(Fn.getFunction()))
return false;
TD = &Fn.getDataLayout();
@@ -2130,9 +2130,9 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWor
return false;
unsigned Align = (*Op0->memoperands_begin())->getAlignment();
- const Function *Func = MF->getFunction();
+ const Function &Func = MF->getFunction();
unsigned ReqAlign = STI->hasV6Ops()
- ? TD->getABITypeAlignment(Type::getInt64Ty(Func->getContext()))
+ ? TD->getABITypeAlignment(Type::getInt64Ty(Func.getContext()))
: 8; // Pre-v6 need 8-byte align
if (Align < ReqAlign)
return false;
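The same accessor feeds MachineFunctionPass::skipFunction() directly, so the explicit dereference disappears from the usual runOnMachineFunction preamble. A sketch with a hypothetical pass name, not part of this patch:

#include "llvm/CodeGen/MachineFunctionPass.h"
using namespace llvm;

namespace {
// Illustrative skeleton only.
struct MyLoadStorePass : public MachineFunctionPass {
  static char ID;
  MyLoadStorePass() : MachineFunctionPass(ID) {}
  bool runOnMachineFunction(MachineFunction &Fn) override {
    if (skipFunction(Fn.getFunction()))   // was: skipFunction(*Fn.getFunction())
      return false;
    // ... actual pass body ...
    return true;
  }
};
} // end anonymous namespace
char MyLoadStorePass::ID = 0;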
Modified: llvm/trunk/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMOptimizeBarriersPass.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMOptimizeBarriersPass.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMOptimizeBarriersPass.cpp Fri Dec 15 14:22:58 2017
@@ -49,7 +49,7 @@ static bool CanMovePastDMB(const Machine
}
bool ARMOptimizeBarriersPass::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
// Vector to store the DMBs we will remove after the first iteration
Modified: llvm/trunk/lib/Target/ARM/ARMSelectionDAGInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMSelectionDAGInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMSelectionDAGInfo.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMSelectionDAGInfo.cpp Fri Dec 15 14:22:58 2017
@@ -171,7 +171,7 @@ SDValue ARMSelectionDAGInfo::EmitTargetC
// Code size optimisation: do not inline memcpy if expansion results in
// more instructions than the library call.

- if (NumMEMCPYs > 1 && DAG.getMachineFunction().getFunction()->optForMinSize()) {
+ if (NumMEMCPYs > 1 && DAG.getMachineFunction().getFunction().optForMinSize()) {
return SDValue();
}
Modified: llvm/trunk/lib/Target/ARM/ARMSubtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMSubtarget.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMSubtarget.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMSubtarget.cpp Fri Dec 15 14:22:58 2017
@@ -373,7 +373,7 @@ bool ARMSubtarget::useStride4VFPs(const
// For general targets, the prologue can grow when VFPs are allocated with
// stride 4 (more vpush instructions). But WatchOS uses a compact unwind
// format which it's more important to get right.
- return isTargetWatchABI() || (isSwift() && !MF.getFunction()->optForMinSize());
+ return isTargetWatchABI() || (isSwift() && !MF.getFunction().optForMinSize());
}
bool ARMSubtarget::useMovt(const MachineFunction &MF) const {
@@ -381,7 +381,7 @@ bool ARMSubtarget::useMovt(const Machine
// immediates as it is inherently position independent, and may be out of
// range otherwise.
return !NoMovt && hasV8MBaselineOps() &&
- (isTargetWindows() || !MF.getFunction()->optForMinSize() || genExecuteOnly());
+ (isTargetWindows() || !MF.getFunction().optForMinSize() || genExecuteOnly());
}
bool ARMSubtarget::useFastISel() const {
Modified: llvm/trunk/lib/Target/ARM/MLxExpansionPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/MLxExpansionPass.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/MLxExpansionPass.cpp (original)
+++ llvm/trunk/lib/Target/ARM/MLxExpansionPass.cpp Fri Dec 15 14:22:58 2017
@@ -371,7 +371,7 @@ bool MLxExpansion::ExpandFPMLxInstructio
}
bool MLxExpansion::runOnMachineFunction(MachineFunction &Fn) {
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
TII = static_cast<const ARMBaseInstrInfo *>(Fn.getSubtarget().getInstrInfo());
Modified: llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp (original)
+++ llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp Fri Dec 15 14:22:58 2017
@@ -449,7 +449,7 @@ Thumb2SizeReduce::ReduceLoadStore(Machin
break;
case ARM::t2LDR_POST:
case ARM::t2STR_POST: {
- if (!MBB.getParent()->getFunction()->optForMinSize())
+ if (!MBB.getParent()->getFunction().optForMinSize())
return false;
if (!MI->hasOneMemOperand() ||
@@ -1084,7 +1084,7 @@ bool Thumb2SizeReduce::ReduceMBB(Machine
}
bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
- if (PredicateFtor && !PredicateFtor(*MF.getFunction()))
+ if (PredicateFtor && !PredicateFtor(MF.getFunction()))
return false;
STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
@@ -1094,8 +1094,8 @@ bool Thumb2SizeReduce::runOnMachineFunct
TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo());
// Optimizing / minimizing size? Minimizing size implies optimizing for size.
- OptimizeSize = MF.getFunction()->optForSize();
- MinimizeSize = MF.getFunction()->optForMinSize();
+ OptimizeSize = MF.getFunction().optForSize();
+ MinimizeSize = MF.getFunction().optForMinSize();
BlockInfo.clear();
BlockInfo.resize(MF.getNumBlockIDs());
Modified: llvm/trunk/lib/Target/ARM/ThumbRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ThumbRegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ThumbRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ThumbRegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -70,7 +70,7 @@ static void emitThumb1LoadConstPool(Mach
const TargetInstrInfo &TII = *STI.getInstrInfo();
MachineConstantPool *ConstantPool = MF.getConstantPool();
const Constant *C = ConstantInt::get(
- Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
+ Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRpci))
@@ -89,7 +89,7 @@ static void emitThumb2LoadConstPool(Mach
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
MachineConstantPool *ConstantPool = MF.getConstantPool();
const Constant *C = ConstantInt::get(
- Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
+ Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci))
Modified: llvm/trunk/lib/Target/AVR/AVRFrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AVR/AVRFrameLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AVR/AVRFrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/AVR/AVRFrameLowering.cpp Fri Dec 15 14:22:58 2017
@@ -53,7 +53,7 @@ bool AVRFrameLowering::hasReservedCallFr
void AVRFrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
- CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
+ CallingConv::ID CallConv = MF.getFunction().getCallingConv();
DebugLoc DL = (MBBI != MBB.end()) ? MBBI->getDebugLoc() : DebugLoc();
const AVRSubtarget &STI = MF.getSubtarget<AVRSubtarget>();
const AVRInstrInfo &TII = *STI.getInstrInfo();
@@ -143,7 +143,7 @@ void AVRFrameLowering::emitPrologue(Mach
void AVRFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
- CallingConv::ID CallConv = MF.getFunction()->getCallingConv();
+ CallingConv::ID CallConv = MF.getFunction().getCallingConv();
bool isHandler = (CallConv == CallingConv::AVR_INTR ||
CallConv == CallingConv::AVR_SIGNAL);
Modified: llvm/trunk/lib/Target/AVR/AVRISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AVR/AVRISelLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AVR/AVRISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AVR/AVRISelLowering.cpp Fri Dec 15 14:22:58 2017
@@ -1039,7 +1039,7 @@ SDValue AVRTargetLowering::LowerFormalAr
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
- analyzeArguments(nullptr, MF.getFunction(), &DL, 0, &Ins, CallConv, ArgLocs, CCInfo,
+ analyzeArguments(nullptr, &MF.getFunction(), &DL, 0, &Ins, CallConv, ArgLocs, CCInfo,
false, isVarArg);
SDValue ArgValue;
@@ -1391,7 +1391,7 @@ AVRTargetLowering::LowerReturn(SDValue C
// Don't emit the ret/reti instruction when the naked attribute is present in
// the function being compiled.
- if (MF.getFunction()->getAttributes().hasAttribute(
+ if (MF.getFunction().getAttributes().hasAttribute(
AttributeList::FunctionIndex, Attribute::Naked)) {
return Chain;
}
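Where a callee still takes the IR function by pointer (as analyzeArguments does in the AVR hunk above), the caller now takes the address of the returned reference. Sketch with a hypothetical consumer:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
using namespace llvm;

// Hypothetical interface that still wants a pointer.
static void recordFunction(const Function *) {}

static void example(MachineFunction &MF) {
  // Old: recordFunction(MF.getFunction());
  recordFunction(&MF.getFunction());   // take the address of the reference
}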
Modified: llvm/trunk/lib/Target/AVR/AVRRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AVR/AVRRegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AVR/AVRRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/AVR/AVRRegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -34,7 +34,7 @@ AVRRegisterInfo::AVRRegisterInfo() : AVR
const uint16_t *
AVRRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- CallingConv::ID CC = MF->getFunction()->getCallingConv();
+ CallingConv::ID CC = MF->getFunction().getCallingConv();
return ((CC == CallingConv::AVR_INTR || CC == CallingConv::AVR_SIGNAL)
? CSR_Interrupts_SaveList
Modified: llvm/trunk/lib/Target/BPF/BPFISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/BPF/BPFISelLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/BPF/BPFISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/BPF/BPFISelLowering.cpp Fri Dec 15 14:22:58 2017
@@ -36,7 +36,7 @@ using namespace llvm;
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg) {
MachineFunction &MF = DAG.getMachineFunction();
DAG.getContext()->diagnose(
- DiagnosticInfoUnsupported(*MF.getFunction(), Msg, DL.getDebugLoc()));
+ DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}
static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg,
@@ -48,7 +48,7 @@ static void fail(const SDLoc &DL, Select
Val->print(OS);
OS.flush();
DAG.getContext()->diagnose(
- DiagnosticInfoUnsupported(*MF.getFunction(), Str, DL.getDebugLoc()));
+ DiagnosticInfoUnsupported(MF.getFunction(), Str, DL.getDebugLoc()));
}
BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
@@ -227,7 +227,7 @@ SDValue BPFTargetLowering::LowerFormalAr
}
}
- if (IsVarArg || MF.getFunction()->hasStructRetAttr()) {
+ if (IsVarArg || MF.getFunction().hasStructRetAttr()) {
fail(DL, DAG, "functions with VarArgs or StructRet are not supported");
}
@@ -382,7 +382,7 @@ BPFTargetLowering::LowerReturn(SDValue C
// CCState - Info about the registers and stack slot.
CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
- if (MF.getFunction()->getReturnType()->isAggregateType()) {
+ if (MF.getFunction().getReturnType()->isAggregateType()) {
fail(DL, DAG, "only integer returns supported");
return DAG.getNode(Opc, DL, MVT::Other, Chain);
}
Modified: llvm/trunk/lib/Target/BPF/BPFRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/BPF/BPFRegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/BPF/BPFRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/BPF/BPFRegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -45,12 +45,12 @@ BitVector BPFRegisterInfo::getReservedRe
static void WarnSize(int Offset, MachineFunction &MF, DebugLoc& DL)
{
if (Offset <= -512) {
- auto F = MF.getFunction();
- DiagnosticInfoUnsupported DiagStackSize(*F,
+ const Function &F = MF.getFunction();
+ DiagnosticInfoUnsupported DiagStackSize(F,
"Looks like the BPF stack limit of 512 bytes is exceeded. "
"Please move large on stack variables into BPF per-cpu array map.\n",
DL);
- F->getContext().diagnose(DiagStackSize);
+ F.getContext().diagnose(DiagStackSize);
}
}
Modified: llvm/trunk/lib/Target/Hexagon/HexagonBitSimplify.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonBitSimplify.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonBitSimplify.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonBitSimplify.cpp Fri Dec 15 14:22:58 2017
@@ -2631,7 +2631,7 @@ bool BitSimplification::processBlock(Mac
}
bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
auto &HST = MF.getSubtarget<HexagonSubtarget>();
@@ -3181,7 +3181,7 @@ bool HexagonLoopRescheduling::processLoo
}
bool HexagonLoopRescheduling::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
auto &HST = MF.getSubtarget<HexagonSubtarget>();
Modified: llvm/trunk/lib/Target/Hexagon/HexagonBitTracker.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonBitTracker.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonBitTracker.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonBitTracker.cpp Fri Dec 15 14:22:58 2017
@@ -61,7 +61,7 @@ HexagonEvaluator::HexagonEvaluator(const
// passed via registers.
unsigned InVirtReg, InPhysReg = 0;
- for (const Argument &Arg : MF.getFunction()->args()) {
+ for (const Argument &Arg : MF.getFunction().args()) {
Type *ATy = Arg.getType();
unsigned Width = 0;
if (ATy->isIntegerTy())
Modified: llvm/trunk/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonCFGOptimizer.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonCFGOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonCFGOptimizer.cpp Fri Dec 15 14:22:58 2017
@@ -114,7 +114,7 @@ bool HexagonCFGOptimizer::isOnFallThroug
}
bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
- if (skipFunction(*Fn.getFunction()))
+ if (skipFunction(Fn.getFunction()))
return false;
// Loop over all of the basic blocks.
Modified: llvm/trunk/lib/Target/Hexagon/HexagonConstExtenders.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonConstExtenders.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonConstExtenders.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonConstExtenders.cpp Fri Dec 15 14:22:58 2017
@@ -1831,7 +1831,7 @@ const MachineOperand &HCE::getStoredValu
}
bool HCE::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
DEBUG(MF.print(dbgs() << "Before " << getPassName() << '\n', nullptr));
Modified: llvm/trunk/lib/Target/Hexagon/HexagonConstPropagation.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonConstPropagation.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonConstPropagation.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonConstPropagation.cpp Fri Dec 15 14:22:58 2017
@@ -280,7 +280,7 @@ namespace {
public:
MachineConstEvaluator(MachineFunction &Fn)
: TRI(*Fn.getSubtarget().getRegisterInfo()),
- MF(Fn), CX(Fn.getFunction()->getContext()) {}
+ MF(Fn), CX(Fn.getFunction().getContext()) {}
virtual ~MachineConstEvaluator() = default;
// The required interface:
@@ -1890,10 +1890,8 @@ namespace {
}
bool runOnMachineFunction(MachineFunction &MF) override {
- const Function *F = MF.getFunction();
- if (!F)
- return false;
- if (skipFunction(*F))
+ const Function &F = MF.getFunction();
+ if (skipFunction(F))
return false;
HexagonConstEvaluator HCE(MF);
@@ -2925,7 +2923,7 @@ bool HexagonConstEvaluator::rewriteHexCo
DEBUG({
if (!NewInstrs.empty()) {
MachineFunction &MF = *MI.getParent()->getParent();
- dbgs() << "In function: " << MF.getFunction()->getName() << "\n";
+ dbgs() << "In function: " << MF.getName() << "\n";
dbgs() << "Rewrite: for " << MI << " created " << *NewInstrs[0];
for (unsigned i = 1; i < NewInstrs.size(); ++i)
dbgs() << " " << *NewInstrs[i];
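The constant-propagation hunk above also drops a null check that can no longer fire; out-of-tree passes carrying the same defensive guard can simplify identically. Sketch of the new preamble:

bool runOnMachineFunction(MachineFunction &MF) override {
  // No '!F' guard needed: the accessor returns a reference, never null.
  const Function &F = MF.getFunction();
  if (skipFunction(F))
    return false;
  // ...
  return true;
}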
Modified: llvm/trunk/lib/Target/Hexagon/HexagonCopyToCombine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonCopyToCombine.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonCopyToCombine.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonCopyToCombine.cpp Fri Dec 15 14:22:58 2017
@@ -459,7 +459,7 @@ HexagonCopyToCombine::findPotentialNewif
}
bool HexagonCopyToCombine::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
if (IsCombinesDisabled) return false;
@@ -471,8 +471,8 @@ bool HexagonCopyToCombine::runOnMachineF
TRI = ST->getRegisterInfo();
TII = ST->getInstrInfo();
- const Function *F = MF.getFunction();
- bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
+ const Function &F = MF.getFunction();
+ bool OptForSize = F.hasFnAttribute(Attribute::OptimizeForSize);
// Combine aggressively (for code size)
ShouldCombineAggressively =
Modified: llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp Fri Dec 15 14:22:58 2017
@@ -1047,7 +1047,7 @@ void HexagonEarlyIfConversion::simplifyF
}
bool HexagonEarlyIfConversion::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
auto &ST = MF.getSubtarget<HexagonSubtarget>();
Modified: llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp Fri Dec 15 14:22:58 2017
@@ -1243,7 +1243,7 @@ bool HexagonExpandCondsets::coalesceSegm
}
bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
HII = static_cast<const HexagonInstrInfo*>(MF.getSubtarget().getInstrInfo());
@@ -1253,7 +1253,7 @@ bool HexagonExpandCondsets::runOnMachine
MRI = &MF.getRegInfo();
DEBUG(LIS->print(dbgs() << "Before expand-condsets\n",
- MF.getFunction()->getParent()));
+ MF.getFunction().getParent()));
bool Changed = false;
std::set<unsigned> CoalUpd, PredUpd;
@@ -1281,7 +1281,7 @@ bool HexagonExpandCondsets::runOnMachine
KillUpd.insert(Op.getReg());
updateLiveness(KillUpd, false, true, false);
DEBUG(LIS->print(dbgs() << "After coalescing\n",
- MF.getFunction()->getParent()));
+ MF.getFunction().getParent()));
// First, simply split all muxes into a pair of conditional transfers
// and update the live intervals to reflect the new arrangement. The
@@ -1298,7 +1298,7 @@ bool HexagonExpandCondsets::runOnMachine
// (because of predicated defs), so make sure they are left untouched.
// Predication does not use live intervals.
DEBUG(LIS->print(dbgs() << "After splitting\n",
- MF.getFunction()->getParent()));
+ MF.getFunction().getParent()));
// Traverse all blocks and collapse predicable instructions feeding
// conditional transfers into predicated instructions.
@@ -1307,7 +1307,7 @@ bool HexagonExpandCondsets::runOnMachine
for (auto &B : MF)
Changed |= predicateInBlock(B, PredUpd);
DEBUG(LIS->print(dbgs() << "After predicating\n",
- MF.getFunction()->getParent()));
+ MF.getFunction().getParent()));
PredUpd.insert(CoalUpd.begin(), CoalUpd.end());
updateLiveness(PredUpd, true, true, true);
@@ -1315,7 +1315,7 @@ bool HexagonExpandCondsets::runOnMachine
DEBUG({
if (Changed)
LIS->print(dbgs() << "After expand-condsets\n",
- MF.getFunction()->getParent());
+ MF.getFunction().getParent());
});
return Changed;
Modified: llvm/trunk/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonFixupHwLoops.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonFixupHwLoops.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonFixupHwLoops.cpp Fri Dec 15 14:22:58 2017
@@ -89,7 +89,7 @@ static bool isHardwareLoop(const Machine
}
bool HexagonFixupHwLoops::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
return fixupLoopInstrs(MF);
}
Modified: llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp Fri Dec 15 14:22:58 2017
@@ -225,7 +225,7 @@ namespace {
bool HexagonCallFrameInformation::runOnMachineFunction(MachineFunction &MF) {
auto &HFI = *MF.getSubtarget<HexagonSubtarget>().getFrameLowering();
bool NeedCFI = MF.getMMI().hasDebugInfo() ||
- MF.getFunction()->needsUnwindTableEntry();
+ MF.getFunction().needsUnwindTableEntry();
if (!NeedCFI)
return false;
@@ -375,17 +375,17 @@ static bool isRestoreCall(unsigned Opc)
}
static inline bool isOptNone(const MachineFunction &MF) {
- return MF.getFunction()->hasFnAttribute(Attribute::OptimizeNone) ||
+ return MF.getFunction().hasFnAttribute(Attribute::OptimizeNone) ||
MF.getTarget().getOptLevel() == CodeGenOpt::None;
}
static inline bool isOptSize(const MachineFunction &MF) {
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
return F.optForSize() && !F.optForMinSize();
}
static inline bool isMinSize(const MachineFunction &MF) {
- return MF.getFunction()->optForMinSize();
+ return MF.getFunction().optForMinSize();
}
/// Implements shrink-wrapping of the stack frame. By default, stack frame
@@ -960,7 +960,7 @@ void HexagonFrameLowering::insertCFIInst
}
bool HexagonFrameLowering::hasFP(const MachineFunction &MF) const {
- if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
+ if (MF.getFunction().hasFnAttribute(Attribute::Naked))
return false;
auto &MFI = MF.getFrameInfo();
@@ -1396,8 +1396,7 @@ static void dump_registers(BitVector &Re
bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,
const TargetRegisterInfo *TRI, std::vector<CalleeSavedInfo> &CSI) const {
- DEBUG(dbgs() << __func__ << " on "
- << MF.getFunction()->getName() << '\n');
+ DEBUG(dbgs() << __func__ << " on " << MF.getName() << '\n');
MachineFrameInfo &MFI = MF.getFrameInfo();
BitVector SRegs(Hexagon::NUM_TARGET_REGS);
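Several hunks (HexagonConstPropagation and HexagonFrameLowering above) shorten MF.getFunction()->getName() to MF.getName(); as far as I can tell MachineFunction::getName() already forwards to the IR function's name, so the extra hop buys nothing. Sketch of the shorter debug output (the pass name in the string is illustrative):

DEBUG(dbgs() << "my-pass on " << MF.getName() << '\n');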
Modified: llvm/trunk/lib/Target/Hexagon/HexagonGenInsert.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonGenInsert.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonGenInsert.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonGenInsert.cpp Fri Dec 15 14:22:58 2017
@@ -1482,7 +1482,7 @@ bool HexagonGenInsert::removeDeadCode(Ma
}
bool HexagonGenInsert::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
bool Timing = OptTiming, TimingDetail = Timing && OptTimingDetail;
Modified: llvm/trunk/lib/Target/Hexagon/HexagonGenMux.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonGenMux.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonGenMux.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonGenMux.cpp Fri Dec 15 14:22:58 2017
@@ -368,7 +368,7 @@ bool HexagonGenMux::genMuxInBlock(Machin
}
bool HexagonGenMux::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
HII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
HRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
Modified: llvm/trunk/lib/Target/Hexagon/HexagonGenPredicate.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonGenPredicate.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonGenPredicate.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonGenPredicate.cpp Fri Dec 15 14:22:58 2017
@@ -492,7 +492,7 @@ bool HexagonGenPredicate::eliminatePredC
}
bool HexagonGenPredicate::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
Modified: llvm/trunk/lib/Target/Hexagon/HexagonHardwareLoops.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonHardwareLoops.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonHardwareLoops.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonHardwareLoops.cpp Fri Dec 15 14:22:58 2017
@@ -377,7 +377,7 @@ FunctionPass *llvm::createHexagonHardwar
bool HexagonHardwareLoops::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********* Hexagon Hardware Loops *********\n");
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
bool Changed = false;
Modified: llvm/trunk/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp Fri Dec 15 14:22:58 2017
@@ -1746,10 +1746,10 @@ unsigned HexagonDAGToDAGISel::getUsesInF
return GAUsesInFunction[V];
unsigned Result = 0;
- const Function *CurF = CurDAG->getMachineFunction().getFunction();
+ const Function &CurF = CurDAG->getMachineFunction().getFunction();
for (const User *U : V->users()) {
if (isa<Instruction>(U) &&
- cast<Instruction>(U)->getParent()->getParent() == CurF)
+ cast<Instruction>(U)->getParent()->getParent() == &CurF)
++Result;
}
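When the old pointer was used for identity comparisons, as in the HexagonISelDAGToDAG hunk above, the updated code compares against the address of the reference. Sketch reusing CurDAG and V from that hunk (the counter name is illustrative):

const Function &CurF = CurDAG->getMachineFunction().getFunction();
unsigned UsesInCurF = 0;
for (const User *U : V->users())
  if (auto *I = dyn_cast<Instruction>(U))
    if (I->getParent()->getParent() == &CurF)   // compare addresses now
      ++UsesInCurF;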
Modified: llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp Fri Dec 15 14:22:58 2017
@@ -717,12 +717,12 @@ HexagonTargetLowering::LowerCall(TargetL
else
CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
- auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
+ auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
if (Attr.getValueAsString() == "true")
IsTailCall = false;
if (IsTailCall) {
- bool StructAttrFlag = MF.getFunction()->hasStructRetAttr();
+ bool StructAttrFlag = MF.getFunction().hasStructRetAttr();
IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
IsVarArg, IsStructRet,
StructAttrFlag,
@@ -3006,8 +3006,8 @@ bool HexagonTargetLowering::IsEligibleFo
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const {
- const Function *CallerF = DAG.getMachineFunction().getFunction();
- CallingConv::ID CallerCC = CallerF->getCallingConv();
+ const Function &CallerF = DAG.getMachineFunction().getFunction();
+ CallingConv::ID CallerCC = CallerF.getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
// ***************************************************************************
Modified: llvm/trunk/lib/Target/Hexagon/HexagonMachineScheduler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonMachineScheduler.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonMachineScheduler.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonMachineScheduler.cpp Fri Dec 15 14:22:58 2017
@@ -188,7 +188,7 @@ bool VLIWResourceModel::reserveResources
void VLIWMachineScheduler::schedule() {
DEBUG(dbgs() << "********** MI Converging Scheduling VLIW "
<< printMBBReference(*BB) << " " << BB->getName() << " in_func "
- << BB->getParent()->getFunction()->getName() << " at loop depth "
+ << BB->getParent()->getName() << " at loop depth "
<< MLI->getLoopDepth(BB) << " \n");
buildDAGWithRegPressure();
Modified: llvm/trunk/lib/Target/Hexagon/HexagonNewValueJump.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonNewValueJump.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonNewValueJump.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonNewValueJump.cpp Fri Dec 15 14:22:58 2017
@@ -434,7 +434,7 @@ bool HexagonNewValueJump::runOnMachineFu
DEBUG(dbgs() << "********** Hexagon New Value Jump **********\n"
<< "********** Function: " << MF.getName() << "\n");
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
// If we move NewValueJump before register allocation we'll need live variable
Modified: llvm/trunk/lib/Target/Hexagon/HexagonOptAddrMode.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonOptAddrMode.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonOptAddrMode.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonOptAddrMode.cpp Fri Dec 15 14:22:58 2017
@@ -595,7 +595,7 @@ bool HexagonOptAddrMode::processBlock(No
}
bool HexagonOptAddrMode::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
bool Changed = false;
Modified: llvm/trunk/lib/Target/Hexagon/HexagonPeephole.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonPeephole.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonPeephole.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonPeephole.cpp Fri Dec 15 14:22:58 2017
@@ -108,7 +108,7 @@ INITIALIZE_PASS(HexagonPeephole, "hexago
false, false)
bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
QII = static_cast<const HexagonInstrInfo *>(MF.getSubtarget().getInstrInfo());
Modified: llvm/trunk/lib/Target/Hexagon/HexagonRDFOpt.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonRDFOpt.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonRDFOpt.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonRDFOpt.cpp Fri Dec 15 14:22:58 2017
@@ -280,7 +280,7 @@ bool HexagonDCE::rewrite(NodeAddr<InstrN
}
bool HexagonRDFOpt::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
if (RDFLimit.getPosition()) {
Modified: llvm/trunk/lib/Target/Hexagon/HexagonSplitDouble.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonSplitDouble.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonSplitDouble.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonSplitDouble.cpp Fri Dec 15 14:22:58 2017
@@ -1163,7 +1163,7 @@ bool HexagonSplitDoubleRegs::runOnMachin
DEBUG(dbgs() << "Splitting double registers in function: "
<< MF.getName() << '\n');
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
auto &ST = MF.getSubtarget<HexagonSubtarget>();
Modified: llvm/trunk/lib/Target/Hexagon/HexagonStoreWidening.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonStoreWidening.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonStoreWidening.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonStoreWidening.cpp Fri Dec 15 14:22:58 2017
@@ -585,7 +585,7 @@ bool HexagonStoreWidening::processBasicB
}
bool HexagonStoreWidening::runOnMachineFunction(MachineFunction &MFn) {
- if (skipFunction(*MFn.getFunction()))
+ if (skipFunction(MFn.getFunction()))
return false;
MF = &MFn;
Modified: llvm/trunk/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp Fri Dec 15 14:22:58 2017
@@ -199,7 +199,7 @@ static MachineBasicBlock::iterator moveI
}
bool HexagonPacketizer::runOnMachineFunction(MachineFunction &MF) {
- if (DisablePacketizer || skipFunction(*MF.getFunction()))
+ if (DisablePacketizer || skipFunction(MF.getFunction()))
return false;
HII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
Modified: llvm/trunk/lib/Target/Hexagon/RDFGraph.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/RDFGraph.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/RDFGraph.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/RDFGraph.cpp Fri Dec 15 14:22:58 2017
@@ -766,7 +766,7 @@ unsigned DataFlowGraph::DefStack::nextDo
RegisterSet DataFlowGraph::getLandingPadLiveIns() const {
RegisterSet LR;
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
const Constant *PF = F.hasPersonalityFn() ? F.getPersonalityFn()
: nullptr;
const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
Modified: llvm/trunk/lib/Target/Lanai/LanaiISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Lanai/LanaiISelLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Lanai/LanaiISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/Lanai/LanaiISelLowering.cpp Fri Dec 15 14:22:58 2017
@@ -513,7 +513,7 @@ SDValue LanaiTargetLowering::LowerCCCArg
// The Lanai ABI for returning structs by value requires that we copy
// the sret argument into rv for the return. Save the argument into
// a virtual register so that we can access it from the return points.
- if (MF.getFunction()->hasStructRetAttr()) {
+ if (MF.getFunction().hasStructRetAttr()) {
unsigned Reg = LanaiMFI->getSRetReturnReg();
if (!Reg) {
Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32));
@@ -568,7 +568,7 @@ LanaiTargetLowering::LowerReturn(SDValue
// the sret argument into rv for the return. We saved the argument into
// a virtual register in the entry block, so now we copy the value out
// and into rv.
- if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
+ if (DAG.getMachineFunction().getFunction().hasStructRetAttr()) {
MachineFunction &MF = DAG.getMachineFunction();
LanaiMachineFunctionInfo *LanaiMFI = MF.getInfo<LanaiMachineFunctionInfo>();
unsigned Reg = LanaiMFI->getSRetReturnReg();
Modified: llvm/trunk/lib/Target/MSP430/MSP430ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/MSP430/MSP430ISelLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/MSP430/MSP430ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/MSP430/MSP430ISelLowering.cpp Fri Dec 15 14:22:58 2017
@@ -746,7 +746,7 @@ MSP430TargetLowering::LowerReturn(SDValu
RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
}
- if (MF.getFunction()->hasStructRetAttr()) {
+ if (MF.getFunction().hasStructRetAttr()) {
MSP430MachineFunctionInfo *FuncInfo = MF.getInfo<MSP430MachineFunctionInfo>();
unsigned Reg = FuncInfo->getSRetReturnReg();
Modified: llvm/trunk/lib/Target/MSP430/MSP430RegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/MSP430/MSP430RegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/MSP430/MSP430RegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/MSP430/MSP430RegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -38,7 +38,7 @@ MSP430RegisterInfo::MSP430RegisterInfo()
const MCPhysReg*
MSP430RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const MSP430FrameLowering *TFI = getFrameLowering(*MF);
- const Function* F = MF->getFunction();
+ const Function* F = &MF->getFunction();
static const MCPhysReg CalleeSavedRegs[] = {
MSP430::FP, MSP430::R5, MSP430::R6, MSP430::R7,
MSP430::R8, MSP430::R9, MSP430::R10,
Modified: llvm/trunk/lib/Target/Mips/MipsAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsAsmPrinter.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsAsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsAsmPrinter.cpp Fri Dec 15 14:22:58 2017
@@ -381,7 +381,7 @@ void MipsAsmPrinter::EmitFunctionBodySta
MCInstLowering.Initialize(&MF->getContext());
- bool IsNakedFunction = MF->getFunction()->hasFnAttribute(Attribute::Naked);
+ bool IsNakedFunction = MF->getFunction().hasFnAttribute(Attribute::Naked);
if (!IsNakedFunction)
emitFrameDirective();
Modified: llvm/trunk/lib/Target/Mips/MipsCCState.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsCCState.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsCCState.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsCCState.cpp Fri Dec 15 14:22:58 2017
@@ -101,9 +101,9 @@ void MipsCCState::PreAnalyzeReturnForF12
const MachineFunction &MF = getMachineFunction();
for (unsigned i = 0; i < Outs.size(); ++i) {
OriginalArgWasF128.push_back(
- originalTypeIsF128(MF.getFunction()->getReturnType(), nullptr));
+ originalTypeIsF128(MF.getFunction().getReturnType(), nullptr));
OriginalArgWasFloat.push_back(
- MF.getFunction()->getReturnType()->isFloatingPointTy());
+ MF.getFunction().getReturnType()->isFloatingPointTy());
}
}
@@ -149,7 +149,7 @@ void MipsCCState::PreAnalyzeFormalArgume
const SmallVectorImpl<ISD::InputArg> &Ins) {
const MachineFunction &MF = getMachineFunction();
for (unsigned i = 0; i < Ins.size(); ++i) {
- Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
+ Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
// SRet arguments cannot originate from f128 or {f128} returns so we just
// push false. We have to handle this specially since SRet arguments
@@ -161,7 +161,7 @@ void MipsCCState::PreAnalyzeFormalArgume
continue;
}
- assert(Ins[i].getOrigArgIndex() < MF.getFunction()->arg_size());
+ assert(Ins[i].getOrigArgIndex() < MF.getFunction().arg_size());
std::advance(FuncArg, Ins[i].getOrigArgIndex());
OriginalArgWasF128.push_back(
Modified: llvm/trunk/lib/Target/Mips/MipsConstantIslandPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsConstantIslandPass.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsConstantIslandPass.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsConstantIslandPass.cpp Fri Dec 15 14:22:58 2017
@@ -1661,7 +1661,7 @@ void MipsConstantIslands::prescanForCons
int64_t V = Literal.getImm();
DEBUG(dbgs() << "literal " << V << "\n");
Type *Int32Ty =
- Type::getInt32Ty(MF->getFunction()->getContext());
+ Type::getInt32Ty(MF->getFunction().getContext());
const Constant *C = ConstantInt::get(Int32Ty, V);
unsigned index = MCP->getConstantPoolIndex(C, 4);
I->getOperand(2).ChangeToImmediate(index);
Modified: llvm/trunk/lib/Target/Mips/MipsISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsISelLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsISelLowering.cpp Fri Dec 15 14:22:58 2017
@@ -3359,10 +3359,10 @@ SDValue MipsTargetLowering::LowerFormalA
MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
- const Function *Func = DAG.getMachineFunction().getFunction();
- Function::const_arg_iterator FuncArg = Func->arg_begin();
+ const Function &Func = DAG.getMachineFunction().getFunction();
+ Function::const_arg_iterator FuncArg = Func.arg_begin();
- if (Func->hasFnAttribute("interrupt") && !Func->arg_empty())
+ if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
report_fatal_error(
"Functions with the interrupt attribute cannot have arguments!");
@@ -3600,7 +3600,7 @@ MipsTargetLowering::LowerReturn(SDValue
// the sret argument into $v0 for the return. We saved the argument into
// a virtual register in the entry block, so now we copy the value out
// and into $v0.
- if (MF.getFunction()->hasStructRetAttr()) {
+ if (MF.getFunction().hasStructRetAttr()) {
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
unsigned Reg = MipsFI->getSRetReturnReg();
@@ -3622,7 +3622,7 @@ MipsTargetLowering::LowerReturn(SDValue
RetOps.push_back(Flag);
// ISRs must use "eret".
- if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt"))
+ if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt"))
return LowerInterruptReturn(RetOps, DL, DAG);
// Standard return on Mips is a "jr $ra"
Modified: llvm/trunk/lib/Target/Mips/MipsRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsRegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsRegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -93,8 +93,8 @@ MipsRegisterInfo::getRegPressureLimit(co
const MCPhysReg *
MipsRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const MipsSubtarget &Subtarget = MF->getSubtarget<MipsSubtarget>();
- const Function *F = MF->getFunction();
- if (F->hasFnAttribute("interrupt")) {
+ const Function &F = MF->getFunction();
+ if (F.hasFnAttribute("interrupt")) {
if (Subtarget.hasMips64())
return Subtarget.hasMips64r6() ? CSR_Interrupt_64R6_SaveList
: CSR_Interrupt_64_SaveList;
@@ -238,7 +238,7 @@ getReservedRegs(const MachineFunction &M
Reserved.set(Mips::RA_64);
Reserved.set(Mips::T0);
Reserved.set(Mips::T1);
- if (MF.getFunction()->hasFnAttribute("saveS2") || MipsFI->hasSaveS2())
+ if (MF.getFunction().hasFnAttribute("saveS2") || MipsFI->hasSaveS2())
Reserved.set(Mips::S2);
}
Modified: llvm/trunk/lib/Target/Mips/MipsSEFrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsSEFrameLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsSEFrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsSEFrameLowering.cpp Fri Dec 15 14:22:58 2017
@@ -434,7 +434,7 @@ void MipsSEFrameLowering::emitPrologue(M
BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex);
- if (MF.getFunction()->hasFnAttribute("interrupt"))
+ if (MF.getFunction().hasFnAttribute("interrupt"))
emitInterruptPrologueStub(MF, MBB);
const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
@@ -582,7 +582,7 @@ void MipsSEFrameLowering::emitInterruptP
// Perform ISR handling like GCC
StringRef IntKind =
- MF.getFunction()->getFnAttribute("interrupt").getValueAsString();
+ MF.getFunction().getFnAttribute("interrupt").getValueAsString();
const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;
// EIC interrupt handling needs to read the Cause register to disable
@@ -726,7 +726,7 @@ void MipsSEFrameLowering::emitEpilogue(M
}
}
- if (MF.getFunction()->hasFnAttribute("interrupt"))
+ if (MF.getFunction().hasFnAttribute("interrupt"))
emitInterruptEpilogueStub(MF, MBB);
// Get the number of bytes from FrameInfo
@@ -809,8 +809,8 @@ spillCalleeSavedRegisters(MachineBasicBl
// spilled to the stack frame.
bool IsLOHI = (Reg == Mips::LO0 || Reg == Mips::LO0_64 ||
Reg == Mips::HI0 || Reg == Mips::HI0_64);
- const Function *Func = MBB.getParent()->getFunction();
- if (IsLOHI && Func->hasFnAttribute("interrupt")) {
+ const Function &Func = MBB.getParent()->getFunction();
+ if (IsLOHI && Func.hasFnAttribute("interrupt")) {
DebugLoc DL = MI->getDebugLoc();
unsigned Op = 0;
Modified: llvm/trunk/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsSEISelDAGToDAG.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsSEISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsSEISelDAGToDAG.cpp Fri Dec 15 14:22:58 2017
@@ -161,7 +161,7 @@ void MipsSEDAGToDAGISel::initGlobalBaseR
// lui $v0, %hi(%neg(%gp_rel(fname)))
// daddu $v1, $v0, $t9
// daddiu $globalbasereg, $v1, %lo(%neg(%gp_rel(fname)))
- const GlobalValue *FName = MF.getFunction();
+ const GlobalValue *FName = &MF.getFunction();
BuildMI(MBB, I, DL, TII.get(Mips::LUi64), V0)
.addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI);
BuildMI(MBB, I, DL, TII.get(Mips::DADDu), V1).addReg(V0)
@@ -190,7 +190,7 @@ void MipsSEDAGToDAGISel::initGlobalBaseR
// lui $v0, %hi(%neg(%gp_rel(fname)))
// addu $v1, $v0, $t9
// addiu $globalbasereg, $v1, %lo(%neg(%gp_rel(fname)))
- const GlobalValue *FName = MF.getFunction();
+ const GlobalValue *FName = &MF.getFunction();
BuildMI(MBB, I, DL, TII.get(Mips::LUi), V0)
.addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI);
BuildMI(MBB, I, DL, TII.get(Mips::ADDu), V1).addReg(V0).addReg(Mips::T9);
@@ -1247,7 +1247,7 @@ bool MipsSEDAGToDAGISel::trySelect(SDNod
// handled by the ldi case.
if (ResNonZero) {
IntegerType *Int32Ty =
- IntegerType::get(MF->getFunction()->getContext(), 32);
+ IntegerType::get(MF->getFunction().getContext(), 32);
const ConstantInt *Const32 = ConstantInt::get(Int32Ty, 32);
SDValue Ops[4] = {HiResNonZero ? SDValue(HiRes, 0) : Zero64Val,
CurDAG->getConstant(*Const32, DL, MVT::i32),
Modified: llvm/trunk/lib/Target/Mips/MipsSEInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsSEInstrInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsSEInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsSEInstrInfo.cpp Fri Dec 15 14:22:58 2017
@@ -231,8 +231,8 @@ storeRegToStack(MachineBasicBlock &MBB,
// Hi, Lo are normally caller save but they are callee save
// for interrupt handling.
- const Function *Func = MBB.getParent()->getFunction();
- if (Func->hasFnAttribute("interrupt")) {
+ const Function &Func = MBB.getParent()->getFunction();
+ if (Func.hasFnAttribute("interrupt")) {
if (Mips::HI32RegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(Mips::MFHI), Mips::K0);
SrcReg = Mips::K0;
@@ -262,8 +262,8 @@ loadRegFromStack(MachineBasicBlock &MBB,
MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
unsigned Opc = 0;
- const Function *Func = MBB.getParent()->getFunction();
- bool ReqIndirectLoad = Func->hasFnAttribute("interrupt") &&
+ const Function &Func = MBB.getParent()->getFunction();
+ bool ReqIndirectLoad = Func.hasFnAttribute("interrupt") &&
(DestReg == Mips::LO0 || DestReg == Mips::LO0_64 ||
DestReg == Mips::HI0 || DestReg == Mips::HI0_64);
Modified: llvm/trunk/lib/Target/Mips/MipsTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsTargetMachine.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsTargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsTargetMachine.cpp Fri Dec 15 14:22:58 2017
@@ -200,7 +200,7 @@ MipsTargetMachine::getSubtargetImpl(cons
void MipsTargetMachine::resetSubtarget(MachineFunction *MF) {
DEBUG(dbgs() << "resetSubtarget\n");
- Subtarget = const_cast<MipsSubtarget *>(getSubtargetImpl(*MF->getFunction()));
+ Subtarget = const_cast<MipsSubtarget *>(getSubtargetImpl(MF->getFunction()));
MF->setSubtarget(Subtarget);
}
Modified: llvm/trunk/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXAsmPrinter.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTXAsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/NVPTX/NVPTXAsmPrinter.cpp Fri Dec 15 14:22:58 2017
@@ -457,8 +457,8 @@ void NVPTXAsmPrinter::printReturnValStr(
void NVPTXAsmPrinter::printReturnValStr(const MachineFunction &MF,
raw_ostream &O) {
- const Function *F = MF.getFunction();
- printReturnValStr(F, O);
+ const Function &F = MF.getFunction();
+ printReturnValStr(&F, O);
}
// Return true if MBB is the header of a loop marked with
@@ -502,13 +502,13 @@ void NVPTXAsmPrinter::EmitFunctionEntryL
raw_svector_ostream O(Str);
if (!GlobalsEmitted) {
- emitGlobals(*MF->getFunction()->getParent());
+ emitGlobals(*MF->getFunction().getParent());
GlobalsEmitted = true;
}
// Set up
MRI = &MF->getRegInfo();
- F = MF->getFunction();
+ F = &MF->getFunction();
emitLinkageDirective(F, O);
if (isKernelFunction(*F))
O << ".entry ";
@@ -536,7 +536,7 @@ void NVPTXAsmPrinter::EmitFunctionBodySt
SmallString<128> Str;
raw_svector_ostream O(Str);
- emitDemotedVars(MF->getFunction(), O);
+ emitDemotedVars(&MF->getFunction(), O);
OutStreamer->EmitRawText(O.str());
}
@@ -1708,8 +1708,8 @@ void NVPTXAsmPrinter::emitFunctionParamL
void NVPTXAsmPrinter::emitFunctionParamList(const MachineFunction &MF,
raw_ostream &O) {
- const Function *F = MF.getFunction();
- emitFunctionParamList(F, O);
+ const Function &F = MF.getFunction();
+ emitFunctionParamList(&F, O);
}
void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters(
@@ -2156,7 +2156,7 @@ NVPTXAsmPrinter::lowerConstantForGV(cons
raw_string_ostream OS(S);
OS << "Unsupported expression in static initializer: ";
CE->printAsOperand(OS, /*PrintType=*/false,
- !MF ? nullptr : MF->getFunction()->getParent());
+ !MF ? nullptr : MF->getFunction().getParent());
report_fatal_error(OS.str());
}
@@ -2170,7 +2170,7 @@ NVPTXAsmPrinter::lowerConstantForGV(cons
raw_string_ostream OS(S);
OS << "Unsupported expression in static initializer: ";
CE->printAsOperand(OS, /*PrintType=*/ false,
- !MF ? nullptr : MF->getFunction()->getParent());
+ !MF ? nullptr : MF->getFunction().getParent());
report_fatal_error(OS.str());
}
Modified: llvm/trunk/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp Fri Dec 15 14:22:58 2017
@@ -1003,7 +1003,7 @@ static bool canLowerToLDG(MemSDNode *N,
return true;
// Load wasn't explicitly invariant. Attempt to infer invariance.
- if (!isKernelFunction(*F->getFunction()))
+ if (!isKernelFunction(F->getFunction()))
return false;
// We use GetUnderlyingObjects() here instead of
Modified: llvm/trunk/lib/Target/NVPTX/NVPTXISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXISelLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTXISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/NVPTX/NVPTXISelLowering.cpp Fri Dec 15 14:22:58 2017
@@ -123,10 +123,10 @@ bool NVPTXTargetLowering::useF32FTZ(cons
// If nvptx-f32ftz is used on the command-line, always honor it
return FtzEnabled;
} else {
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
// Otherwise, check for an nvptx-f32ftz attribute on the function
- if (F->hasFnAttribute("nvptx-f32ftz"))
- return F->getFnAttribute("nvptx-f32ftz").getValueAsString() == "true";
+ if (F.hasFnAttribute("nvptx-f32ftz"))
+ return F.getFnAttribute("nvptx-f32ftz").getValueAsString() == "true";
else
return false;
}
@@ -2329,7 +2329,7 @@ SDValue NVPTXTargetLowering::LowerFormal
const DataLayout &DL = DAG.getDataLayout();
auto PtrVT = getPointerTy(DAG.getDataLayout());
- const Function *F = MF.getFunction();
+ const Function *F = &MF.getFunction();
const AttributeList &PAL = F->getAttributes();
const TargetLowering *TLI = STI.getTargetLowering();
@@ -2525,7 +2525,7 @@ NVPTXTargetLowering::LowerReturn(SDValue
const SmallVectorImpl<SDValue> &OutVals,
const SDLoc &dl, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
- Type *RetTy = MF.getFunction()->getReturnType();
+ Type *RetTy = MF.getFunction().getReturnType();
bool isABI = (STI.getSmVersion() >= 20);
assert(isABI && "Non-ABI compilation is not supported");
@@ -4022,9 +4022,9 @@ bool NVPTXTargetLowering::allowUnsafeFPM
return true;
// Allow unsafe math if unsafe-fp-math attribute explicitly says so.
- const Function *F = MF.getFunction();
- if (F->hasFnAttribute("unsafe-fp-math")) {
- Attribute Attr = F->getFnAttribute("unsafe-fp-math");
+ const Function &F = MF.getFunction();
+ if (F.hasFnAttribute("unsafe-fp-math")) {
+ Attribute Attr = F.getFnAttribute("unsafe-fp-math");
StringRef Val = Attr.getValueAsString();
if (Val == "true")
return true;
Modified: llvm/trunk/lib/Target/NVPTX/NVPTXPeephole.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXPeephole.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTXPeephole.cpp (original)
+++ llvm/trunk/lib/Target/NVPTX/NVPTXPeephole.cpp Fri Dec 15 14:22:58 2017
@@ -125,7 +125,7 @@ static void CombineCVTAToLocal(MachineIn
}
bool NVPTXPeephole::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
bool Changed = false;
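Many of the runOnMachineFunction changes in this patch are the same one-liner: skipFunction() already takes const Function &, so only the explicit dereference goes away. A sketch of the pattern, assuming a MachineFunctionPass subclass (the pass itself is hypothetical):

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/CodeGen/MachineFunctionPass.h"
  using namespace llvm;

  namespace {
  struct ExamplePass : MachineFunctionPass {
    static char ID;
    ExamplePass() : MachineFunctionPass(ID) {}
    bool runOnMachineFunction(MachineFunction &MF) override {
      // skipFunction() takes const Function &, so no dereference is needed.
      if (skipFunction(MF.getFunction()))
        return false;
      return false; // this sketch makes no changes
    }
  };
  char ExamplePass::ID = 0;
  } // end anonymous namespace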
Modified: llvm/trunk/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp (original)
+++ llvm/trunk/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp Fri Dec 15 14:22:58 2017
@@ -158,7 +158,7 @@ findIndexForHandle(MachineOperand &Op, M
unsigned Param = atoi(Sym.data()+ParamBaseName.size());
std::string NewSym;
raw_string_ostream NewSymStr(NewSym);
- NewSymStr << MF.getFunction()->getName() << "_param_" << Param;
+ NewSymStr << MF.getName() << "_param_" << Param;
InstrsToRemove.insert(&TexHandleDef);
Idx = MFI->getImageHandleSymbolIndex(NewSymStr.str().c_str());
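The NVPTXReplaceImageHandles change is slightly different from the rest: instead of reaching through the IR Function at all, it uses MachineFunction::getName(), which forwards to the underlying Function's name. Illustrative sketch only:

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/Support/raw_ostream.h"
  #include <string>
  using namespace llvm;

  static std::string paramSymbol(const MachineFunction &MF, unsigned Param) {
    std::string NewSym;
    raw_string_ostream OS(NewSym);
    // Equivalent to MF.getFunction().getName() for this purpose.
    OS << MF.getName() << "_param_" << Param;
    return OS.str();
  }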
Modified: llvm/trunk/lib/Target/PowerPC/PPCAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCAsmPrinter.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCAsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCAsmPrinter.cpp Fri Dec 15 14:22:58 2017
@@ -507,7 +507,7 @@ void PPCAsmPrinter::EmitInstruction(cons
MCInst TmpInst;
bool isPPC64 = Subtarget->isPPC64();
bool isDarwin = TM.getTargetTriple().isOSDarwin();
- const Module *M = MF->getFunction()->getParent();
+ const Module *M = MF->getFunction().getParent();
PICLevel::Level PL = M->getPICLevel();
// Lower multi-instruction pseudo operations.
@@ -1228,7 +1228,7 @@ void PPCLinuxAsmPrinter::EmitFunctionEnt
// linux/ppc32 - Normal entry label.
if (!Subtarget->isPPC64() &&
(!isPositionIndependent() ||
- MF->getFunction()->getParent()->getPICLevel() == PICLevel::SmallPIC))
+ MF->getFunction().getParent()->getPICLevel() == PICLevel::SmallPIC))
return AsmPrinter::EmitFunctionEntryLabel();
if (!Subtarget->isPPC64()) {
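Where a pass needs the enclosing Module (PIC level, module flags), the chain is now MF.getFunction().getParent(); the Module itself is still handled as a pointer. A sketch under the sole assumption that the caller has a MachineFunction in hand:

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  static PICLevel::Level picLevelOf(const MachineFunction &MF) {
    const Module *M = MF.getFunction().getParent(); // Module is unchanged, still a pointer
    return M->getPICLevel();
  }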
Modified: llvm/trunk/lib/Target/PowerPC/PPCBranchCoalescing.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCBranchCoalescing.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCBranchCoalescing.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCBranchCoalescing.cpp Fri Dec 15 14:22:58 2017
@@ -714,7 +714,7 @@ bool PPCBranchCoalescing::mergeCandidate
bool PPCBranchCoalescing::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()) || MF.empty())
+ if (skipFunction(MF.getFunction()) || MF.empty())
return false;
bool didSomething = false;
Modified: llvm/trunk/lib/Target/PowerPC/PPCEarlyReturn.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCEarlyReturn.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCEarlyReturn.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCEarlyReturn.cpp Fri Dec 15 14:22:58 2017
@@ -173,7 +173,7 @@ protected:
public:
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
TII = MF.getSubtarget().getInstrInfo();
Modified: llvm/trunk/lib/Target/PowerPC/PPCFrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCFrameLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCFrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCFrameLowering.cpp Fri Dec 15 14:22:58 2017
@@ -434,7 +434,7 @@ unsigned PPCFrameLowering::determineFram
const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned LR = RegInfo->getRARegister();
- bool DisableRedZone = MF.getFunction()->hasFnAttribute(Attribute::NoRedZone);
+ bool DisableRedZone = MF.getFunction().hasFnAttribute(Attribute::NoRedZone);
bool CanUseRedZone = !MFI.hasVarSizedObjects() && // No dynamic alloca.
!MFI.adjustsStack() && // No calls.
!MustSaveLR(MF, LR) && // No need to save LR.
@@ -499,7 +499,7 @@ bool PPCFrameLowering::needsFP(const Mac
// Naked functions have no stack frame pushed, so we don't have a frame
// pointer.
- if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
+ if (MF.getFunction().hasFnAttribute(Attribute::Naked))
return false;
return MF.getTarget().Options.DisableFramePointerElim(MF) ||
@@ -692,7 +692,7 @@ void PPCFrameLowering::emitPrologue(Mach
const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
DebugLoc dl;
bool needsCFI = MMI.hasDebugInfo() ||
- MF.getFunction()->needsUnwindTableEntry();
+ MF.getFunction().needsUnwindTableEntry();
// Get processor type.
bool isPPC64 = Subtarget.isPPC64();
@@ -1505,7 +1505,7 @@ void PPCFrameLowering::emitEpilogue(Mach
unsigned RetOpcode = MBBI->getOpcode();
if (MF.getTarget().Options.GuaranteedTailCallOpt &&
(RetOpcode == PPC::BLR || RetOpcode == PPC::BLR8) &&
- MF.getFunction()->getCallingConv() == CallingConv::Fast) {
+ MF.getFunction().getCallingConv() == CallingConv::Fast) {
PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
unsigned CallerAllocatedAmt = FI->getMinReservedArea();
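The frame-lowering hunks are dominated by attribute and calling-convention queries on the function; with a reference they become plain member calls. An illustrative sketch mirroring the naked/red-zone checks above (the predicate name is made up):

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/IR/Attributes.h"
  #include "llvm/IR/Function.h"
  using namespace llvm;

  // Hypothetical predicate combining the checks seen in determineFrameLayout/needsFP.
  static bool mayUseRedZone(const MachineFunction &MF) {
    const Function &F = MF.getFunction();
    if (F.hasFnAttribute(Attribute::Naked))
      return false;
    return !F.hasFnAttribute(Attribute::NoRedZone);
  }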
Modified: llvm/trunk/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCISelDAGToDAG.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCISelDAGToDAG.cpp Fri Dec 15 14:22:58 2017
@@ -391,7 +391,7 @@ SDNode *PPCDAGToDAGISel::getGlobalBaseRe
// Insert the set of GlobalBaseReg into the first MBB of the function
MachineBasicBlock &FirstMBB = MF->front();
MachineBasicBlock::iterator MBBI = FirstMBB.begin();
- const Module *M = MF->getFunction()->getParent();
+ const Module *M = MF->getFunction().getParent();
DebugLoc dl;
if (PPCLowering->getPointerTy(CurDAG->getDataLayout()) == MVT::i32) {
Modified: llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp Fri Dec 15 14:22:58 2017
@@ -2573,7 +2573,7 @@ SDValue PPCTargetLowering::LowerGlobalTL
const GlobalValue *GV = GA->getGlobal();
EVT PtrVT = getPointerTy(DAG.getDataLayout());
bool is64bit = Subtarget.isPPC64();
- const Module *M = DAG.getMachineFunction().getFunction()->getParent();
+ const Module *M = DAG.getMachineFunction().getFunction().getParent();
PICLevel::Level picLevel = M->getPICLevel();
TLSModel::Model Model = getTargetMachine().getTLSModel(GV);
@@ -3542,7 +3542,7 @@ SDValue PPCTargetLowering::LowerFormalAr
unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
unsigned &QFPR_idx = FPR_idx;
SmallVector<SDValue, 8> MemOps;
- Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
+ Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
unsigned CurArgIdx = 0;
for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
SDValue ArgVal;
@@ -3986,7 +3986,7 @@ SDValue PPCTargetLowering::LowerFormalAr
SmallVector<SDValue, 8> MemOps;
unsigned nAltivecParamsAtEnd = 0;
- Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
+ Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
unsigned CurArgIdx = 0;
for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
SDValue ArgVal;
@@ -4422,9 +4422,9 @@ PPCTargetLowering::IsEligibleForTailCall
// Variadic argument functions are not supported.
if (isVarArg) return false;
- auto *Caller = DAG.getMachineFunction().getFunction();
+ auto &Caller = DAG.getMachineFunction().getFunction();
// Check that the calling conventions are compatible for tco.
- if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(), CalleeCC))
+ if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
return false;
// Caller contains any byval parameter is not supported.
@@ -4446,7 +4446,7 @@ PPCTargetLowering::IsEligibleForTailCall
// If the caller and callee potentially have different TOC bases then we
// cannot tail call since we need to restore the TOC pointer after the call.
// ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
- if (!callsShareTOCBase(Caller, Callee, getTargetMachine()))
+ if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
return false;
// TCO allows altering callee ABI, so we don't have to check further.
@@ -4458,7 +4458,7 @@ PPCTargetLowering::IsEligibleForTailCall
// If callee use the same argument list that caller is using, then we can
// apply SCO on this case. If it is not, then we need to check if callee needs
// stack for passing arguments.
- if (!hasSameArgumentList(Caller, CS) &&
+ if (!hasSameArgumentList(&Caller, CS) &&
needStackSlotPassParameters(Subtarget, Outs)) {
return false;
}
@@ -4483,7 +4483,7 @@ PPCTargetLowering::IsEligibleForTailCall
return false;
MachineFunction &MF = DAG.getMachineFunction();
- CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
+ CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
// Functions containing by val parameters are not supported.
for (unsigned i = 0; i != Ins.size(); i++) {
@@ -4735,7 +4735,7 @@ PrepareCall(SelectionDAG &DAG, SDValue &
// we're building with the leopard linker or later, which automatically
// synthesizes these stubs.
const TargetMachine &TM = DAG.getTarget();
- const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
+ const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
const GlobalValue *GV = nullptr;
if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
GV = G->getGlobal();
@@ -5028,7 +5028,7 @@ SDValue PPCTargetLowering::FinishCall(
// any other variadic arguments).
Ops.insert(std::next(Ops.begin()), AddTOC);
} else if (CallOpc == PPCISD::CALL &&
- !callsShareTOCBase(MF.getFunction(), Callee, DAG.getTarget())) {
+ !callsShareTOCBase(&MF.getFunction(), Callee, DAG.getTarget())) {
// Otherwise insert NOP for non-local calls.
CallOpc = PPCISD::CALL_NOP;
}
@@ -9797,7 +9797,7 @@ PPCTargetLowering::emitEHSjLjSetJmp(Mach
// Naked functions never have a base pointer, and so we use r1. For all
// other functions, this decision must be delayed until during PEI.
unsigned BaseReg;
- if (MF->getFunction()->hasFnAttribute(Attribute::Naked))
+ if (MF->getFunction().hasFnAttribute(Attribute::Naked))
BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
else
BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
@@ -13251,7 +13251,7 @@ SDValue PPCTargetLowering::LowerFRAMEADD
// Naked functions never have a frame pointer, and so we use r1. For all
// other functions, this decision must be delayed until during PEI.
unsigned FrameReg;
- if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
+ if (MF.getFunction().hasFnAttribute(Attribute::Naked))
FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
else
FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
@@ -13495,12 +13495,12 @@ EVT PPCTargetLowering::getOptimalMemOpTy
bool MemcpyStrSrc,
MachineFunction &MF) const {
if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
// When expanding a memset, require at least two QPX instructions to cover
// the cost of loading the value to be stored from the constant pool.
if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
(!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
- !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
+ !F.hasFnAttribute(Attribute::NoImplicitFloat)) {
return MVT::v4f64;
}
@@ -13719,7 +13719,7 @@ void PPCTargetLowering::insertCopiesSpli
// fine for CXX_FAST_TLS since the C++-style TLS access functions should be
// nounwind. If we want to generalize this later, we may need to emit
// CFI pseudo-instructions.
- assert(Entry->getParent()->getFunction()->hasFnAttribute(
+ assert(Entry->getParent()->getFunction().hasFnAttribute(
Attribute::NoUnwind) &&
"Function should be nounwind in insertCopiesSplitCSR!");
Entry->addLiveIn(*I);
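In PPCISelLowering the caller is now held as a reference, and its address is taken only at the boundary of file-local helpers (callsShareTOCBase, hasSameArgumentList) that still expect const Function *. A reduced sketch with a hypothetical helper standing in for those:

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/IR/CallingConv.h"
  #include "llvm/IR/Function.h"
  using namespace llvm;

  // Stand-in for a static helper that still takes a pointer.
  static bool helperTakingPtr(const Function *Caller) { return Caller != nullptr; }

  static bool example(const MachineFunction &MF) {
    const Function &Caller = MF.getFunction();
    if (Caller.getCallingConv() != CallingConv::Fast)
      return false;
    return helperTakingPtr(&Caller); // address-of only at the pointer boundary
  }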
Modified: llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h Fri Dec 15 14:22:58 2017
@@ -586,8 +586,8 @@ namespace llvm {
bool supportSplitCSR(MachineFunction *MF) const override {
return
- MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
- MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
+ MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
}
void initializeSplitCSR(MachineBasicBlock *Entry) const override;
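The header change in PPCISelLowering.h is representative of predicates that combine a calling-convention check with an attribute check; both become direct member calls on the reference. Sketch as a free function rather than the override, purely for illustration:

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/IR/Attributes.h"
  #include "llvm/IR/Function.h"
  using namespace llvm;

  static bool supportsSplitCSRLike(const MachineFunction *MF) {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }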
Modified: llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.cpp Fri Dec 15 14:22:58 2017
@@ -3133,7 +3133,7 @@ PPCInstrInfo::isSignOrZeroExtended(const
const PPCFunctionInfo *FuncInfo = MF->getInfo<PPCFunctionInfo>();
// We check the ZExt/SExt flags for a method parameter.
if (MI.getParent()->getBasicBlock() ==
- &MF->getFunction()->getEntryBlock()) {
+ &MF->getFunction().getEntryBlock()) {
unsigned VReg = MI.getOperand(0).getReg();
if (MF->getRegInfo().isLiveIn(VReg))
return SignExt ? FuncInfo->isLiveInSExt(VReg) :
Modified: llvm/trunk/lib/Target/PowerPC/PPCMIPeephole.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCMIPeephole.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCMIPeephole.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCMIPeephole.cpp Fri Dec 15 14:22:58 2017
@@ -106,7 +106,7 @@ public:
// Main entry point for this pass.
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
initialize(MF);
return simplifyCode();
Modified: llvm/trunk/lib/Target/PowerPC/PPCPreEmitPeephole.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCPreEmitPeephole.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCPreEmitPeephole.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCPreEmitPeephole.cpp Fri Dec 15 14:22:58 2017
@@ -56,7 +56,7 @@ namespace {
}
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()) || !RunPreEmitPeephole)
+ if (skipFunction(MF.getFunction()) || !RunPreEmitPeephole)
return false;
bool Changed = false;
const PPCInstrInfo *TII = MF.getSubtarget<PPCSubtarget>().getInstrInfo();
Modified: llvm/trunk/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCQPXLoadSplat.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCQPXLoadSplat.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCQPXLoadSplat.cpp Fri Dec 15 14:22:58 2017
@@ -60,7 +60,7 @@ FunctionPass *llvm::createPPCQPXLoadSpla
}
bool PPCQPXLoadSplat::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
bool MadeChange = false;
Modified: llvm/trunk/lib/Target/PowerPC/PPCReduceCRLogicals.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCReduceCRLogicals.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCReduceCRLogicals.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCReduceCRLogicals.cpp Fri Dec 15 14:22:58 2017
@@ -211,7 +211,7 @@ public:
MachineInstr *lookThroughCRCopy(unsigned Reg, unsigned &Subreg,
MachineInstr *&CpDef);
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
// If the subtarget doesn't use CR bits, there's nothing to do.
Modified: llvm/trunk/lib/Target/PowerPC/PPCRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCRegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCRegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -123,7 +123,7 @@ PPCRegisterInfo::getPointerRegClass(cons
const MCPhysReg*
PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
const PPCSubtarget &Subtarget = MF->getSubtarget<PPCSubtarget>();
- if (MF->getFunction()->getCallingConv() == CallingConv::AnyReg) {
+ if (MF->getFunction().getCallingConv() == CallingConv::AnyReg) {
if (Subtarget.hasVSX())
return CSR_64_AllRegs_VSX_SaveList;
if (Subtarget.hasAltivec())
@@ -161,7 +161,7 @@ PPCRegisterInfo::getCalleeSavedRegsViaCo
return nullptr;
if (!TM.isPPC64())
return nullptr;
- if (MF->getFunction()->getCallingConv() != CallingConv::CXX_FAST_TLS)
+ if (MF->getFunction().getCallingConv() != CallingConv::CXX_FAST_TLS)
return nullptr;
if (!MF->getInfo<PPCFunctionInfo>()->isSplitCSR())
return nullptr;
@@ -901,7 +901,7 @@ PPCRegisterInfo::eliminateFrameIndex(Mac
// Naked functions have stack size 0, although getStackSize may not reflect
// that because we didn't call all the pieces that compute it for naked
// functions.
- if (!MF.getFunction()->hasFnAttribute(Attribute::Naked)) {
+ if (!MF.getFunction().hasFnAttribute(Attribute::Naked)) {
if (!(hasBasePointer(MF) && FrameIndex < 0))
Offset += MFI.getStackSize();
}
Modified: llvm/trunk/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCVSXFMAMutate.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCVSXFMAMutate.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCVSXFMAMutate.cpp Fri Dec 15 14:22:58 2017
@@ -343,7 +343,7 @@ protected:
public:
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
// If we don't have VSX then go ahead and return without doing
Modified: llvm/trunk/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp Fri Dec 15 14:22:58 2017
@@ -191,7 +191,7 @@ private:
public:
// Main entry point for this pass.
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
// If we don't have VSX on the subtarget, don't do anything.
Modified: llvm/trunk/lib/Target/Sparc/SparcISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Sparc/SparcISelLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Sparc/SparcISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/Sparc/SparcISelLowering.cpp Fri Dec 15 14:22:58 2017
@@ -264,7 +264,7 @@ SparcTargetLowering::LowerReturn_32(SDVa
unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
// If the function returns a struct, copy the SRetReturnReg to I0
- if (MF.getFunction()->hasStructRetAttr()) {
+ if (MF.getFunction().hasStructRetAttr()) {
SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
unsigned Reg = SFI->getSRetReturnReg();
if (!Reg)
@@ -519,7 +519,7 @@ SDValue SparcTargetLowering::LowerFormal
InVals.push_back(Load);
}
- if (MF.getFunction()->hasStructRetAttr()) {
+ if (MF.getFunction().hasStructRetAttr()) {
// Copy the SRet Argument to SRetReturnReg.
SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
unsigned Reg = SFI->getSRetReturnReg();
@@ -701,8 +701,8 @@ static bool hasReturnsTwiceAttr(Selectio
CalleeFn = dyn_cast<Function>(G->getGlobal());
} else if (ExternalSymbolSDNode *E =
dyn_cast<ExternalSymbolSDNode>(Callee)) {
- const Function *Fn = DAG.getMachineFunction().getFunction();
- const Module *M = Fn->getParent();
+ const Function &Fn = DAG.getMachineFunction().getFunction();
+ const Module *M = Fn.getParent();
const char *CalleeName = E->getSymbol();
CalleeFn = M->getFunction(CalleeName);
}
@@ -1057,8 +1057,8 @@ SparcTargetLowering::getSRetArgSize(Sele
CalleeFn = dyn_cast<Function>(G->getGlobal());
} else if (ExternalSymbolSDNode *E =
dyn_cast<ExternalSymbolSDNode>(Callee)) {
- const Function *Fn = DAG.getMachineFunction().getFunction();
- const Module *M = Fn->getParent();
+ const Function &F = DAG.getMachineFunction().getFunction();
+ const Module *M = F.getParent();
const char *CalleeName = E->getSymbol();
CalleeFn = M->getFunction(CalleeName);
if (!CalleeFn && isFP128ABICall(CalleeName))
Modified: llvm/trunk/lib/Target/SystemZ/SystemZElimCompare.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZElimCompare.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZElimCompare.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZElimCompare.cpp Fri Dec 15 14:22:58 2017
@@ -593,7 +593,7 @@ bool SystemZElimCompare::processBlock(Ma
}
bool SystemZElimCompare::runOnMachineFunction(MachineFunction &F) {
- if (skipFunction(*F.getFunction()))
+ if (skipFunction(F.getFunction()))
return false;
TII = static_cast<const SystemZInstrInfo *>(F.getSubtarget().getInstrInfo());
Modified: llvm/trunk/lib/Target/SystemZ/SystemZFrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZFrameLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZFrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZFrameLowering.cpp Fri Dec 15 14:22:58 2017
@@ -71,7 +71,7 @@ void SystemZFrameLowering::determineCall
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
bool HasFP = hasFP(MF);
SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>();
- bool IsVarArg = MF.getFunction()->isVarArg();
+ bool IsVarArg = MF.getFunction().isVarArg();
// va_start stores incoming FPR varargs in the normal way, but delegates
// the saving of incoming GPR varargs to spillCalleeSavedRegisters().
@@ -139,7 +139,7 @@ spillCalleeSavedRegisters(MachineBasicBl
MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
- bool IsVarArg = MF.getFunction()->isVarArg();
+ bool IsVarArg = MF.getFunction().isVarArg();
DebugLoc DL;
// Scan the call-saved GPRs and find the bounds of the register spill area.
@@ -374,7 +374,7 @@ void SystemZFrameLowering::emitPrologue(
uint64_t StackSize = getAllocatedStackSize(MF);
if (StackSize) {
// Determine if we want to store a backchain.
- bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
+ bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
// If we need backchain, save current stack pointer. R1 is free at this
// point.
Modified: llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp Fri Dec 15 14:22:58 2017
@@ -3039,8 +3039,8 @@ SDValue SystemZTargetLowering::
lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
MachineFunction &MF = DAG.getMachineFunction();
- bool RealignOpt = !MF.getFunction()-> hasFnAttribute("no-realign-stack");
- bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
+ bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack");
+ bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
@@ -3572,7 +3572,7 @@ SDValue SystemZTargetLowering::lowerSTAC
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
- bool StoreBackchain = MF.getFunction()->hasFnAttribute("backchain");
+ bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
SDValue Chain = Op.getOperand(0);
SDValue NewSP = Op.getOperand(1);
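SystemZ queries string-keyed attributes ("backchain", "no-realign-stack"); hasFnAttribute has a StringRef overload, so the reference form is the same call minus the arrow. Sketch only:

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/IR/Function.h"
  using namespace llvm;

  static bool storesBackchain(const MachineFunction &MF) {
    // String-keyed attribute lookup, as in lowerDYNAMIC_STACKALLOC above.
    return MF.getFunction().hasFnAttribute("backchain");
  }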
Modified: llvm/trunk/lib/Target/SystemZ/SystemZLDCleanup.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZLDCleanup.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZLDCleanup.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZLDCleanup.cpp Fri Dec 15 14:22:58 2017
@@ -64,7 +64,7 @@ void SystemZLDCleanup::getAnalysisUsage(
}
bool SystemZLDCleanup::runOnMachineFunction(MachineFunction &F) {
- if (skipFunction(*F.getFunction()))
+ if (skipFunction(F.getFunction()))
return false;
TII = static_cast<const SystemZInstrInfo *>(F.getSubtarget().getInstrInfo());
Modified: llvm/trunk/lib/Target/SystemZ/SystemZRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZRegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZRegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -109,7 +109,7 @@ SystemZRegisterInfo::getRegAllocationHin
const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
- MF->getFunction()->getAttributes().hasAttrSomewhere(
+ MF->getFunction().getAttributes().hasAttrSomewhere(
Attribute::SwiftError))
return CSR_SystemZ_SwiftError_SaveList;
return CSR_SystemZ_SaveList;
@@ -119,7 +119,7 @@ const uint32_t *
SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID CC) const {
if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
- MF.getFunction()->getAttributes().hasAttrSomewhere(
+ MF.getFunction().getAttributes().hasAttrSomewhere(
Attribute::SwiftError))
return CSR_SystemZ_SwiftError_RegMask;
return CSR_SystemZ_RegMask;
Modified: llvm/trunk/lib/Target/SystemZ/SystemZShortenInst.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZShortenInst.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZShortenInst.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZShortenInst.cpp Fri Dec 15 14:22:58 2017
@@ -309,7 +309,7 @@ bool SystemZShortenInst::processBlock(Ma
}
bool SystemZShortenInst::runOnMachineFunction(MachineFunction &F) {
- if (skipFunction(*F.getFunction()))
+ if (skipFunction(F.getFunction()))
return false;
const SystemZSubtarget &ST = F.getSubtarget<SystemZSubtarget>();
Modified: llvm/trunk/lib/Target/X86/X86AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86AsmPrinter.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86AsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86AsmPrinter.cpp Fri Dec 15 14:22:58 2017
@@ -63,7 +63,7 @@ bool X86AsmPrinter::runOnMachineFunction
SetupMachineFunction(MF);
if (Subtarget->isTargetCOFF()) {
- bool Local = MF.getFunction()->hasLocalLinkage();
+ bool Local = MF.getFunction().hasLocalLinkage();
OutStreamer->BeginCOFFSymbolDef(CurrentFnSym);
OutStreamer->EmitCOFFSymbolStorageClass(
Local ? COFF::IMAGE_SYM_CLASS_STATIC : COFF::IMAGE_SYM_CLASS_EXTERNAL);
Modified: llvm/trunk/lib/Target/X86/X86CallFrameOptimization.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86CallFrameOptimization.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86CallFrameOptimization.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86CallFrameOptimization.cpp Fri Dec 15 14:22:58 2017
@@ -148,7 +148,7 @@ bool X86CallFrameOptimization::isLegal(M
// is a danger of that being generated.
if (STI->isTargetDarwin() &&
(!MF.getLandingPads().empty() ||
- (MF.getFunction()->needsUnwindTableEntry() && !TFL->hasFP(MF))))
+ (MF.getFunction().needsUnwindTableEntry() && !TFL->hasFP(MF))))
return false;
// It is not valid to change the stack pointer outside the prolog/epilog
@@ -243,7 +243,7 @@ bool X86CallFrameOptimization::runOnMach
assert(isPowerOf2_32(SlotSize) && "Expect power of 2 stack slot size");
Log2SlotSize = Log2_32(SlotSize);
- if (skipFunction(*MF.getFunction()) || !isLegal(MF))
+ if (skipFunction(MF.getFunction()) || !isLegal(MF))
return false;
unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
Modified: llvm/trunk/lib/Target/X86/X86CallLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86CallLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86CallLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86CallLowering.cpp Fri Dec 15 14:22:58 2017
@@ -177,7 +177,7 @@ bool X86CallLowering::lowerReturn(Machin
MachineFunction &MF = MIRBuilder.getMF();
MachineRegisterInfo &MRI = MF.getRegInfo();
auto &DL = MF.getDataLayout();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
ArgInfo OrigArg{VReg, Val->getType()};
setArgFlags(OrigArg, AttributeList::ReturnIndex, DL, F);
@@ -334,7 +334,7 @@ bool X86CallLowering::lowerCall(MachineI
const ArgInfo &OrigRet,
ArrayRef<ArgInfo> OrigArgs) const {
MachineFunction &MF = MIRBuilder.getMF();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
MachineRegisterInfo &MRI = MF.getRegInfo();
auto &DL = F.getParent()->getDataLayout();
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
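In the GlobalISel call lowering above, `const Function &F = *MF.getFunction();` becomes a plain reference bind; downstream uses of F were already by reference, so nothing else changes. A small sketch of that shape, assuming only a MachineFunction:

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  static const DataLayout &dataLayoutFor(const MachineFunction &MF) {
    const Function &F = MF.getFunction();    // was: *MF.getFunction()
    return F.getParent()->getDataLayout();   // downstream uses are unchanged
  }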
Modified: llvm/trunk/lib/Target/X86/X86CmovConversion.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86CmovConversion.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86CmovConversion.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86CmovConversion.cpp Fri Dec 15 14:22:58 2017
@@ -164,7 +164,7 @@ void X86CmovConverterPass::getAnalysisUs
}
bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
if (!EnableCmovConverter)
return false;
Modified: llvm/trunk/lib/Target/X86/X86DomainReassignment.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86DomainReassignment.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86DomainReassignment.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86DomainReassignment.cpp Fri Dec 15 14:22:58 2017
@@ -678,7 +678,7 @@ void X86DomainReassignment::initConverte
}
bool X86DomainReassignment::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
if (DisableX86DomainReassignment)
return false;
Modified: llvm/trunk/lib/Target/X86/X86ExpandPseudo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ExpandPseudo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ExpandPseudo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ExpandPseudo.cpp Fri Dec 15 14:22:58 2017
@@ -222,7 +222,7 @@ bool X86ExpandPseudo::ExpandMI(MachineBa
case X86::EH_RESTORE: {
// Restore ESP and EBP, and optionally ESI if required.
bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(
- MBB.getParent()->getFunction()->getPersonalityFn()));
+ MBB.getParent()->getFunction().getPersonalityFn()));
X86FL->restoreWin32EHStackPointers(MBB, MBBI, DL, /*RestoreSP=*/IsSEH);
MBBI->eraseFromParent();
return true;
Modified: llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp Fri Dec 15 14:22:58 2017
@@ -146,12 +146,12 @@ INITIALIZE_PASS(FixupBWInstPass, FIXUPBW
FunctionPass *llvm::createX86FixupBWInsts() { return new FixupBWInstPass(); }
bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) {
- if (!FixupBWInsts || skipFunction(*MF.getFunction()))
+ if (!FixupBWInsts || skipFunction(MF.getFunction()))
return false;
this->MF = &MF;
TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
- OptForSize = MF.getFunction()->optForSize();
+ OptForSize = MF.getFunction().optForSize();
MLI = &getAnalysis<MachineLoopInfo>();
LiveRegs.init(TII->getRegisterInfo());
Modified: llvm/trunk/lib/Target/X86/X86FixupLEAs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86FixupLEAs.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86FixupLEAs.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86FixupLEAs.cpp Fri Dec 15 14:22:58 2017
@@ -191,12 +191,12 @@ FixupLEAPass::postRAConvertToLEA(Machine
FunctionPass *llvm::createX86FixupLEAs() { return new FixupLEAPass(); }
bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
- if (skipFunction(*Func.getFunction()))
+ if (skipFunction(Func.getFunction()))
return false;
MF = &Func;
const X86Subtarget &ST = Func.getSubtarget<X86Subtarget>();
- OptIncDec = !ST.slowIncDec() || Func.getFunction()->optForMinSize();
+ OptIncDec = !ST.slowIncDec() || Func.getFunction().optForMinSize();
OptLEA = ST.LEAusesAG() || ST.slowLEA() || ST.slow3OpsLEA();
if (!OptLEA && !OptIncDec)
Modified: llvm/trunk/lib/Target/X86/X86FloatingPoint.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86FloatingPoint.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86FloatingPoint.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86FloatingPoint.cpp Fri Dec 15 14:22:58 2017
@@ -349,7 +349,7 @@ bool FPS::runOnMachineFunction(MachineFu
// In regcall convention, some FP registers may not be passed through
// the stack, so they will need to be assigned to the stack first
- if ((Entry->getParent()->getFunction()->getCallingConv() ==
+ if ((Entry->getParent()->getFunction().getCallingConv() ==
CallingConv::X86_RegCall) && (Bundle.Mask && !Bundle.FixCount)) {
// In the register calling convention, up to one FP argument could be
// saved in the first FP register.
@@ -973,7 +973,7 @@ void FPS::handleCall(MachineBasicBlock::
unsigned R = MO.getReg() - X86::FP0;
if (R < 8) {
- if (MF->getFunction()->getCallingConv() != CallingConv::X86_RegCall) {
+ if (MF->getFunction().getCallingConv() != CallingConv::X86_RegCall) {
assert(MO.isDef() && MO.isImplicit());
}
Modified: llvm/trunk/lib/Target/X86/X86FrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86FrameLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86FrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86FrameLowering.cpp Fri Dec 15 14:22:58 2017
@@ -148,8 +148,7 @@ static unsigned findDeadCallerSavedReg(M
const X86RegisterInfo *TRI,
bool Is64Bit) {
const MachineFunction *MF = MBB.getParent();
- const Function *F = MF->getFunction();
- if (!F || MF->callsEHReturn())
+ if (MF->callsEHReturn())
return 0;
const TargetRegisterClass &AvailableRegs = *TRI->getGPRsForTailCall(*MF);
@@ -820,7 +819,7 @@ uint64_t X86FrameLowering::calculateMaxS
const MachineFrameInfo &MFI = MF.getFrameInfo();
uint64_t MaxAlign = MFI.getMaxAlignment(); // Desired stack alignment.
unsigned StackAlign = getStackAlignment();
- if (MF.getFunction()->hasFnAttribute("stackrealign")) {
+ if (MF.getFunction().hasFnAttribute("stackrealign")) {
if (MFI.hasCalls())
MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
else if (MaxAlign < SlotSize)
@@ -935,28 +934,28 @@ void X86FrameLowering::emitPrologue(Mach
"MF used frame lowering for wrong subtarget");
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo &MFI = MF.getFrameInfo();
- const Function *Fn = MF.getFunction();
+ const Function &Fn = MF.getFunction();
MachineModuleInfo &MMI = MF.getMMI();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
bool IsFunclet = MBB.isEHFuncletEntry();
EHPersonality Personality = EHPersonality::Unknown;
- if (Fn->hasPersonalityFn())
- Personality = classifyEHPersonality(Fn->getPersonalityFn());
+ if (Fn.hasPersonalityFn())
+ Personality = classifyEHPersonality(Fn.getPersonalityFn());
bool FnHasClrFunclet =
MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR;
bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
bool HasFP = hasFP(MF);
- bool IsWin64CC = STI.isCallingConvWin64(Fn->getCallingConv());
+ bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv());
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
- bool NeedsWin64CFI = IsWin64Prologue && Fn->needsUnwindTableEntry();
+ bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
// FIXME: Emit FPO data for EH funclets.
bool NeedsWinFPO =
!IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag();
bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
bool NeedsDwarfCFI =
- !IsWin64Prologue && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
+ !IsWin64Prologue && (MMI.hasDebugInfo() || Fn.needsUnwindTableEntry());
unsigned FramePtr = TRI->getFrameRegister(MF);
const unsigned MachineFramePtr =
STI.isTarget64BitILP32()
@@ -982,16 +981,16 @@ void X86FrameLowering::emitPrologue(Mach
// The default stack probe size is 4096 if the function has no stackprobesize
// attribute.
unsigned StackProbeSize = 4096;
- if (Fn->hasFnAttribute("stack-probe-size"))
- Fn->getFnAttribute("stack-probe-size")
+ if (Fn.hasFnAttribute("stack-probe-size"))
+ Fn.getFnAttribute("stack-probe-size")
.getValueAsString()
.getAsInteger(0, StackProbeSize);
// Re-align the stack on 64-bit if the x86-interrupt calling convention is
// used and an error code was pushed, since the x86-64 ABI requires a 16-byte
// stack alignment.
- if (Fn->getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
- Fn->arg_size() == 2) {
+ if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
+ Fn.arg_size() == 2) {
StackSize += 8;
MFI.setStackSize(StackSize);
emitSPUpdate(MBB, MBBI, -8, /*InEpilogue=*/false);
@@ -1002,7 +1001,7 @@ void X86FrameLowering::emitPrologue(Mach
// pointer, calls, or dynamic alloca then we do not need to adjust the
// stack pointer (we fit in the Red Zone). We also check that we don't
// push and pop from the stack.
- if (Is64Bit && !Fn->hasFnAttribute(Attribute::NoRedZone) &&
+ if (Is64Bit && !Fn.hasFnAttribute(Attribute::NoRedZone) &&
!TRI->needsStackRealignment(MF) &&
!MFI.hasVarSizedObjects() && // No dynamic alloca.
!MFI.adjustsStack() && // No calls.
@@ -1447,7 +1446,7 @@ void X86FrameLowering::emitPrologue(Mach
// 1. The interrupt handling function uses any of the "rep" instructions.
// 2. Interrupt handling function calls another function.
//
- if (Fn->getCallingConv() == CallingConv::X86_INTR)
+ if (Fn.getCallingConv() == CallingConv::X86_INTR)
BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
.setMIFlag(MachineInstr::FrameSetup);
@@ -1508,7 +1507,7 @@ X86FrameLowering::getWinEHFuncletFrameSi
// This is the amount of stack a funclet needs to allocate.
unsigned UsedSize;
EHPersonality Personality =
- classifyEHPersonality(MF.getFunction()->getPersonalityFn());
+ classifyEHPersonality(MF.getFunction().getPersonalityFn());
if (Personality == EHPersonality::CoreCLR) {
// CLR funclets need to hold enough space to include the PSPSym, at the
// same offset from the stack pointer (immediately after the prolog) as it
@@ -1551,7 +1550,7 @@ void X86FrameLowering::emitEpilogue(Mach
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsWin64CFI =
- IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();
+ IsWin64Prologue && MF.getFunction().needsUnwindTableEntry();
bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI);
// Get the number of bytes to allocate from the FrameInfo.
@@ -1981,7 +1980,7 @@ void X86FrameLowering::emitCatchRetRetur
MachineInstr *CatchRet) const {
// SEH shouldn't use catchret.
assert(!isAsynchronousEHPersonality(classifyEHPersonality(
- MBB.getParent()->getFunction()->getPersonalityFn())) &&
+ MBB.getParent()->getFunction().getPersonalityFn())) &&
"SEH should not use CATCHRET");
DebugLoc DL = CatchRet->getDebugLoc();
MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB();
@@ -2021,9 +2020,9 @@ bool X86FrameLowering::restoreCalleeSave
// Don't restore CSRs before an SEH catchret. SEH except blocks do not form
// funclets. emitEpilogue transforms these to normal jumps.
if (MI->getOpcode() == X86::CATCHRET) {
- const Function *Func = MBB.getParent()->getFunction();
+ const Function &F = MBB.getParent()->getFunction();
bool IsSEH = isAsynchronousEHPersonality(
- classifyEHPersonality(Func->getPersonalityFn()));
+ classifyEHPersonality(F.getPersonalityFn()));
if (IsSEH)
return true;
}
@@ -2095,8 +2094,8 @@ void X86FrameLowering::determineCalleeSa
static bool
HasNestArgument(const MachineFunction *MF) {
- const Function *F = MF->getFunction();
- for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
+ const Function &F = MF->getFunction();
+ for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
I != E; I++) {
if (I->hasNestAttr())
return true;
@@ -2110,7 +2109,7 @@ HasNestArgument(const MachineFunction *M
/// needed. Set primary to true for the first register, false for the second.
static unsigned
GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {
- CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
+ CallingConv::ID CallingConvention = MF.getFunction().getCallingConv();
// Erlang stuff.
if (CallingConvention == CallingConv::HiPE) {
@@ -2160,7 +2159,7 @@ void X86FrameLowering::adjustForSegmente
assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
"Scratch register is live-in");
- if (MF.getFunction()->isVarArg())
+ if (MF.getFunction().isVarArg())
report_fatal_error("Segmented stacks do not support vararg functions.");
if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
!STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
@@ -2434,8 +2433,8 @@ void X86FrameLowering::adjustForHiPEProl
Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
const unsigned Guaranteed = HipeLeafWords * SlotSize;
- unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
- MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
+ unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs ?
+ MF.getFunction().arg_size() - CCRegisteredArgs : 0;
unsigned MaxStack = MFI.getStackSize() + CallerStkArity*SlotSize + SlotSize;
assert(STI.isTargetLinux() &&
@@ -2649,10 +2648,10 @@ eliminateCallFramePseudoInstr(MachineFun
Amount = alignTo(Amount, StackAlign);
MachineModuleInfo &MMI = MF.getMMI();
- const Function *Fn = MF.getFunction();
+ const Function &F = MF.getFunction();
bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
- bool DwarfCFI = !WindowsCFI &&
- (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
+ bool DwarfCFI = !WindowsCFI &&
+ (MMI.hasDebugInfo() || F.needsUnwindTableEntry());
// If we have any exception handlers in this function, and we adjust
// the SP before calls, we may need to indicate this to the unwinder
@@ -2694,7 +2693,7 @@ eliminateCallFramePseudoInstr(MachineFun
StackAdjustment += mergeSPUpdates(MBB, InsertPos, false);
if (StackAdjustment) {
- if (!(Fn->optForMinSize() &&
+ if (!(F.optForMinSize() &&
adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
/*InEpilogue=*/false);
@@ -2767,13 +2766,13 @@ bool X86FrameLowering::canUseAsEpilogue(
bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
// If we may need to emit frameless compact unwind information, give
// up as this is currently broken: PR25614.
- return (MF.getFunction()->hasFnAttribute(Attribute::NoUnwind) || hasFP(MF)) &&
+ return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF)) &&
// The lowering of segmented stack and HiPE only support entry blocks
// as prologue blocks: PR26107.
// This limitation may be lifted if we fix:
// - adjustForSegmentedStacks
// - adjustForHiPEPrologue
- MF.getFunction()->getCallingConv() != CallingConv::HiPE &&
+ MF.getFunction().getCallingConv() != CallingConv::HiPE &&
!MF.shouldSplitStack();
}
@@ -3003,9 +3002,9 @@ void X86FrameLowering::processFunctionBe
// If this function isn't doing Win64-style C++ EH, we don't need to do
// anything.
- const Function *Fn = MF.getFunction();
+ const Function &F = MF.getFunction();
if (!STI.is64Bit() || !MF.hasEHFunclets() ||
- classifyEHPersonality(Fn->getPersonalityFn()) != EHPersonality::MSVC_CXX)
+ classifyEHPersonality(F.getPersonalityFn()) != EHPersonality::MSVC_CXX)
return;
// Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset
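The X86FrameLowering hunks also show the main payoff of the signature change: the defensive `if (!F || ...)` in findDeadCallerSavedReg is simply deleted, because a MachineFunction always has an IR Function. An illustrative before/after sketch (the wrapper name is made up):

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/IR/Function.h"
  using namespace llvm;

  static bool canScavengeCallerSavedReg(const MachineFunction *MF) {
    // Before: const Function *F = MF->getFunction();
    //         if (!F || MF->callsEHReturn()) return false;
    // After: the null test is neither possible nor needed.
    if (MF->callsEHReturn())
      return false;
    return true;
  }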
Modified: llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp Fri Dec 15 14:22:58 2017
@@ -619,8 +619,8 @@ static bool isCalleeLoad(SDValue Callee,
void X86DAGToDAGISel::PreprocessISelDAG() {
// OptFor[Min]Size are used in pattern predicates that isel is matching.
- OptForSize = MF->getFunction()->optForSize();
- OptForMinSize = MF->getFunction()->optForMinSize();
+ OptForSize = MF->getFunction().optForSize();
+ OptForMinSize = MF->getFunction().optForMinSize();
assert((!OptForMinSize || OptForSize) && "OptForMinSize implies OptForSize");
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
@@ -753,9 +753,9 @@ void X86DAGToDAGISel::emitSpecialCodeFor
void X86DAGToDAGISel::EmitFunctionEntryCode() {
// If this is main, emit special code for main.
- if (const Function *Fn = MF->getFunction())
- if (Fn->hasExternalLinkage() && Fn->getName() == "main")
- emitSpecialCodeForMain();
+ const Function &F = MF->getFunction();
+ if (F.hasExternalLinkage() && F.getName() == "main")
+ emitSpecialCodeForMain();
}
static bool isDispSafeForFrameIndex(int64_t Val) {
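EmitFunctionEntryCode above is the same simplification in SelectionDAG isel: the old code used the pointer return value as an implicit null check, which the reference form drops. Sketch of the resulting predicate (the free-function wrapper is illustrative):

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/IR/Function.h"
  using namespace llvm;

  static bool isExternalMain(const MachineFunction &MF) {
    const Function &F = MF.getFunction();
    // Previously guarded by `if (const Function *Fn = MF->getFunction())`.
    return F.hasExternalLinkage() && F.getName() == "main";
  }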
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Fri Dec 15 14:22:58 2017
@@ -94,7 +94,7 @@ static void errorUnsupported(SelectionDA
const char *Msg) {
MachineFunction &MF = DAG.getMachineFunction();
DAG.getContext()->diagnose(
- DiagnosticInfoUnsupported(*MF.getFunction(), Msg, dl.getDebugLoc()));
+ DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
}
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
@@ -1843,8 +1843,8 @@ X86TargetLowering::getOptimalMemOpType(u
bool IsMemset, bool ZeroMemset,
bool MemcpyStrSrc,
MachineFunction &MF) const {
- const Function *F = MF.getFunction();
- if (!F->hasFnAttribute(Attribute::NoImplicitFloat)) {
+ const Function &F = MF.getFunction();
+ if (!F.hasFnAttribute(Attribute::NoImplicitFloat)) {
if (Size >= 16 &&
(!Subtarget.isUnalignedMem16Slow() ||
((DstAlign == 0 || DstAlign >= 16) &&
@@ -1940,7 +1940,7 @@ void X86TargetLowering::markLibCallAttri
if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
return;
unsigned ParamRegs = 0;
- if (auto *M = MF->getFunction()->getParent())
+ if (auto *M = MF->getFunction().getParent())
ParamRegs = M->getNumberRegisterParameters();
// Mark the first N int arguments as having reg
@@ -2207,7 +2207,7 @@ X86TargetLowering::LowerReturn(SDValue C
// For example, when they are used for argument passing.
bool ShouldDisableCalleeSavedRegister =
CallConv == CallingConv::X86_RegCall ||
- MF.getFunction()->hasFnAttribute("no_caller_saved_registers");
+ MF.getFunction().hasFnAttribute("no_caller_saved_registers");
if (CallConv == CallingConv::X86_INTR && !Outs.empty())
report_fatal_error("X86 interrupts may not return any value");
@@ -2889,8 +2889,8 @@ static ArrayRef<MCPhysReg> get64BitArgum
return None;
}
- const Function *Fn = MF.getFunction();
- bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
+ const Function &F = MF.getFunction();
+ bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
bool isSoftFloat = Subtarget.useSoftFloat();
assert(!(isSoftFloat && NoImplicitFloatOps) &&
"SSE register cannot be used when SSE is disabled!");
@@ -2923,10 +2923,9 @@ SDValue X86TargetLowering::LowerFormalAr
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
- const Function *Fn = MF.getFunction();
- if (Fn->hasExternalLinkage() &&
- Subtarget.isTargetCygMing() &&
- Fn->getName() == "main")
+ const Function &F = MF.getFunction();
+ if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
+ F.getName() == "main")
FuncInfo->setForceFramePointer(true);
MachineFrameInfo &MFI = MF.getFrameInfo();
@@ -3101,7 +3100,7 @@ SDValue X86TargetLowering::LowerFormalAr
// Figure out if XMM registers are in use.
assert(!(Subtarget.useSoftFloat() &&
- Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
+ F.hasFnAttribute(Attribute::NoImplicitFloat)) &&
"SSE register cannot be used when SSE is disabled!");
// 64-bit calling conventions support varargs and register parameters, so we
@@ -3258,7 +3257,7 @@ SDValue X86TargetLowering::LowerFormalAr
FuncInfo->setArgumentStackSize(StackSize);
if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
- EHPersonality Personality = classifyEHPersonality(Fn->getPersonalityFn());
+ EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
if (Personality == EHPersonality::CoreCLR) {
assert(Is64Bit);
// TODO: Add a mechanism to frame lowering that will allow us to indicate
@@ -3275,7 +3274,7 @@ SDValue X86TargetLowering::LowerFormalAr
}
if (CallConv == CallingConv::X86_RegCall ||
- Fn->hasFnAttribute("no_caller_saved_registers")) {
+ F.hasFnAttribute("no_caller_saved_registers")) {
MachineRegisterInfo &MRI = MF.getRegInfo();
for (std::pair<unsigned, unsigned> Pair : MRI.liveins())
MRI.disableCalleeSavedRegister(Pair.first);
@@ -3366,7 +3365,7 @@ X86TargetLowering::LowerCall(TargetLower
StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
bool IsSibcall = false;
X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
- auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
+ auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
@@ -3401,7 +3400,7 @@ X86TargetLowering::LowerCall(TargetLower
// Check if it's really possible to do a tail call.
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
isVarArg, SR != NotStructReturn,
- MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
+ MF.getFunction().hasStructRetAttr(), CLI.RetTy,
Outs, OutVals, Ins, DAG);
// Sibcalls are automatically detected tailcalls which do not require
@@ -3747,7 +3746,7 @@ X86TargetLowering::LowerCall(TargetLower
}
}
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
+ const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
unsigned char OpFlags =
Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);
@@ -3796,10 +3795,10 @@ X86TargetLowering::LowerCall(TargetLower
// FIXME: Model this more precisely so that we can register allocate across
// the normal edge and spill and fill across the exceptional edge.
if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
- const Function *CallerFn = MF.getFunction();
+ const Function &CallerFn = MF.getFunction();
EHPersonality Pers =
- CallerFn->hasPersonalityFn()
- ? classifyEHPersonality(CallerFn->getPersonalityFn())
+ CallerFn.hasPersonalityFn()
+ ? classifyEHPersonality(CallerFn.getPersonalityFn())
: EHPersonality::Unknown;
if (isFuncletEHPersonality(Pers))
Mask = RegInfo->getNoPreservedMask();
@@ -4047,15 +4046,15 @@ bool X86TargetLowering::IsEligibleForTai
// If -tailcallopt is specified, make fastcc functions tail-callable.
MachineFunction &MF = DAG.getMachineFunction();
- const Function *CallerF = MF.getFunction();
+ const Function &CallerF = MF.getFunction();
// If the function return type is x86_fp80 and the callee return type is not,
// then the FP_EXTEND of the call result is not a nop. It's not safe to
// perform a tailcall optimization here.
- if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
+ if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
return false;
- CallingConv::ID CallerCC = CallerF->getCallingConv();
+ CallingConv::ID CallerCC = CallerF.getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
@@ -4639,7 +4638,7 @@ bool X86TargetLowering::canMergeStoresTo
const SelectionDAG &DAG) const {
// Do not merge to float value size (128 bytes) if no implicit
// float attribute is set.
- bool NoFloat = DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
Attribute::NoImplicitFloat);
if (NoFloat) {
@@ -6927,7 +6926,7 @@ static SDValue lowerBuildVectorAsBroadca
// TODO: If multiple splats are generated to load the same constant,
// it may be detrimental to overall size. There needs to be a way to detect
// that condition to know if this is truly a size win.
- bool OptForSize = DAG.getMachineFunction().getFunction()->optForSize();
+ bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
// Handle broadcasting a single constant scalar from the constant pool
// into a vector.
@@ -14903,7 +14902,7 @@ SDValue X86TargetLowering::LowerINSERT_V
// Bits [3:0] of the constant are the zero mask. The DAG Combiner may
// combine either bitwise AND or insert of float 0.0 to set these bits.
- bool MinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
+ bool MinSize = DAG.getMachineFunction().getFunction().optForMinSize();
if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
// If this is an insertion of 32-bits into the low 32-bits of
// a vector, we prefer to generate a blend with immediate rather
@@ -15044,7 +15043,7 @@ X86TargetLowering::LowerExternalSymbol(S
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
- const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
+ const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
unsigned char OpFlag = Subtarget.classifyGlobalReference(nullptr, *Mod);
auto PtrVT = getPointerTy(DAG.getDataLayout());
@@ -16968,7 +16967,7 @@ SDValue X86TargetLowering::EmitTest(SDVa
// An add of one will be selected as an INC.
if (C->isOne() &&
(!Subtarget.slowIncDec() ||
- DAG.getMachineFunction().getFunction()->optForSize())) {
+ DAG.getMachineFunction().getFunction().optForSize())) {
Opcode = X86ISD::INC;
NumOperands = 1;
break;
@@ -16977,7 +16976,7 @@ SDValue X86TargetLowering::EmitTest(SDVa
// An add of negative one (subtract of one) will be selected as a DEC.
if (C->isAllOnesValue() &&
(!Subtarget.slowIncDec() ||
- DAG.getMachineFunction().getFunction()->optForSize())) {
+ DAG.getMachineFunction().getFunction().optForSize())) {
Opcode = X86ISD::DEC;
NumOperands = 1;
break;
@@ -17172,7 +17171,7 @@ SDValue X86TargetLowering::EmitCmp(SDVal
// with an immediate. 16 bit immediates are to be avoided.
if ((Op0.getValueType() == MVT::i16 &&
(isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
- !DAG.getMachineFunction().getFunction()->optForMinSize() &&
+ !DAG.getMachineFunction().getFunction().optForMinSize() &&
!Subtarget.isAtom()) {
unsigned ExtendOp =
isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
@@ -19242,8 +19241,8 @@ X86TargetLowering::LowerDYNAMIC_STACKALL
if (Is64Bit) {
// The 64 bit implementation of segmented stacks needs to clobber both r10
// and r11. This makes it impossible to use it along with nested parameters.
- const Function *F = MF.getFunction();
- for (const auto &A : F->args()) {
+ const Function &F = MF.getFunction();
+ for (const auto &A : F.args()) {
if (A.hasNestAttr())
report_fatal_error("Cannot use segmented stacks with functions that "
"have nested arguments.");
@@ -19290,7 +19289,7 @@ SDValue X86TargetLowering::LowerVASTART(
SDLoc DL(Op);
if (!Subtarget.is64Bit() ||
- Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv())) {
+ Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
@@ -19344,7 +19343,7 @@ SDValue X86TargetLowering::LowerVAARG(SD
assert(Op.getNumOperands() == 4);
MachineFunction &MF = DAG.getMachineFunction();
- if (Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv()))
+ if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
// The Win64 ABI uses char* instead of a structure.
return DAG.expandVAArg(Op.getNode());
@@ -19375,7 +19374,7 @@ SDValue X86TargetLowering::LowerVAARG(SD
if (ArgMode == 2) {
// Sanity Check: Make sure using fp_offset makes sense.
assert(!Subtarget.useSoftFloat() &&
- !(MF.getFunction()->hasFnAttribute(Attribute::NoImplicitFloat)) &&
+ !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
Subtarget.hasSSE1());
}
@@ -19403,7 +19402,7 @@ static SDValue LowerVACOPY(SDValue Op, c
// where a va_list is still an i8*.
assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
if (Subtarget.isCallingConvWin64(
- DAG.getMachineFunction().getFunction()->getCallingConv()))
+ DAG.getMachineFunction().getFunction().getCallingConv()))
// Probably a Win64 va_copy.
return DAG.expandVACopy(Op.getNode());
@@ -23939,7 +23938,7 @@ static SDValue lowerAtomicArithWithLOCK(
if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
// Convert to inc/dec if they aren't slow or we are optimizing for size.
if (AllowIncDec && (!Subtarget.slowIncDec() ||
- DAG.getMachineFunction().getFunction()->optForSize())) {
+ DAG.getMachineFunction().getFunction().optForSize())) {
if ((NewOpc == X86ISD::LADD && C->isOne()) ||
(NewOpc == X86ISD::LSUB && C->isAllOnesValue()))
return DAG.getMemIntrinsicNode(X86ISD::LINC, SDLoc(N),
@@ -26085,7 +26084,7 @@ MachineBasicBlock *X86TargetLowering::Em
int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
- if (!Subtarget.isCallingConvWin64(F->getFunction()->getCallingConv())) {
+ if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
// If %al is 0, branch around the XMM save block.
BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
@@ -26728,7 +26727,7 @@ X86TargetLowering::EmitLoweredCatchRet(M
DebugLoc DL = MI.getDebugLoc();
assert(!isAsynchronousEHPersonality(
- classifyEHPersonality(MF->getFunction()->getPersonalityFn())) &&
+ classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
"SEH does not use catchret!");
// Only 32-bit EH needs to worry about manually restoring stack pointers.
@@ -26755,7 +26754,7 @@ MachineBasicBlock *
X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
- const Constant *PerFn = MF->getFunction()->getPersonalityFn();
+ const Constant *PerFn = MF->getFunction().getPersonalityFn();
bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
// Only 32-bit SEH requires special handling for catchpad.
if (IsSEH && Subtarget.is32Bit()) {
@@ -32161,7 +32160,7 @@ static SDValue reduceVMULWidth(SDNode *N
// pmulld is supported since SSE41. It is better to use pmulld
// instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
// the expansion.
- bool OptForMinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
+ bool OptForMinSize = DAG.getMachineFunction().getFunction().optForMinSize();
if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
return SDValue();
@@ -32354,7 +32353,7 @@ static SDValue combineMul(SDNode *N, Sel
if (!MulConstantOptimization)
return SDValue();
// An imul is usually smaller than the alternative sequence.
- if (DAG.getMachineFunction().getFunction()->optForMinSize())
+ if (DAG.getMachineFunction().getFunction().optForMinSize())
return SDValue();
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
@@ -33572,7 +33571,7 @@ static SDValue combineOr(SDNode *N, Sele
return SDValue();
// fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
- bool OptForSize = DAG.getMachineFunction().getFunction()->optForSize();
+ bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
// SHLD/SHRD instructions have lower register pressure, but on some
// platforms they have higher latency than the equivalent
@@ -34512,8 +34511,8 @@ static SDValue combineStore(SDNode *N, S
if (VT.getSizeInBits() != 64)
return SDValue();
- const Function *F = DAG.getMachineFunction().getFunction();
- bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
+ const Function &F = DAG.getMachineFunction().getFunction();
+ bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
bool F64IsLegal =
!Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
if ((VT.isVector() ||
@@ -35388,7 +35387,7 @@ static SDValue combineFMinNumFMaxNum(SDN
// This takes at least 3 instructions, so favor a library call when operating
// on a scalar and minimizing code size.
- if (!VT.isVector() && DAG.getMachineFunction().getFunction()->optForMinSize())
+ if (!VT.isVector() && DAG.getMachineFunction().getFunction().optForMinSize())
return SDValue();
SDValue Op0 = N->getOperand(0);
@@ -38403,7 +38402,7 @@ void X86TargetLowering::insertCopiesSpli
// fine for CXX_FAST_TLS since the C++-style TLS access functions should be
// nounwind. If we want to generalize this later, we may need to emit
// CFI pseudo-instructions.
- assert(Entry->getParent()->getFunction()->hasFnAttribute(
+ assert(Entry->getParent()->getFunction().hasFnAttribute(
Attribute::NoUnwind) &&
"Function should be nounwind in insertCopiesSplitCSR!");
Entry->addLiveIn(*I);
@@ -38426,8 +38425,8 @@ bool X86TargetLowering::supportSwiftErro
/// string if not applicable.
StringRef X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
// If the function specifically requests stack probes, emit them.
- if (MF.getFunction()->hasFnAttribute("probe-stack"))
- return MF.getFunction()->getFnAttribute("probe-stack").getValueAsString();
+ if (MF.getFunction().hasFnAttribute("probe-stack"))
+ return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
// Generally, if we aren't on Windows, the platform ABI does not include
// support for stack probes, so don't emit them.
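For context, the query pattern above now binds the returned reference directly
instead of dereferencing a pointer. A minimal sketch of the same check in
isolation, assuming the usual MachineFunction/Function headers; the helper name
is invented for illustration and is not part of this patch:

#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"

// Hypothetical helper mirroring the getStackProbeSymbolName() check above.
static llvm::StringRef getProbeStackValue(const llvm::MachineFunction &MF) {
  const llvm::Function &F = MF.getFunction(); // reference: the IR function always exists
  if (F.hasFnAttribute("probe-stack"))
    return F.getFnAttribute("probe-stack").getValueAsString();
  return llvm::StringRef();
}

The point of the sketch is only that the attribute lookups use member access on
the reference; the surrounding Windows/ABI logic is unchanged by this commit.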
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.h?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.h (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.h Fri Dec 15 14:22:58 2017
@@ -1228,8 +1228,8 @@ namespace llvm {
const SDLoc &dl, SelectionDAG &DAG) const override;
bool supportSplitCSR(MachineFunction *MF) const override {
- return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
- MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
+ return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
}
void initializeSplitCSR(MachineBasicBlock *Entry) const override;
void insertCopiesSplitCSR(
Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp Fri Dec 15 14:22:58 2017
@@ -7726,7 +7726,7 @@ static bool ExpandMOVImmSExti8(MachineIn
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsDwarfCFI =
!IsWin64Prologue &&
- (MF.getMMI().hasDebugInfo() || MF.getFunction()->needsUnwindTableEntry());
+ (MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry());
bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
if (EmitCFI) {
TFL->BuildCFI(MBB, I, DL,
@@ -8409,7 +8409,7 @@ MachineInstr *X86InstrInfo::foldMemoryOp
// For CPUs that favor the register form of a call or push,
// do not fold loads into calls or pushes, unless optimizing for size
// aggressively.
- if (isSlowTwoMemOps && !MF.getFunction()->optForMinSize() &&
+ if (isSlowTwoMemOps && !MF.getFunction().optForMinSize() &&
(MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
MI.getOpcode() == X86::PUSH64r))
@@ -8417,7 +8417,7 @@ MachineInstr *X86InstrInfo::foldMemoryOp
// Avoid partial register update stalls unless optimizing for size.
// TODO: we should block undef reg update as well.
- if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
+ if (!MF.getFunction().optForSize() && hasPartialRegUpdate(MI.getOpcode()))
return nullptr;
unsigned NumOps = MI.getDesc().getNumOperands();
@@ -8586,7 +8586,7 @@ X86InstrInfo::foldMemoryOperandImpl(Mach
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
// TODO: we should block undef reg update as well.
- if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
+ if (!MF.getFunction().optForSize() && hasPartialRegUpdate(MI.getOpcode()))
return nullptr;
// Don't fold subreg spills, or reloads that use a high subreg.
@@ -8785,7 +8785,7 @@ MachineInstr *X86InstrInfo::foldMemoryOp
// Avoid partial register update stalls unless optimizing for size.
// TODO: we should block undef reg update as well.
- if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
+ if (!MF.getFunction().optForSize() && hasPartialRegUpdate(MI.getOpcode()))
return nullptr;
// Determine the alignment of the load.
@@ -8881,16 +8881,16 @@ MachineInstr *X86InstrInfo::foldMemoryOp
Type *Ty;
unsigned Opc = LoadMI.getOpcode();
if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS)
- Ty = Type::getFloatTy(MF.getFunction()->getContext());
+ Ty = Type::getFloatTy(MF.getFunction().getContext());
else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD)
- Ty = Type::getDoubleTy(MF.getFunction()->getContext());
+ Ty = Type::getDoubleTy(MF.getFunction().getContext());
else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES)
- Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()),16);
+ Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),16);
else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 ||
Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES)
- Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 8);
+ Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 8);
else
- Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4);
+ Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 4);
bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES ||
Opc == X86::AVX512_512_SETALLONES ||
@@ -10691,7 +10691,7 @@ namespace {
LDTLSCleanup() : MachineFunctionPass(ID) {}
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
@@ -10852,16 +10852,16 @@ X86InstrInfo::getOutlininingCandidateInf
bool X86InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
bool OutlineFromLinkOnceODRs) const {
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
// Does the function use a red zone? If it does, then we can't risk messing
// with the stack.
- if (!F->hasFnAttribute(Attribute::NoRedZone))
+ if (!F.hasFnAttribute(Attribute::NoRedZone))
return false;
// If we *don't* want to outline from things that could potentially be deduped
// then return false.
- if (!OutlineFromLinkOnceODRs && F->hasLinkOnceODRLinkage())
+ if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
return false;
// This function is viable for outlining, so return true.
Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.td?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.td Fri Dec 15 14:22:58 2017
@@ -918,11 +918,11 @@ def IsNotPIC : Predicate<"!TM.isPosi
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
- def OptForSize : Predicate<"MF->getFunction()->optForSize()">;
- def OptForMinSize : Predicate<"MF->getFunction()->optForMinSize()">;
- def OptForSpeed : Predicate<"!MF->getFunction()->optForSize()">;
+ def OptForSize : Predicate<"MF->getFunction().optForSize()">;
+ def OptForMinSize : Predicate<"MF->getFunction().optForMinSize()">;
+ def OptForSpeed : Predicate<"!MF->getFunction().optForSize()">;
def UseIncDec : Predicate<"!Subtarget->slowIncDec() || "
- "MF->getFunction()->optForSize()">;
+ "MF->getFunction().optForSize()">;
}
def CallImmAddr : Predicate<"Subtarget->isLegalToCallImmediateAddr()">;
Modified: llvm/trunk/lib/Target/X86/X86OptimizeLEAs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86OptimizeLEAs.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86OptimizeLEAs.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86OptimizeLEAs.cpp Fri Dec 15 14:22:58 2017
@@ -672,7 +672,7 @@ bool OptimizeLEAPass::removeRedundantLEA
bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
- if (DisableX86LEAOpt || skipFunction(*MF.getFunction()))
+ if (DisableX86LEAOpt || skipFunction(MF.getFunction()))
return false;
MRI = &MF.getRegInfo();
@@ -696,7 +696,7 @@ bool OptimizeLEAPass::runOnMachineFuncti
// Remove redundant address calculations. Do it only for -Os/-Oz since only
// a code size gain is expected from this part of the pass.
- if (MF.getFunction()->optForSize())
+ if (MF.getFunction().optForSize())
Changed |= removeRedundantAddrCalc(LEAs);
}
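The skipFunction/optForSize gating touched above is the common shape of a
size-gated MachineFunctionPass. A stripped-down skeleton, with a hypothetical
pass name and nothing below taken from the tree; both calls now take
MF.getFunction() directly, with no dereference:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/IR/Function.h"
using namespace llvm;

namespace {
// Illustrative-only pass skeleton; not part of this commit.
struct ExampleSizeGatedPass : public MachineFunctionPass {
  static char ID;
  ExampleSizeGatedPass() : MachineFunctionPass(ID) {}
  bool runOnMachineFunction(MachineFunction &MF) override {
    if (skipFunction(MF.getFunction()))   // honors -O0 / optnone
      return false;
    if (!MF.getFunction().optForSize())   // only do size-saving rewrites at -Os/-Oz
      return false;
    // ... size-only rewrites would go here ...
    return false;
  }
};
char ExampleSizeGatedPass::ID = 0;
} // end anonymous namespace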
Modified: llvm/trunk/lib/Target/X86/X86PadShortFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86PadShortFunction.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86PadShortFunction.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86PadShortFunction.cpp Fri Dec 15 14:22:58 2017
@@ -96,10 +96,10 @@ FunctionPass *llvm::createX86PadShortFun
/// runOnMachineFunction - Loop over all of the basic blocks, inserting
/// NOOP instructions before early exits.
bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
- if (MF.getFunction()->optForSize()) {
+ if (MF.getFunction().optForSize()) {
return false;
}
Modified: llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -218,13 +218,13 @@ X86RegisterInfo::getPointerRegClass(cons
const TargetRegisterClass *
X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
- const Function *F = MF.getFunction();
- if (IsWin64 || (F && F->getCallingConv() == CallingConv::Win64))
+ const Function &F = MF.getFunction();
+ if (IsWin64 || (F.getCallingConv() == CallingConv::Win64))
return &X86::GR64_TCW64RegClass;
else if (Is64Bit)
return &X86::GR64_TCRegClass;
- bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
+ bool hasHipeCC = (F.getCallingConv() == CallingConv::HiPE);
if (hasHipeCC)
return &X86::GR32RegClass;
return &X86::GR32_TCRegClass;
@@ -266,17 +266,17 @@ X86RegisterInfo::getCalleeSavedRegs(cons
assert(MF && "MachineFunction required");
const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
- const Function *F = MF->getFunction();
+ const Function &F = MF->getFunction();
bool HasSSE = Subtarget.hasSSE1();
bool HasAVX = Subtarget.hasAVX();
bool HasAVX512 = Subtarget.hasAVX512();
bool CallsEHReturn = MF->callsEHReturn();
- CallingConv::ID CC = F->getCallingConv();
+ CallingConv::ID CC = F.getCallingConv();
// If attribute NoCallerSavedRegisters exists then we set X86_INTR calling
// convention because it has the CSR list.
- if (MF->getFunction()->hasFnAttribute("no_caller_saved_registers"))
+ if (MF->getFunction().hasFnAttribute("no_caller_saved_registers"))
CC = CallingConv::X86_INTR;
switch (CC) {
@@ -362,7 +362,7 @@ X86RegisterInfo::getCalleeSavedRegs(cons
if (Is64Bit) {
bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
- F->getAttributes().hasAttrSomewhere(Attribute::SwiftError);
+ F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
if (IsSwiftCC)
return IsWin64 ? CSR_Win64_SwiftError_SaveList
: CSR_64_SwiftError_SaveList;
@@ -380,7 +380,7 @@ X86RegisterInfo::getCalleeSavedRegs(cons
const MCPhysReg *X86RegisterInfo::getCalleeSavedRegsViaCopy(
const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
- if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR())
return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;
return nullptr;
@@ -473,9 +473,9 @@ X86RegisterInfo::getCallPreservedMask(co
// Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
// callsEHReturn().
if (Is64Bit) {
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
- F->getAttributes().hasAttrSomewhere(Attribute::SwiftError);
+ F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
if (IsSwiftCC)
return IsWin64 ? CSR_Win64_SwiftError_RegMask : CSR_64_SwiftError_RegMask;
return IsWin64 ? CSR_Win64_RegMask : CSR_64_RegMask;
@@ -519,7 +519,7 @@ BitVector X86RegisterInfo::getReservedRe
// Set the base-pointer register and its aliases as reserved if needed.
if (hasBasePointer(MF)) {
- CallingConv::ID CC = MF.getFunction()->getCallingConv();
+ CallingConv::ID CC = MF.getFunction().getCallingConv();
const uint32_t *RegMask = getCallPreservedMask(MF, CC);
if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
report_fatal_error(
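The SwiftError test used by both CSR hooks above can be read in isolation as
follows; this is a sketch of only the attribute half of the IsSwiftCC
condition (the real code also requires supportSwiftError() from the target
lowering), and the helper name is hypothetical:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"

// Hypothetical helper; mirrors the attribute check in the hunks above.
static bool callerUsesSwiftError(const llvm::MachineFunction &MF) {
  const llvm::Function &F = MF.getFunction();
  return F.getAttributes().hasAttrSomewhere(llvm::Attribute::SwiftError);
}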
Modified: llvm/trunk/lib/Target/X86/X86SelectionDAGInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86SelectionDAGInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86SelectionDAGInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86SelectionDAGInfo.cpp Fri Dec 15 14:22:58 2017
@@ -247,7 +247,7 @@ SDValue X86SelectionDAGInfo::EmitTargetC
Repeats.AVT = Subtarget.is64Bit() ? MVT::i64 : MVT::i32;
if (Repeats.BytesLeft() > 0 &&
- DAG.getMachineFunction().getFunction()->optForMinSize()) {
+ DAG.getMachineFunction().getFunction().optForMinSize()) {
// When aggressively optimizing for size, avoid generating the code to
// handle BytesLeft.
Repeats.AVT = MVT::i8;
Modified: llvm/trunk/lib/Target/X86/X86VZeroUpper.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86VZeroUpper.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86VZeroUpper.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86VZeroUpper.cpp Fri Dec 15 14:22:58 2017
@@ -285,7 +285,7 @@ bool VZeroUpperInserter::runOnMachineFun
TII = ST.getInstrInfo();
MachineRegisterInfo &MRI = MF.getRegInfo();
EverMadeChange = false;
- IsX86INTR = MF.getFunction()->getCallingConv() == CallingConv::X86_INTR;
+ IsX86INTR = MF.getFunction().getCallingConv() == CallingConv::X86_INTR;
bool FnHasLiveInYmmOrZmm = checkFnHasLiveInYmmOrZmm(MRI);
Modified: llvm/trunk/lib/Target/X86/X86WinAllocaExpander.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86WinAllocaExpander.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86WinAllocaExpander.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86WinAllocaExpander.cpp Fri Dec 15 14:22:58 2017
@@ -279,9 +279,9 @@ bool X86WinAllocaExpander::runOnMachineF
SlotSize = TRI->getSlotSize();
StackProbeSize = 4096;
- if (MF.getFunction()->hasFnAttribute("stack-probe-size")) {
+ if (MF.getFunction().hasFnAttribute("stack-probe-size")) {
MF.getFunction()
- ->getFnAttribute("stack-probe-size")
+ .getFnAttribute("stack-probe-size")
.getValueAsString()
.getAsInteger(0, StackProbeSize);
}
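The attribute-parsing idiom touched in this hunk, written out in isolation; a
sketch assuming the in-tree default of 4096 applies when the attribute is
absent, with a made-up helper name:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"

// Hypothetical helper; not part of this diff.
static unsigned getStackProbeSize(const llvm::MachineFunction &MF) {
  unsigned StackProbeSize = 4096; // default, as in the code above
  const llvm::Function &F = MF.getFunction();
  if (F.hasFnAttribute("stack-probe-size"))
    F.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize); // parse result ignored, as above
  return StackProbeSize;
}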
Modified: llvm/trunk/lib/Target/XCore/XCoreFrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/XCore/XCoreFrameLowering.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/XCore/XCoreFrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/XCore/XCoreFrameLowering.cpp Fri Dec 15 14:22:58 2017
@@ -238,7 +238,7 @@ void XCoreFrameLowering::emitPrologue(Ma
report_fatal_error("emitPrologue unsupported alignment: "
+ Twine(MFI.getMaxAlignment()));
- const AttributeList &PAL = MF.getFunction()->getAttributes();
+ const AttributeList &PAL = MF.getFunction().getAttributes();
if (PAL.hasAttrSomewhere(Attribute::Nest))
BuildMI(MBB, MBBI, dl, TII.get(XCore::LDWSP_ru6), XCore::R11).addImm(0);
// FIX: Needs addMemOperand() but can't use getFixedStack() or getStack().
@@ -324,7 +324,7 @@ void XCoreFrameLowering::emitPrologue(Ma
if (XFI->hasEHSpillSlot()) {
// The unwinder requires stack slot & CFI offsets for the exception info.
// We do not save/spill these registers.
- const Function *Fn = MF.getFunction();
+ const Function *Fn = &MF.getFunction();
const Constant *PersonalityFn =
Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr;
SmallVector<StackSlotInfo, 2> SpillList;
@@ -359,7 +359,7 @@ void XCoreFrameLowering::emitEpilogue(Ma
if (RetOpcode == XCore::EH_RETURN) {
// 'Restore' the exception info the unwinder has placed into the stack
// slots.
- const Function *Fn = MF.getFunction();
+ const Function *Fn = &MF.getFunction();
const Constant *PersonalityFn =
Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr;
SmallVector<StackSlotInfo, 2> SpillList;
@@ -542,7 +542,7 @@ void XCoreFrameLowering::determineCallee
const MachineRegisterInfo &MRI = MF.getRegInfo();
bool LRUsed = MRI.isPhysRegModified(XCore::LR);
- if (!LRUsed && !MF.getFunction()->isVarArg() &&
+ if (!LRUsed && !MF.getFunction().isVarArg() &&
MF.getFrameInfo().estimateStackSize(MF))
// If we need to extend the stack it is more efficient to use entsp / retsp.
// We force the LR to be saved so these instructions are used.
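XCoreFrameLowering keeps the pointer-style code by taking the address of the
returned reference (Fn = &MF.getFunction()). For comparison only, the same
lookup with a reference binding, sketched here as an alternative and not what
the patch does; the helper name is invented:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Function.h"

// Alternative spelling with a reference binding; illustrative only.
static const llvm::Constant *getPersonalityOrNull(const llvm::MachineFunction &MF) {
  const llvm::Function &Fn = MF.getFunction();
  return Fn.hasPersonalityFn() ? Fn.getPersonalityFn() : nullptr;
}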
Modified: llvm/trunk/lib/Target/XCore/XCoreInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/XCore/XCoreInstrInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/XCore/XCoreInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/XCore/XCoreInstrInfo.cpp Fri Dec 15 14:22:58 2017
@@ -443,7 +443,7 @@ MachineBasicBlock::iterator XCoreInstrIn
}
MachineConstantPool *ConstantPool = MBB.getParent()->getConstantPool();
const Constant *C = ConstantInt::get(
- Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Value);
+ Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Value);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
return BuildMI(MBB, MI, dl, get(XCore::LDWCP_lru6), Reg)
.addConstantPoolIndex(Idx)
Modified: llvm/trunk/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/XCore/XCoreMachineFunctionInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/XCore/XCoreMachineFunctionInfo.cpp (original)
+++ llvm/trunk/lib/Target/XCore/XCoreMachineFunctionInfo.cpp Fri Dec 15 14:22:58 2017
@@ -39,7 +39,7 @@ int XCoreFunctionInfo::createLRSpillSlot
const TargetRegisterClass &RC = XCore::GRRegsRegClass;
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
MachineFrameInfo &MFI = MF.getFrameInfo();
- if (! MF.getFunction()->isVarArg()) {
+ if (! MF.getFunction().isVarArg()) {
// A fixed offset of 0 allows us to save / restore LR using entsp / retsp.
LRSpillSlot = MFI.CreateFixedObject(TRI.getSpillSize(RC), 0, true);
} else {
Modified: llvm/trunk/lib/Target/XCore/XCoreRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/XCore/XCoreRegisterInfo.cpp?rev=320884&r1=320883&r2=320884&view=diff
==============================================================================
--- llvm/trunk/lib/Target/XCore/XCoreRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/XCore/XCoreRegisterInfo.cpp Fri Dec 15 14:22:58 2017
@@ -204,8 +204,7 @@ static void InsertSPConstInst(MachineBas
}
bool XCoreRegisterInfo::needsFrameMoves(const MachineFunction &MF) {
- return MF.getMMI().hasDebugInfo() ||
- MF.getFunction()->needsUnwindTableEntry();
+ return MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry();
}
const MCPhysReg *