[llvm] r274920 - ARM: Remove implicit iterator conversions, NFC
Duncan P. N. Exon Smith via llvm-commits
llvm-commits at lists.llvm.org
Fri Jul 8 13:21:18 PDT 2016
Author: dexonsmith
Date: Fri Jul 8 15:21:17 2016
New Revision: 274920
URL: http://llvm.org/viewvc/llvm-project?rev=274920&view=rev
Log:
ARM: Remove implicit iterator conversions, NFC
Remove remaining implicit conversions from MachineInstrBundleIterator to
MachineInstr* from the ARM backend. In most cases, I made them less attractive
by preferring MachineInstr& or using a range-based for loop.
Once all the backends are fixed I'll make the operator explicit so that this
doesn't bitrot back.
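
For readers skimming the diff below, here is a minimal standalone sketch of the
pattern in question. The toy types (Instr, InstrIterator, collect) are stand-ins
invented for illustration, not LLVM code: the conversion operator mirrors
MachineInstrBundleIterator's implicit conversion to MachineInstr*, and the two
call styles contrast the old implicit spelling with the explicit &*I /
reference-based forms the diff switches to.

// Standalone sketch, not LLVM code: a bundle-iterator-like type with an
// implicit conversion to the underlying instruction pointer.
#include <iostream>
#include <list>
#include <vector>

struct Instr {
  int Opcode;
};

class InstrIterator {
  std::list<Instr>::iterator It;

public:
  explicit InstrIterator(std::list<Instr>::iterator I) : It(I) {}
  Instr &operator*() const { return *It; }
  Instr *operator->() const { return &*It; }
  InstrIterator &operator++() { ++It; return *this; }
  bool operator!=(const InstrIterator &RHS) const { return It != RHS.It; }

  // Implicit conversion analogous to the one this commit stops relying on;
  // once it is later marked `explicit`, call sites must spell out `&*I`.
  operator Instr *() const { return &*It; }
};

static void collect(Instr *MI, std::vector<Instr *> &Out) { Out.push_back(MI); }

int main() {
  std::list<Instr> Block = {{1}, {2}, {3}};
  std::vector<Instr *> Seen;

  for (InstrIterator I(Block.begin()), E(Block.end()); I != E; ++I) {
    collect(I, Seen);   // old style: relies on the implicit conversion
    collect(&*I, Seen); // preferred spelling once the operator is explicit
  }

  // Range-based form over the container avoids the iterator entirely,
  // matching the preference for MachineInstr& in the patch below.
  for (Instr &MI : Block)
    std::cout << MI.Opcode << ' ';
  std::cout << '\n' << Seen.size() << " collected\n";
}

Once the conversion operator is made explicit, the `collect(I, Seen)` line above
stops compiling while `collect(&*I, Seen)` keeps working, which is the
bitrot protection the log message refers to.
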
Modified:
llvm/trunk/lib/Target/ARM/A15SDOptimizer.cpp
llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp
llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp
llvm/trunk/lib/Target/ARM/ARMExpandPseudoInsts.cpp
llvm/trunk/lib/Target/ARM/ARMFrameLowering.cpp
llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
llvm/trunk/lib/Target/ARM/Thumb1FrameLowering.cpp
Modified: llvm/trunk/lib/Target/ARM/A15SDOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/A15SDOptimizer.cpp?rev=274920&r1=274919&r2=274920&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/A15SDOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/ARM/A15SDOptimizer.cpp Fri Jul 8 15:21:17 2016
@@ -693,7 +693,7 @@ bool A15SDOptimizer::runOnMachineFunctio
for (MachineBasicBlock::iterator MI = MFI->begin(), ME = MFI->end();
MI != ME;) {
- Modified |= runOnInstruction(MI++);
+ Modified |= runOnInstruction(&*MI++);
}
}
Modified: llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp?rev=274920&r1=274919&r2=274920&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp Fri Jul 8 15:21:17 2016
@@ -360,9 +360,9 @@ ARMBaseInstrInfo::AnalyzeBranch(MachineB
if (AllowModify) {
MachineBasicBlock::iterator DI = std::next(I);
while (DI != MBB.end()) {
- MachineInstr *InstToDelete = DI;
+ MachineInstr &InstToDelete = *DI;
++DI;
- InstToDelete->eraseFromParent();
+ InstToDelete.eraseFromParent();
}
}
}
@@ -1227,12 +1227,11 @@ unsigned ARMBaseInstrInfo::isLoadFromSta
/// \brief Expands MEMCPY to either LDMIA/STMIA or LDMIA_UPD/STMID_UPD
/// depending on whether the result is used.
-void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MBBI) const {
+void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
bool isThumb1 = Subtarget.isThumb1Only();
bool isThumb2 = Subtarget.isThumb2();
const ARMBaseInstrInfo *TII = Subtarget.getInstrInfo();
- MachineInstr *MI = MBBI;
DebugLoc dl = MI->getDebugLoc();
MachineBasicBlock *BB = MI->getParent();
@@ -1275,7 +1274,7 @@ void ARMBaseInstrInfo::expandMEMCPY(Mach
STM.addReg(Reg, RegState::Kill);
}
- BB->erase(MBBI);
+ BB->erase(MI);
}
Modified: llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp?rev=274920&r1=274919&r2=274920&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp Fri Jul 8 15:21:17 2016
@@ -722,14 +722,10 @@ unsigned ARMConstantIslands::getCPELogAl
/// information about the sizes of each block and the locations of all
/// the jump tables.
void ARMConstantIslands::scanFunctionJumpTables() {
- for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
- MBBI != E; ++MBBI) {
- MachineBasicBlock &MBB = *MBBI;
-
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I)
- if (I->isBranch() && I->getOpcode() == ARM::t2BR_JT)
- T2JumpTables.push_back(I);
+ for (MachineBasicBlock &MBB : *MF) {
+ for (MachineInstr &I : MBB)
+ if (I.isBranch() && I.getOpcode() == ARM::t2BR_JT)
+ T2JumpTables.push_back(&I);
}
}
@@ -756,22 +752,18 @@ initializeFunctionInfo(const std::vector
adjustBBOffsetsAfter(&MF->front());
// Now go back through the instructions and build up our data structures.
- for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
- MBBI != E; ++MBBI) {
- MachineBasicBlock &MBB = *MBBI;
-
+ for (MachineBasicBlock &MBB : *MF) {
// If this block doesn't fall through into the next MBB, then this is
// 'water' that a constant pool island could be placed.
if (!BBHasFallthrough(&MBB))
WaterList.push_back(&MBB);
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I) {
- if (I->isDebugValue())
+ for (MachineInstr &I : MBB) {
+ if (I.isDebugValue())
continue;
- unsigned Opc = I->getOpcode();
- if (I->isBranch()) {
+ unsigned Opc = I.getOpcode();
+ if (I.isBranch()) {
bool isCond = false;
unsigned Bits = 0;
unsigned Scale = 1;
@@ -780,7 +772,7 @@ initializeFunctionInfo(const std::vector
default:
continue; // Ignore other JT branches
case ARM::t2BR_JT:
- T2JumpTables.push_back(I);
+ T2JumpTables.push_back(&I);
continue; // Does not get an entry in ImmBranches
case ARM::Bcc:
isCond = true;
@@ -814,11 +806,11 @@ initializeFunctionInfo(const std::vector
// Record this immediate branch.
unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
- ImmBranches.push_back(ImmBranch(I, MaxOffs, isCond, UOpc));
+ ImmBranches.push_back(ImmBranch(&I, MaxOffs, isCond, UOpc));
}
if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
- PushPopMIs.push_back(I);
+ PushPopMIs.push_back(&I);
if (Opc == ARM::CONSTPOOL_ENTRY || Opc == ARM::JUMPTABLE_ADDRS ||
Opc == ARM::JUMPTABLE_INSTS || Opc == ARM::JUMPTABLE_TBB ||
@@ -826,8 +818,8 @@ initializeFunctionInfo(const std::vector
continue;
// Scan the instructions for constant pool operands.
- for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
- if (I->getOperand(op).isCPI() || I->getOperand(op).isJTI()) {
+ for (unsigned op = 0, e = I.getNumOperands(); op != e; ++op)
+ if (I.getOperand(op).isCPI() || I.getOperand(op).isJTI()) {
// We found one. The addressing mode tells us the max displacement
// from the PC that this instruction permits.
@@ -886,15 +878,15 @@ initializeFunctionInfo(const std::vector
}
// Remember that this is a user of a CP entry.
- unsigned CPI = I->getOperand(op).getIndex();
- if (I->getOperand(op).isJTI()) {
+ unsigned CPI = I.getOperand(op).getIndex();
+ if (I.getOperand(op).isJTI()) {
JumpTableUserIndices.insert(std::make_pair(CPI, CPUsers.size()));
CPI = JumpTableEntryIndices[CPI];
}
MachineInstr *CPEMI = CPEMIs[CPI];
unsigned MaxOffs = ((1 << Bits)-1) * Scale;
- CPUsers.push_back(CPUser(I, CPEMI, MaxOffs, NegOk, IsSoImm));
+ CPUsers.push_back(CPUser(&I, CPEMI, MaxOffs, NegOk, IsSoImm));
// Increment corresponding CPEntry reference count.
CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
@@ -917,15 +909,14 @@ void ARMConstantIslands::computeBlockSiz
BBI.Unalign = 0;
BBI.PostAlign = 0;
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
- ++I) {
- BBI.Size += TII->GetInstSizeInBytes(*I);
+ for (MachineInstr &I : *MBB) {
+ BBI.Size += TII->GetInstSizeInBytes(I);
// For inline asm, GetInstSizeInBytes returns a conservative estimate.
// The actual size may be smaller, but still a multiple of the instr size.
- if (I->isInlineAsm())
+ if (I.isInlineAsm())
BBI.Unalign = isThumb ? 1 : 2;
// Also consider instructions that may be shrunk later.
- else if (isThumb && mayOptimizeThumb2Instruction(I))
+ else if (isThumb && mayOptimizeThumb2Instruction(&I))
BBI.Unalign = 1;
}
@@ -1472,7 +1463,7 @@ void ARMConstantIslands::createNewWater(
Offset < BaseInsertOffset;
Offset += TII->GetInstSizeInBytes(*MI), MI = std::next(MI)) {
assert(MI != UserMBB->end() && "Fell off end of block");
- if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == MI) {
+ if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == &*MI) {
CPUser &U = CPUsers[CPUIndex];
if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
// Shift intertion point by one unit of alignment so it is within reach.
@@ -1489,7 +1480,7 @@ void ARMConstantIslands::createNewWater(
// Remember the last IT instruction.
if (MI->getOpcode() == ARM::t2IT)
- LastIT = MI;
+ LastIT = &*MI;
}
--MI;
@@ -1506,7 +1497,7 @@ void ARMConstantIslands::createNewWater(
DEBUG(unsigned PredReg;
assert(!isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL));
- NewMBB = splitBlockBeforeInstr(MI);
+ NewMBB = splitBlockBeforeInstr(&*MI);
}
/// handleConstantPoolUser - Analyze the specified user, checking to see if it
@@ -1627,7 +1618,7 @@ void ARMConstantIslands::removeDeadCPEMI
CPEBB->setAlignment(0);
} else
// Entries are sorted by descending alignment, so realign from the front.
- CPEBB->setAlignment(getCPELogAlign(CPEBB->begin()));
+ CPEBB->setAlignment(getCPELogAlign(&*CPEBB->begin()));
adjustBBOffsetsAfter(CPEBB);
// An island has only one predecessor BB and one successor BB. Check if
Modified: llvm/trunk/lib/Target/ARM/ARMExpandPseudoInsts.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMExpandPseudoInsts.cpp?rev=274920&r1=274919&r2=274920&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMExpandPseudoInsts.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMExpandPseudoInsts.cpp Fri Jul 8 15:21:17 2016
@@ -1033,7 +1033,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBa
.addReg(JumpTarget.getReg(), RegState::Kill);
}
- MachineInstr *NewMI = std::prev(MBBI);
+ auto NewMI = std::prev(MBBI);
for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
NewMI->addOperand(MBBI->getOperand(i));
Modified: llvm/trunk/lib/Target/ARM/ARMFrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMFrameLowering.cpp?rev=274920&r1=274919&r2=274920&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMFrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMFrameLowering.cpp Fri Jul 8 15:21:17 2016
@@ -98,23 +98,22 @@ ARMFrameLowering::canSimplifyCallFramePs
return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
}
-static bool isCSRestore(MachineInstr *MI,
- const ARMBaseInstrInfo &TII,
+static bool isCSRestore(MachineInstr &MI, const ARMBaseInstrInfo &TII,
const MCPhysReg *CSRegs) {
// Integer spill area is handled with "pop".
- if (isPopOpcode(MI->getOpcode())) {
+ if (isPopOpcode(MI.getOpcode())) {
// The first two operands are predicates. The last two are
// imp-def and imp-use of SP. Check everything in between.
- for (int i = 5, e = MI->getNumOperands(); i != e; ++i)
- if (!isCalleeSavedRegister(MI->getOperand(i).getReg(), CSRegs))
+ for (int i = 5, e = MI.getNumOperands(); i != e; ++i)
+ if (!isCalleeSavedRegister(MI.getOperand(i).getReg(), CSRegs))
return false;
return true;
}
- if ((MI->getOpcode() == ARM::LDR_POST_IMM ||
- MI->getOpcode() == ARM::LDR_POST_REG ||
- MI->getOpcode() == ARM::t2LDR_POST) &&
- isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs) &&
- MI->getOperand(1).getReg() == ARM::SP)
+ if ((MI.getOpcode() == ARM::LDR_POST_IMM ||
+ MI.getOpcode() == ARM::LDR_POST_REG ||
+ MI.getOpcode() == ARM::t2LDR_POST) &&
+ isCalleeSavedRegister(MI.getOperand(0).getReg(), CSRegs) &&
+ MI.getOperand(1).getReg() == ARM::SP)
return true;
return false;
@@ -143,9 +142,9 @@ static void emitSPUpdate(bool isARM, Mac
MIFlags, Pred, PredReg);
}
-static int sizeOfSPAdjustment(const MachineInstr *MI) {
+static int sizeOfSPAdjustment(const MachineInstr &MI) {
int RegSize;
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
case ARM::VSTMDDB_UPD:
RegSize = 8;
break;
@@ -163,7 +162,7 @@ static int sizeOfSPAdjustment(const Mach
int count = 0;
// ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
// pred) so the list starts at 4.
- for (int i = MI->getNumOperands() - 1; i >= 4; --i)
+ for (int i = MI.getNumOperands() - 1; i >= 4; --i)
count += RegSize;
return count;
}
@@ -415,7 +414,7 @@ void ARMFrameLowering::emitPrologue(Mach
// .cfi_offset operations will reflect that.
if (DPRGapSize) {
assert(DPRGapSize == 4 && "unexpected alignment requirements for DPRs");
- if (tryFoldSPUpdateIntoPushPop(STI, MF, LastPush, DPRGapSize))
+ if (tryFoldSPUpdateIntoPushPop(STI, MF, &*LastPush, DPRGapSize))
DefCFAOffsetCandidates.addExtraBytes(LastPush, DPRGapSize);
else {
emitSPUpdate(isARM, MBB, MBBI, dl, TII, -DPRGapSize,
@@ -429,7 +428,7 @@ void ARMFrameLowering::emitPrologue(Mach
// Since vpush register list cannot have gaps, there may be multiple vpush
// instructions in the prologue.
while (MBBI->getOpcode() == ARM::VSTMDDB_UPD) {
- DefCFAOffsetCandidates.addInst(MBBI, sizeOfSPAdjustment(MBBI));
+ DefCFAOffsetCandidates.addInst(MBBI, sizeOfSPAdjustment(*MBBI));
LastPush = MBBI++;
}
}
@@ -493,7 +492,7 @@ void ARMFrameLowering::emitPrologue(Mach
if (NumBytes) {
// Adjust SP after all the callee-save spills.
if (AFI->getNumAlignedDPRCS2Regs() == 0 &&
- tryFoldSPUpdateIntoPushPop(STI, MF, LastPush, NumBytes))
+ tryFoldSPUpdateIntoPushPop(STI, MF, &*LastPush, NumBytes))
DefCFAOffsetCandidates.addExtraBytes(LastPush, NumBytes);
else {
emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes,
@@ -521,7 +520,7 @@ void ARMFrameLowering::emitPrologue(Mach
// that push.
if (HasFP) {
MachineBasicBlock::iterator AfterPush = std::next(GPRCS1Push);
- unsigned PushSize = sizeOfSPAdjustment(GPRCS1Push);
+ unsigned PushSize = sizeOfSPAdjustment(*GPRCS1Push);
emitRegPlusImmediate(!AFI->isThumbFunction(), MBB, AfterPush,
dl, TII, FramePtr, ARM::SP,
PushSize + FramePtrOffsetInPush,
@@ -726,8 +725,8 @@ void ARMFrameLowering::emitEpilogue(Mach
if (MBBI != MBB.begin()) {
do {
--MBBI;
- } while (MBBI != MBB.begin() && isCSRestore(MBBI, TII, CSRegs));
- if (!isCSRestore(MBBI, TII, CSRegs))
+ } while (MBBI != MBB.begin() && isCSRestore(*MBBI, TII, CSRegs));
+ if (!isCSRestore(*MBBI, TII, CSRegs))
++MBBI;
}
@@ -773,8 +772,8 @@ void ARMFrameLowering::emitEpilogue(Mach
.addReg(FramePtr));
}
} else if (NumBytes &&
- !tryFoldSPUpdateIntoPushPop(STI, MF, MBBI, NumBytes))
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
+ !tryFoldSPUpdateIntoPushPop(STI, MF, &*MBBI, NumBytes))
+ emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
// Increment past our save areas.
if (AFI->getDPRCalleeSavedAreaSize()) {
@@ -1748,9 +1747,9 @@ MachineBasicBlock::iterator ARMFrameLowe
// If we have alloca, convert as follows:
// ADJCALLSTACKDOWN -> sub, sp, sp, amount
// ADJCALLSTACKUP -> add, sp, sp, amount
- MachineInstr *Old = I;
- DebugLoc dl = Old->getDebugLoc();
- unsigned Amount = Old->getOperand(0).getImm();
+ MachineInstr &Old = *I;
+ DebugLoc dl = Old.getDebugLoc();
+ unsigned Amount = Old.getOperand(0).getImm();
if (Amount != 0) {
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
@@ -1763,18 +1762,19 @@ MachineBasicBlock::iterator ARMFrameLowe
bool isARM = !AFI->isThumbFunction();
// Replace the pseudo instruction with a new instruction...
- unsigned Opc = Old->getOpcode();
- int PIdx = Old->findFirstPredOperandIdx();
- ARMCC::CondCodes Pred = (PIdx == -1)
- ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
+ unsigned Opc = Old.getOpcode();
+ int PIdx = Old.findFirstPredOperandIdx();
+ ARMCC::CondCodes Pred =
+ (PIdx == -1) ? ARMCC::AL
+ : (ARMCC::CondCodes)Old.getOperand(PIdx).getImm();
if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
// Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
- unsigned PredReg = Old->getOperand(2).getReg();
+ unsigned PredReg = Old.getOperand(2).getReg();
emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, MachineInstr::NoFlags,
Pred, PredReg);
} else {
// Note: PredReg is operand 3 for ADJCALLSTACKUP.
- unsigned PredReg = Old->getOperand(3).getReg();
+ unsigned PredReg = Old.getOperand(3).getReg();
assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
emitSPUpdate(isARM, MBB, I, dl, TII, Amount, MachineInstr::NoFlags,
Pred, PredReg);
Modified: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp?rev=274920&r1=274919&r2=274920&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp Fri Jul 8 15:21:17 2016
@@ -8099,7 +8099,7 @@ ARMTargetLowering::EmitStructByval(Machi
// Add epilogue to handle BytesLeft.
BB = exitMBB;
- MachineInstr *StartOfExit = exitMBB->begin();
+ auto StartOfExit = exitMBB->begin();
// [scratch, srcOut] = LDRB_POST(srcLoop, 1)
// [destOut] = STRB_POST(scratch, destLoop, 1)
Modified: llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp?rev=274920&r1=274919&r2=274920&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp Fri Jul 8 15:21:17 2016
@@ -115,8 +115,8 @@ namespace {
MachineInstr *MI;
int Offset; ///< Load/Store offset.
unsigned Position; ///< Position as counted from end of basic block.
- MemOpQueueEntry(MachineInstr *MI, int Offset, unsigned Position)
- : MI(MI), Offset(Offset), Position(Position) {}
+ MemOpQueueEntry(MachineInstr &MI, int Offset, unsigned Position)
+ : MI(&MI), Offset(Offset), Position(Position) {}
};
typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
@@ -174,8 +174,8 @@ namespace {
INITIALIZE_PASS(ARMLoadStoreOpt, "arm-load-store-opt", ARM_LOAD_STORE_OPT_NAME, false, false)
-static bool definesCPSR(const MachineInstr *MI) {
- for (const auto &MO : MI->operands()) {
+static bool definesCPSR(const MachineInstr &MI) {
+ for (const auto &MO : MI.operands()) {
if (!MO.isReg())
continue;
if (MO.isDef() && MO.getReg() == ARM::CPSR && !MO.isDead())
@@ -187,11 +187,11 @@ static bool definesCPSR(const MachineIns
return false;
}
-static int getMemoryOpOffset(const MachineInstr *MI) {
- unsigned Opcode = MI->getOpcode();
+static int getMemoryOpOffset(const MachineInstr &MI) {
+ unsigned Opcode = MI.getOpcode();
bool isAM3 = Opcode == ARM::LDRD || Opcode == ARM::STRD;
- unsigned NumOperands = MI->getDesc().getNumOperands();
- unsigned OffField = MI->getOperand(NumOperands-3).getImm();
+ unsigned NumOperands = MI.getDesc().getNumOperands();
+ unsigned OffField = MI.getOperand(NumOperands - 3).getImm();
if (Opcode == ARM::t2LDRi12 || Opcode == ARM::t2LDRi8 ||
Opcode == ARM::t2STRi12 || Opcode == ARM::t2STRi8 ||
@@ -491,7 +491,7 @@ void ARMLoadStoreOpt::UpdateBaseRegUses(
InsertSub = true;
} else if ((Opc == ARM::tSUBi8 || Opc == ARM::tADDi8) &&
- !definesCPSR(MBBI)) {
+ !definesCPSR(*MBBI)) {
// SUBS/ADDS using this register, with a dead def of the CPSR.
// Merge it with the update; if the merged offset is too large,
// insert a new sub instead.
@@ -515,7 +515,7 @@ void ARMLoadStoreOpt::UpdateBaseRegUses(
InsertSub = true;
}
- } else if (definesCPSR(MBBI) || MBBI->isCall() || MBBI->isBranch()) {
+ } else if (definesCPSR(*MBBI) || MBBI->isCall() || MBBI->isBranch()) {
// Since SUBS sets the condition flags, we can't place the base reset
// after an instruction that has a live CPSR def.
// The base register might also contain an argument for a function call.
@@ -854,7 +854,7 @@ MachineInstr *ARMLoadStoreOpt::MergeOpsU
MachineInstr *LatestMI = Cand.Instrs[Cand.LatestMIIdx];
iterator InsertBefore = std::next(iterator(LatestMI));
MachineBasicBlock &MBB = *LatestMI->getParent();
- unsigned Offset = getMemoryOpOffset(First);
+ unsigned Offset = getMemoryOpOffset(*First);
unsigned Base = getLoadStoreBaseOp(*First).getReg();
bool BaseKill = LatestMI->killsRegister(Base);
unsigned PredReg = 0;
@@ -1146,7 +1146,7 @@ static int isIncrementOrDecrement(const
MIPredReg != PredReg)
return 0;
- if (CheckCPSRDef && definesCPSR(&MI))
+ if (CheckCPSRDef && definesCPSR(MI))
return 0;
return MI.getOperand(2).getImm() * Scale;
}
@@ -1606,7 +1606,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairO
bool BaseUndef = BaseOp.isUndef();
bool OffKill = isT2 ? false : MI->getOperand(3).isKill();
bool OffUndef = isT2 ? false : MI->getOperand(3).isUndef();
- int OffImm = getMemoryOpOffset(MI);
+ int OffImm = getMemoryOpOffset(*MI);
unsigned PredReg = 0;
ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
@@ -1715,13 +1715,13 @@ bool ARMLoadStoreOpt::LoadStoreMultipleO
unsigned Base = getLoadStoreBaseOp(*MBBI).getReg();
unsigned PredReg = 0;
ARMCC::CondCodes Pred = getInstrPredicate(*MBBI, PredReg);
- int Offset = getMemoryOpOffset(MBBI);
+ int Offset = getMemoryOpOffset(*MBBI);
if (CurrBase == 0) {
// Start of a new chain.
CurrBase = Base;
CurrOpc = Opcode;
CurrPred = Pred;
- MemOps.push_back(MemOpQueueEntry(MBBI, Offset, Position));
+ MemOps.push_back(MemOpQueueEntry(*MBBI, Offset, Position));
continue;
}
// Note: No need to match PredReg in the next if.
@@ -1749,7 +1749,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleO
if (!Overlap) {
// Check offset and sort memory operation into the current chain.
if (Offset > MemOps.back().Offset) {
- MemOps.push_back(MemOpQueueEntry(MBBI, Offset, Position));
+ MemOps.push_back(MemOpQueueEntry(*MBBI, Offset, Position));
continue;
} else {
MemOpQueue::iterator MI, ME;
@@ -1765,7 +1765,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleO
}
}
if (MI != MemOps.end()) {
- MemOps.insert(MI, MemOpQueueEntry(MBBI, Offset, Position));
+ MemOps.insert(MI, MemOpQueueEntry(*MBBI, Offset, Position));
continue;
}
}
@@ -1782,7 +1782,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleO
MBBI->getOpcode() == ARM::t2STRDi8) {
// ARMPreAllocLoadStoreOpt has already formed some LDRD/STRD instructions
// remember them because we may still be able to merge add/sub into them.
- MergeBaseCandidates.push_back(MBBI);
+ MergeBaseCandidates.push_back(&*MBBI);
}
@@ -1864,20 +1864,20 @@ bool ARMLoadStoreOpt::MergeReturnIntoLDM
// Ignore any DBG_VALUE instructions.
while (PrevI->isDebugValue() && PrevI != MBB.begin())
--PrevI;
- MachineInstr *PrevMI = PrevI;
- unsigned Opcode = PrevMI->getOpcode();
+ MachineInstr &PrevMI = *PrevI;
+ unsigned Opcode = PrevMI.getOpcode();
if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::LDMDA_UPD ||
Opcode == ARM::LDMDB_UPD || Opcode == ARM::LDMIB_UPD ||
Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
- MachineOperand &MO = PrevMI->getOperand(PrevMI->getNumOperands()-1);
+ MachineOperand &MO = PrevMI.getOperand(PrevMI.getNumOperands() - 1);
if (MO.getReg() != ARM::LR)
return false;
unsigned NewOpc = (isThumb2 ? ARM::t2LDMIA_RET : ARM::LDMIA_RET);
assert(((isThumb2 && Opcode == ARM::t2LDMIA_UPD) ||
Opcode == ARM::LDMIA_UPD) && "Unsupported multiple load-return!");
- PrevMI->setDesc(TII->get(NewOpc));
+ PrevMI.setDesc(TII->get(NewOpc));
MO.setReg(ARM::PC);
- PrevMI->copyImplicitOps(*MBB.getParent(), *MBBI);
+ PrevMI.copyImplicitOps(*MBB.getParent(), *MBBI);
MBB.erase(MBBI);
return true;
}
@@ -2099,7 +2099,7 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWor
return false;
// Then make sure the immediate offset fits.
- int OffImm = getMemoryOpOffset(Op0);
+ int OffImm = getMemoryOpOffset(*Op0);
if (isT2) {
int Limit = (1 << 8) * Scale;
if (OffImm >= Limit || (OffImm <= -Limit) || (OffImm & (Scale-1)))
@@ -2135,11 +2135,11 @@ bool ARMPreAllocLoadStoreOpt::Reschedule
// Sort by offset (in reverse order).
std::sort(Ops.begin(), Ops.end(),
[](const MachineInstr *LHS, const MachineInstr *RHS) {
- int LOffset = getMemoryOpOffset(LHS);
- int ROffset = getMemoryOpOffset(RHS);
- assert(LHS == RHS || LOffset != ROffset);
- return LOffset > ROffset;
- });
+ int LOffset = getMemoryOpOffset(*LHS);
+ int ROffset = getMemoryOpOffset(*RHS);
+ assert(LHS == RHS || LOffset != ROffset);
+ return LOffset > ROffset;
+ });
// The loads / stores of the same base are in order. Scan them from first to
// last and check for the following:
@@ -2171,7 +2171,7 @@ bool ARMPreAllocLoadStoreOpt::Reschedule
if (LastOpcode && LSMOpcode != LastOpcode)
break;
- int Offset = getMemoryOpOffset(Op);
+ int Offset = getMemoryOpOffset(*Op);
unsigned Bytes = getLSMultipleTransferSize(Op);
if (LastBytes) {
if (Bytes != LastBytes || Offset != (LastOffset + (int)Bytes))
@@ -2206,8 +2206,8 @@ bool ARMPreAllocLoadStoreOpt::Reschedule
} else {
// This is the new location for the loads / stores.
MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
- while (InsertPos != MBB->end()
- && (MemOps.count(InsertPos) || InsertPos->isDebugValue()))
+ while (InsertPos != MBB->end() &&
+ (MemOps.count(&*InsertPos) || InsertPos->isDebugValue()))
++InsertPos;
// If we are moving a pair of loads / stores, see if it makes sense
@@ -2302,25 +2302,25 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadS
MachineBasicBlock::iterator E = MBB->end();
while (MBBI != E) {
for (; MBBI != E; ++MBBI) {
- MachineInstr *MI = MBBI;
- if (MI->isCall() || MI->isTerminator()) {
+ MachineInstr &MI = *MBBI;
+ if (MI.isCall() || MI.isTerminator()) {
// Stop at barriers.
++MBBI;
break;
}
- if (!MI->isDebugValue())
- MI2LocMap[MI] = ++Loc;
+ if (!MI.isDebugValue())
+ MI2LocMap[&MI] = ++Loc;
- if (!isMemoryOp(*MI))
+ if (!isMemoryOp(MI))
continue;
unsigned PredReg = 0;
- if (getInstrPredicate(*MI, PredReg) != ARMCC::AL)
+ if (getInstrPredicate(MI, PredReg) != ARMCC::AL)
continue;
- int Opc = MI->getOpcode();
+ int Opc = MI.getOpcode();
bool isLd = isLoadSingle(Opc);
- unsigned Base = MI->getOperand(1).getReg();
+ unsigned Base = MI.getOperand(1).getReg();
int Offset = getMemoryOpOffset(MI);
bool StopHere = false;
@@ -2329,15 +2329,15 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadS
Base2LdsMap.find(Base);
if (BI != Base2LdsMap.end()) {
for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
- if (Offset == getMemoryOpOffset(BI->second[i])) {
+ if (Offset == getMemoryOpOffset(*BI->second[i])) {
StopHere = true;
break;
}
}
if (!StopHere)
- BI->second.push_back(MI);
+ BI->second.push_back(&MI);
} else {
- Base2LdsMap[Base].push_back(MI);
+ Base2LdsMap[Base].push_back(&MI);
LdBases.push_back(Base);
}
} else {
@@ -2345,15 +2345,15 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadS
Base2StsMap.find(Base);
if (BI != Base2StsMap.end()) {
for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
- if (Offset == getMemoryOpOffset(BI->second[i])) {
+ if (Offset == getMemoryOpOffset(*BI->second[i])) {
StopHere = true;
break;
}
}
if (!StopHere)
- BI->second.push_back(MI);
+ BI->second.push_back(&MI);
} else {
- Base2StsMap[Base].push_back(MI);
+ Base2StsMap[Base].push_back(&MI);
StBases.push_back(Base);
}
}
Modified: llvm/trunk/lib/Target/ARM/Thumb1FrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/Thumb1FrameLowering.cpp?rev=274920&r1=274919&r2=274920&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/Thumb1FrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM/Thumb1FrameLowering.cpp Fri Jul 8 15:21:17 2016
@@ -59,9 +59,9 @@ eliminateCallFramePseudoInstr(MachineFun
// If we have alloca, convert as follows:
// ADJCALLSTACKDOWN -> sub, sp, sp, amount
// ADJCALLSTACKUP -> add, sp, sp, amount
- MachineInstr *Old = I;
- DebugLoc dl = Old->getDebugLoc();
- unsigned Amount = Old->getOperand(0).getImm();
+ MachineInstr &Old = *I;
+ DebugLoc dl = Old.getDebugLoc();
+ unsigned Amount = Old.getOperand(0).getImm();
if (Amount != 0) {
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
@@ -70,7 +70,7 @@ eliminateCallFramePseudoInstr(MachineFun
Amount = (Amount+Align-1)/Align*Align;
// Replace the pseudo instruction with a new instruction...
- unsigned Opc = Old->getOpcode();
+ unsigned Opc = Old.getOpcode();
if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
emitSPUpdate(MBB, I, TII, dl, *RegInfo, -Amount);
} else {
@@ -188,7 +188,7 @@ void Thumb1FrameLowering::emitPrologue(M
int FramePtrOffsetInBlock = 0;
unsigned adjustedGPRCS1Size = GPRCS1Size;
- if (tryFoldSPUpdateIntoPushPop(STI, MF, std::prev(MBBI), NumBytes)) {
+ if (tryFoldSPUpdateIntoPushPop(STI, MF, &*std::prev(MBBI), NumBytes)) {
FramePtrOffsetInBlock = NumBytes;
adjustedGPRCS1Size += NumBytes;
NumBytes = 0;
@@ -303,16 +303,15 @@ void Thumb1FrameLowering::emitPrologue(M
AFI->setShouldRestoreSPFromFP(true);
}
-static bool isCSRestore(MachineInstr *MI, const MCPhysReg *CSRegs) {
- if (MI->getOpcode() == ARM::tLDRspi &&
- MI->getOperand(1).isFI() &&
- isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs))
+static bool isCSRestore(MachineInstr &MI, const MCPhysReg *CSRegs) {
+ if (MI.getOpcode() == ARM::tLDRspi && MI.getOperand(1).isFI() &&
+ isCalleeSavedRegister(MI.getOperand(0).getReg(), CSRegs))
return true;
- else if (MI->getOpcode() == ARM::tPOP) {
+ else if (MI.getOpcode() == ARM::tPOP) {
// The first two operands are predicates. The last two are
// imp-def and imp-use of SP. Check everything in between.
- for (int i = 2, e = MI->getNumOperands() - 2; i != e; ++i)
- if (!isCalleeSavedRegister(MI->getOperand(i).getReg(), CSRegs))
+ for (int i = 2, e = MI.getNumOperands() - 2; i != e; ++i)
+ if (!isCalleeSavedRegister(MI.getOperand(i).getReg(), CSRegs))
return false;
return true;
}
@@ -345,8 +344,8 @@ void Thumb1FrameLowering::emitEpilogue(M
if (MBBI != MBB.begin()) {
do
--MBBI;
- while (MBBI != MBB.begin() && isCSRestore(MBBI, CSRegs));
- if (!isCSRestore(MBBI, CSRegs))
+ while (MBBI != MBB.begin() && isCSRestore(*MBBI, CSRegs));
+ if (!isCSRestore(*MBBI, CSRegs))
++MBBI;
}
@@ -375,11 +374,11 @@ void Thumb1FrameLowering::emitEpilogue(M
.addReg(FramePtr));
} else {
if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tBX_RET &&
- &MBB.front() != MBBI && std::prev(MBBI)->getOpcode() == ARM::tPOP) {
+ &MBB.front() != &*MBBI && std::prev(MBBI)->getOpcode() == ARM::tPOP) {
MachineBasicBlock::iterator PMBBI = std::prev(MBBI);
- if (!tryFoldSPUpdateIntoPushPop(STI, MF, PMBBI, NumBytes))
+ if (!tryFoldSPUpdateIntoPushPop(STI, MF, &*PMBBI, NumBytes))
emitSPUpdate(MBB, PMBBI, TII, dl, *RegInfo, NumBytes);
- } else if (!tryFoldSPUpdateIntoPushPop(STI, MF, MBBI, NumBytes))
+ } else if (!tryFoldSPUpdateIntoPushPop(STI, MF, &*MBBI, NumBytes))
emitSPUpdate(MBB, MBBI, TII, dl, *RegInfo, NumBytes);
}
}