[llvm] r243894 - Convert some AArch64 code to foreach loops. NFC.
Pete Cooper
peter_cooper at apple.com
Mon Aug 3 12:04:32 PDT 2015
Author: pete
Date: Mon Aug 3 14:04:32 2015
New Revision: 243894
URL: http://llvm.org/viewvc/llvm-project?rev=243894&view=rev
Log:
Convert some AArch64 code to foreach loops. NFC.
Also converted a cast<> to dyn_cast<> while I was working on the same
line of code.
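For readers unfamiliar with the pattern, the mechanical shape of the loop
cleanup is roughly the following (a standalone sketch using std::vector,
not the patch's MachineBasicBlock/CCValAssign containers):

  #include <iostream>
  #include <vector>

  int main() {
    std::vector<int> Values = {1, 2, 3};

    // Before: manual index bookkeeping, element fetched by subscript.
    for (size_t i = 0, e = Values.size(); i != e; ++i)
      std::cout << Values[i] << '\n';

    // After: range-based for loop; same iteration order, less noise,
    // and no index variable to mix up.
    for (int V : Values)
      std::cout << V << '\n';
  }

The MachineInstr operand loops below use the equivalent operands() range
accessor in place of getNumOperands()/getOperand(i).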
Modified:
llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp
llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp
llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
llvm/trunk/lib/Target/AArch64/AArch64MCInstLower.cpp
Modified: llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp?rev=243894&r1=243893&r2=243894&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ConditionalCompares.cpp Mon Aug 3 14:04:32 2015
@@ -786,13 +786,13 @@ void AArch64ConditionalCompares::updateD
// convert() removes CmpBB which was previously dominated by Head.
// CmpBB children should be transferred to Head.
MachineDomTreeNode *HeadNode = DomTree->getNode(CmpConv.Head);
- for (unsigned i = 0, e = Removed.size(); i != e; ++i) {
- MachineDomTreeNode *Node = DomTree->getNode(Removed[i]);
+ for (MachineBasicBlock *RemovedMBB : Removed) {
+ MachineDomTreeNode *Node = DomTree->getNode(RemovedMBB);
assert(Node != HeadNode && "Cannot erase the head node");
assert(Node->getIDom() == HeadNode && "CmpBB should be dominated by Head");
while (Node->getNumChildren())
DomTree->changeImmediateDominator(Node->getChildren().back(), HeadNode);
- DomTree->eraseNode(Removed[i]);
+ DomTree->eraseNode(RemovedMBB);
}
}
@@ -801,8 +801,8 @@ void
AArch64ConditionalCompares::updateLoops(ArrayRef<MachineBasicBlock *> Removed) {
if (!Loops)
return;
- for (unsigned i = 0, e = Removed.size(); i != e; ++i)
- Loops->removeBlock(Removed[i]);
+ for (MachineBasicBlock *RemovedMBB : Removed)
+ Loops->removeBlock(RemovedMBB);
}
/// Invalidate MachineTraceMetrics before if-conversion.
Modified: llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp?rev=243894&r1=243893&r2=243894&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp Mon Aug 3 14:04:32 2015
@@ -2954,8 +2954,7 @@ bool AArch64FastISel::processCallArgs(Ca
.addImm(NumBytes);
// Process the args.
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
+ for (CCValAssign &VA : ArgLocs) {
const Value *ArgVal = CLI.OutVals[VA.getValNo()];
MVT ArgVT = OutVTs[VA.getValNo()];
@@ -3763,8 +3762,8 @@ bool AArch64FastISel::selectRet(const In
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AArch64::RET_ReallyLR));
- for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
- MIB.addReg(RetRegs[i], RegState::Implicit);
+ for (unsigned RetReg : RetRegs)
+ MIB.addReg(RetReg, RegState::Implicit);
return true;
}
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=243894&r1=243893&r2=243894&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Mon Aug 3 14:04:32 2015
@@ -2690,8 +2690,8 @@ bool AArch64TargetLowering::isEligibleFo
*DAG.getContext());
CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, true));
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
- if (!ArgLocs[i].isRegLoc())
+ for (const CCValAssign &ArgLoc : ArgLocs)
+ if (!ArgLoc.isRegLoc())
return false;
}
@@ -3039,9 +3039,9 @@ AArch64TargetLowering::LowerCall(CallLow
// Build a sequence of copy-to-reg nodes chained together with token chain
// and flag operands which copy the outgoing args into the appropriate regs.
SDValue InFlag;
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[i].first,
- RegsToPass[i].second, InFlag);
+ for (auto &RegToPass : RegsToPass) {
+ Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
+ RegToPass.second, InFlag);
InFlag = Chain.getValue(1);
}
@@ -3097,9 +3097,9 @@ AArch64TargetLowering::LowerCall(CallLow
// Add argument registers to the end of the list so that they are known live
// into the call.
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
- Ops.push_back(DAG.getRegister(RegsToPass[i].first,
- RegsToPass[i].second.getValueType()));
+ for (auto &RegToPass : RegsToPass)
+ Ops.push_back(DAG.getRegister(RegToPass.first,
+ RegToPass.second.getValueType()));
// Add a register mask operand representing the call-preserved registers.
const uint32_t *Mask;
@@ -5894,11 +5894,10 @@ static SDValue NormalizeBuildVector(SDVa
return Op;
SmallVector<SDValue, 16> Ops;
- for (unsigned I = 0, E = VT.getVectorNumElements(); I != E; ++I) {
- SDValue Lane = Op.getOperand(I);
- if (Lane.getOpcode() == ISD::Constant) {
+ for (SDValue Lane : Op->ops()) {
+ if (auto *CstLane = dyn_cast<ConstantSDNode>(Lane)) {
APInt LowBits(EltTy.getSizeInBits(),
- cast<ConstantSDNode>(Lane)->getZExtValue());
+ CstLane->getZExtValue());
Lane = DAG.getConstant(LowBits.getZExtValue(), dl, MVT::i32);
}
Ops.push_back(Lane);
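(Aside on the cast<>-to-dyn_cast<> change in NormalizeBuildVector above:
in LLVM's casting idiom, cast<X>(V) asserts if V is not an X, while
dyn_cast<X>(V) returns null on a mismatch, so the returned pointer can
drive the branch directly and the separate ISD::Constant opcode check
becomes redundant:

  if (auto *CstLane = dyn_cast<ConstantSDNode>(Lane))  // null if Lane is not a constant
    // ... use CstLane->getZExtValue() only on the constant path ...

This matches the common LLVM guidance of preferring dyn_cast<> over an
isa<> check followed by an unconditional cast<>.)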
Modified: llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp?rev=243894&r1=243893&r2=243894&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp Mon Aug 3 14:04:32 2015
@@ -446,11 +446,10 @@ AArch64LoadStoreOpt::mergePairedInsns(Ma
/// trackRegDefsUses - Remember what registers the specified instruction uses
/// and modifies.
-static void trackRegDefsUses(MachineInstr *MI, BitVector &ModifiedRegs,
+static void trackRegDefsUses(const MachineInstr *MI, BitVector &ModifiedRegs,
BitVector &UsedRegs,
const TargetRegisterInfo *TRI) {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
+ for (const MachineOperand &MO : MI->operands()) {
if (MO.isRegMask())
ModifiedRegs.setBitsNotInMask(MO.getRegMask());
Modified: llvm/trunk/lib/Target/AArch64/AArch64MCInstLower.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64MCInstLower.cpp?rev=243894&r1=243893&r2=243894&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64MCInstLower.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64MCInstLower.cpp Mon Aug 3 14:04:32 2015
@@ -207,9 +207,9 @@ bool AArch64MCInstLower::lowerOperand(co
void AArch64MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
OutMI.setOpcode(MI->getOpcode());
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ for (const MachineOperand &MO : MI->operands()) {
MCOperand MCOp;
- if (lowerOperand(MI->getOperand(i), MCOp))
+ if (lowerOperand(MO, MCOp))
OutMI.addOperand(MCOp);
}
}