[llvm-commits] CVS: llvm/lib/Target/ARM/ARMAsmPrinter.cpp ARMConstantIslandPass.cpp ARMISelDAGToDAG.cpp ARMInstrInfo.cpp ARMInstrInfo.td ARMInstrThumb.td ARMInstrVFP.td ARMLoadStoreOptimizer.cpp ARMRegisterInfo.cpp
Evan Cheng
evan.cheng at apple.com
Mon May 14 18:29:25 PDT 2007
Changes in directory llvm/lib/Target/ARM:
ARMAsmPrinter.cpp updated: 1.73 -> 1.74
ARMConstantIslandPass.cpp updated: 1.42 -> 1.43
ARMISelDAGToDAG.cpp updated: 1.111 -> 1.112
ARMInstrInfo.cpp updated: 1.23 -> 1.24
ARMInstrInfo.td updated: 1.100 -> 1.101
ARMInstrThumb.td updated: 1.26 -> 1.27
ARMInstrVFP.td updated: 1.5 -> 1.6
ARMLoadStoreOptimizer.cpp updated: 1.11 -> 1.12
ARMRegisterInfo.cpp updated: 1.95 -> 1.96
---
Log message:
Add PredicateOperand to all ARM instructions that have the condition field.
---
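In practice this means every predicable ARM MachineInstr now carries a trailing condition-code immediate, and every place that builds one appends it explicitly. A minimal sketch of the new convention (placeholder register/immediate names; it mirrors the ARMInstrInfo.cpp call sites below), assuming the unconditional ARMCC::AL predicate:

    // Build "add Rd, Rn, #imm" with the default always-execute predicate.
    // The final addImm(ARMCC::AL) is the new PredicateOperand; a predicated
    // instruction would pass e.g. ARMCC::EQ instead.
    MachineInstr *AddMI = BuildMI(get(ARM::ADDri), DestReg)
                            .addReg(BaseReg)
                            .addImm(ImmVal)
                            .addImm(ARMCC::AL);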
Diffs of the changes: (+495 -363)
ARMAsmPrinter.cpp | 5
ARMConstantIslandPass.cpp | 12 -
ARMISelDAGToDAG.cpp | 40 +++--
ARMInstrInfo.cpp | 37 ++--
ARMInstrInfo.td | 361 ++++++++++++++++++++++++----------------------
ARMInstrThumb.td | 14 +
ARMInstrVFP.td | 168 ++++++++++++---------
ARMLoadStoreOptimizer.cpp | 96 +++++++-----
ARMRegisterInfo.cpp | 125 +++++++++------
9 files changed, 495 insertions(+), 363 deletions(-)
Index: llvm/lib/Target/ARM/ARMAsmPrinter.cpp
diff -u llvm/lib/Target/ARM/ARMAsmPrinter.cpp:1.73 llvm/lib/Target/ARM/ARMAsmPrinter.cpp:1.74
--- llvm/lib/Target/ARM/ARMAsmPrinter.cpp:1.73 Tue May 8 16:08:43 2007
+++ llvm/lib/Target/ARM/ARMAsmPrinter.cpp Mon May 14 20:29:07 2007
@@ -614,8 +614,9 @@
}
void ARMAsmPrinter::printPredicateOperand(const MachineInstr *MI, int opNum) {
- int CC = (int)MI->getOperand(opNum).getImmedValue();
- O << ARMCondCodeToString((ARMCC::CondCodes)CC);
+ ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(opNum).getImmedValue();
+ if (CC != ARMCC::AL)
+ O << ARMCondCodeToString(CC);
}
void ARMAsmPrinter::printPCLabel(const MachineInstr *MI, int opNum) {
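With the printer change above, the "$p" predicate suffix that the AsmStrings below now carry is only emitted for real conditions; the default ARMCC::AL prints nothing, so unconditional output is unchanged. A rough illustration of the assumed effect (not taken from the patch):

    ARMCC::CondCodes CC = ...;        // predicate operand of the instruction
    if (CC != ARMCC::AL)
      O << ARMCondCodeToString(CC);   // ARMCC::EQ -> "ldreq r0, [r1]"
                                      // ARMCC::AL -> "ldr r0, [r1]"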
Index: llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
diff -u llvm/lib/Target/ARM/ARMConstantIslandPass.cpp:1.42 llvm/lib/Target/ARM/ARMConstantIslandPass.cpp:1.43
--- llvm/lib/Target/ARM/ARMConstantIslandPass.cpp:1.42 Wed May 2 20:11:53 2007
+++ llvm/lib/Target/ARM/ARMConstantIslandPass.cpp Mon May 14 20:29:07 2007
@@ -1265,12 +1265,12 @@
bool MadeChange = false;
for (unsigned i = 0, e = PushPopMIs.size(); i != e; ++i) {
MachineInstr *MI = PushPopMIs[i];
- if (MI->getNumOperands() == 1) {
- if (MI->getOpcode() == ARM::tPOP_RET &&
- MI->getOperand(0).getReg() == ARM::PC)
- BuildMI(MI->getParent(), TII->get(ARM::tBX_RET));
- MI->eraseFromParent();
- MadeChange = true;
+ if (MI->getOpcode() == ARM::tPOP_RET &&
+ MI->getOperand(0).getReg() == ARM::PC &&
+ MI->getNumExplicitOperands() == 1) {
+ BuildMI(MI->getParent(), TII->get(ARM::tBX_RET));
+ MI->eraseFromParent();
+ MadeChange = true;
}
}
return MadeChange;
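The Thumb pop-to-return rewrite now keys off getNumExplicitOperands() rather than the raw operand count, presumably so operands appended behind the declared list (implicit uses/defs added by the framework) no longer defeat the "pop only pc" test. A sketch of the intended check, with a hypothetical helper name, under that assumption:

    // True only for a tPOP_RET whose single *declared* operand is PC;
    // any implicit operands tacked onto the instruction are ignored.
    static bool isBarePopPC(const MachineInstr *MI) {
      return MI->getOpcode() == ARM::tPOP_RET &&
             MI->getNumExplicitOperands() == 1 &&
             MI->getOperand(0).getReg() == ARM::PC;
    }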
Index: llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
diff -u llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp:1.111 llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp:1.112
--- llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp:1.111 Wed May 2 19:32:00 2007
+++ llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp Mon May 14 20:29:07 2007
@@ -524,6 +524,11 @@
return true;
}
+/// getDefaultPred - Returns an ARMCC::AL immediate node.
+static inline SDOperand getDefaultPred(SelectionDAG *CurDAG) {
+ return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
+}
+
SDNode *ARMDAGToDAGISel::Select(SDOperand Op) {
SDNode *N = Op.Val;
@@ -559,9 +564,10 @@
CPIdx,
CurDAG->getRegister(0, MVT::i32),
CurDAG->getTargetConstant(0, MVT::i32),
+ getDefaultPred(CurDAG),
CurDAG->getEntryNode()
};
- ResNode=CurDAG->getTargetNode(ARM::LDRcp, MVT::i32, MVT::Other, Ops, 4);
+ ResNode=CurDAG->getTargetNode(ARM::LDRcp, MVT::i32, MVT::Other, Ops, 5);
}
ReplaceUses(Op, SDOperand(ResNode, 0));
return NULL;
@@ -573,10 +579,14 @@
case ISD::FrameIndex: {
// Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
int FI = cast<FrameIndexSDNode>(N)->getIndex();
- unsigned Opc = Subtarget->isThumb() ? ARM::tADDrSPi : ARM::ADDri;
SDOperand TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, TFI,
- CurDAG->getTargetConstant(0, MVT::i32));
+ if (Subtarget->isThumb())
+ return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI,
+ CurDAG->getTargetConstant(0, MVT::i32));
+ else
+ return CurDAG->SelectNodeTo(N, ARM::ADDri, MVT::i32, TFI,
+ CurDAG->getTargetConstant(0, MVT::i32),
+ getDefaultPred(CurDAG));
}
case ISD::ADD: {
// Select add sp, c to tADDhirr.
@@ -606,35 +616,39 @@
AddToISelQueue(V);
unsigned ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, Log2_32(RHSV-1));
SDOperand Ops[] = { V, V, CurDAG->getRegister(0, MVT::i32),
- CurDAG->getTargetConstant(ShImm, MVT::i32)
+ CurDAG->getTargetConstant(ShImm, MVT::i32),
+ getDefaultPred(CurDAG)
};
- return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 4);
+ return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 5);
}
if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
SDOperand V = Op.getOperand(0);
AddToISelQueue(V);
unsigned ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, Log2_32(RHSV+1));
SDOperand Ops[] = { V, V, CurDAG->getRegister(0, MVT::i32),
- CurDAG->getTargetConstant(ShImm, MVT::i32)
+ CurDAG->getTargetConstant(ShImm, MVT::i32),
+ getDefaultPred(CurDAG)
};
- return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 4);
+ return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 5);
}
}
break;
case ARMISD::FMRRD:
AddToISelQueue(Op.getOperand(0));
return CurDAG->getTargetNode(ARM::FMRRD, MVT::i32, MVT::i32,
- Op.getOperand(0));
+ Op.getOperand(0), getDefaultPred(CurDAG));
case ARMISD::MULHILOU:
AddToISelQueue(Op.getOperand(0));
AddToISelQueue(Op.getOperand(1));
return CurDAG->getTargetNode(ARM::UMULL, MVT::i32, MVT::i32,
- Op.getOperand(0), Op.getOperand(1));
+ Op.getOperand(0), Op.getOperand(1),
+ getDefaultPred(CurDAG));
case ARMISD::MULHILOS:
AddToISelQueue(Op.getOperand(0));
AddToISelQueue(Op.getOperand(1));
return CurDAG->getTargetNode(ARM::SMULL, MVT::i32, MVT::i32,
- Op.getOperand(0), Op.getOperand(1));
+ Op.getOperand(0), Op.getOperand(1),
+ getDefaultPred(CurDAG));
case ISD::LOAD: {
LoadSDNode *LD = cast<LoadSDNode>(Op);
ISD::MemIndexedMode AM = LD->getAddressingMode();
@@ -674,9 +688,9 @@
AddToISelQueue(Chain);
AddToISelQueue(Base);
AddToISelQueue(Offset);
- SDOperand Ops[] = { Base, Offset, AMOpc, Chain };
+ SDOperand Ops[]= { Base, Offset, AMOpc, getDefaultPred(CurDAG), Chain };
return CurDAG->getTargetNode(Opcode, MVT::i32, MVT::i32,
- MVT::Other, Ops, 4);
+ MVT::Other, Ops, 5);
}
}
// Other cases are autogenerated.
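Every hand-selected ARM node above now takes the default predicate as its last value operand, and the operand count passed to getTargetNode/SelectNodeTo grows by one to match. The recurring pattern, sketched with assumed placeholder operands rather than a real selection case:

    // Append the ARMCC::AL predicate after the normal inputs, before any
    // chain operand, and bump the operand count accordingly.
    SDOperand Ops[] = { LHS, RHS,
                        getDefaultPred(CurDAG) };   // new trailing predicate
    return CurDAG->SelectNodeTo(N, ARM::ADDrr, MVT::i32, Ops, 3);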
Index: llvm/lib/Target/ARM/ARMInstrInfo.cpp
diff -u llvm/lib/Target/ARM/ARMInstrInfo.cpp:1.23 llvm/lib/Target/ARM/ARMInstrInfo.cpp:1.24
--- llvm/lib/Target/ARM/ARMInstrInfo.cpp:1.23 Sun Apr 29 14:19:30 2007
+++ llvm/lib/Target/ARM/ARMInstrInfo.cpp Mon May 14 20:29:07 2007
@@ -52,7 +52,8 @@
return true;
case ARM::MOVr:
case ARM::tMOVr:
- assert(MI.getNumOperands() >= 2 && MI.getOperand(0).isRegister() &&
+ assert(MI.getInstrDescriptor()->numOperands >= 2 &&
+ MI.getOperand(0).isRegister() &&
MI.getOperand(1).isRegister() &&
"Invalid ARM MOV instruction");
SrcReg = MI.getOperand(1).getReg();
@@ -188,15 +189,17 @@
MachineInstr *UpdateMI = NULL;
MachineInstr *MemMI = NULL;
unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
- unsigned NumOps = MI->getNumOperands();
- bool isLoad = (MI->getInstrDescriptor()->Flags & M_LOAD_FLAG) != 0;
+ const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
+ unsigned NumOps = TID->numOperands;
+ bool isLoad = (TID->Flags & M_LOAD_FLAG) != 0;
const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
const MachineOperand &Base = MI->getOperand(2);
- const MachineOperand &Offset = MI->getOperand(NumOps-2);
+ const MachineOperand &Offset = MI->getOperand(NumOps-3);
unsigned WBReg = WB.getReg();
unsigned BaseReg = Base.getReg();
unsigned OffReg = Offset.getReg();
- unsigned OffImm = MI->getOperand(NumOps-1).getImm();
+ unsigned OffImm = MI->getOperand(NumOps-2).getImm();
+ ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
switch (AddrMode) {
default:
assert(false && "Unknown indexed op!");
@@ -211,15 +214,15 @@
// add more than 1 instruction. Abandon!
return NULL;
UpdateMI = BuildMI(get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
- .addReg(BaseReg).addImm(SOImmVal);
+ .addReg(BaseReg).addImm(SOImmVal).addImm(Pred);
} else if (Amt != 0) {
ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
UpdateMI = BuildMI(get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
- .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc);
+ .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc).addImm(Pred);
} else
UpdateMI = BuildMI(get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
- .addReg(BaseReg).addReg(OffReg);
+ .addReg(BaseReg).addReg(OffReg).addImm(Pred);
break;
}
case ARMII::AddrMode3 : {
@@ -228,10 +231,10 @@
if (OffReg == 0)
// Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
UpdateMI = BuildMI(get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
- .addReg(BaseReg).addImm(Amt);
+ .addReg(BaseReg).addImm(Amt).addImm(Pred);
else
UpdateMI = BuildMI(get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
- .addReg(BaseReg).addReg(OffReg);
+ .addReg(BaseReg).addReg(OffReg).addImm(Pred);
break;
}
}
@@ -240,19 +243,19 @@
if (isPre) {
if (isLoad)
MemMI = BuildMI(get(MemOpc), MI->getOperand(0).getReg())
- .addReg(WBReg).addReg(0).addImm(0);
+ .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
else
MemMI = BuildMI(get(MemOpc)).addReg(MI->getOperand(1).getReg())
- .addReg(WBReg).addReg(0).addImm(0);
+ .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
NewMIs.push_back(MemMI);
NewMIs.push_back(UpdateMI);
} else {
if (isLoad)
MemMI = BuildMI(get(MemOpc), MI->getOperand(0).getReg())
- .addReg(BaseReg).addReg(0).addImm(0);
+ .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
else
MemMI = BuildMI(get(MemOpc)).addReg(MI->getOperand(1).getReg())
- .addReg(BaseReg).addReg(0).addImm(0);
+ .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
if (WB.isDead())
UpdateMI->getOperand(0).setIsDead();
NewMIs.push_back(UpdateMI);
@@ -437,7 +440,8 @@
const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();
// Basic size info comes from the TSFlags field.
- unsigned TSFlags = MI->getInstrDescriptor()->TSFlags;
+ const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
+ unsigned TSFlags = TID->TSFlags;
switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
default:
@@ -463,7 +467,8 @@
case ARM::tBR_JTr: {
// These are jumptable branches, i.e. a branch followed by an inlined
// jumptable. The size is 4 + 4 * number of entries.
- unsigned JTI = MI->getOperand(MI->getNumOperands()-2).getJumpTableIndex();
+ unsigned NumOps = TID->numOperands;
+ unsigned JTI = MI->getOperand(NumOps-3).getJumpTableIndex();
MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
assert(JTI < JT.size());
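Since the predicate lands at the end of each predicable instruction's declared operand list, indices computed from the operand count shift by one, and instructions synthesized from a predicated one forward its condition (the Pred immediate threaded through the BuildMI calls above). A sketch of reading the layout back, assuming an addressing-mode instruction like the ones handled in this file:

    const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
    unsigned NumOps = TID->numOperands;
    // The last declared operand is now the predicate immediate...
    ARMCC::CondCodes Pred =
        (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
    // ...so what used to be the last operand (e.g. the offset immediate)
    // is found one slot earlier.
    unsigned OffImm = MI->getOperand(NumOps-2).getImm();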
Index: llvm/lib/Target/ARM/ARMInstrInfo.td
diff -u llvm/lib/Target/ARM/ARMInstrInfo.td:1.100 llvm/lib/Target/ARM/ARMInstrInfo.td:1.101
--- llvm/lib/Target/ARM/ARMInstrInfo.td:1.100 Tue May 8 16:08:43 2007
+++ llvm/lib/Target/ARM/ARMInstrInfo.td Mon May 14 20:29:07 2007
@@ -340,7 +340,7 @@
}
class InstARM<bits<4> opcod, AddrMode am, SizeFlagVal sz, IndexMode im,
- dag ops, string asmstr, string cstr>
+ string cstr>
: Instruction {
let Namespace = "ARM";
@@ -354,58 +354,59 @@
IndexMode IM = im;
bits<2> IndexModeBits = IM.Value;
- dag OperandList = ops;
- let AsmString = asmstr;
let Constraints = cstr;
}
class PseudoInst<dag ops, string asm, list<dag> pattern>
- : InstARM<0, AddrModeNone, SizeSpecial, IndexModeNone, ops, asm, ""> {
+ : InstARM<0, AddrModeNone, SizeSpecial, IndexModeNone, ""> {
+ let OperandList = ops;
+ let AsmString = asm;
let Pattern = pattern;
}
-class I<dag ops, AddrMode am, SizeFlagVal sz, IndexMode im,
- string asm, string cstr, list<dag> pattern>
+// Almost all ARM instructions are predicable.
+class I<dag oprnds, AddrMode am, SizeFlagVal sz, IndexMode im,
+ string opc, string asm, string cstr, list<dag> pattern>
// FIXME: Set all opcodes to 0 for now.
- : InstARM<0, am, sz, im, ops, asm, cstr> {
+ : InstARM<0, am, sz, im, cstr> {
+ let OperandList = !con(oprnds, (ops pred:$p));
+ let AsmString = !strconcat(opc, !strconcat("$p", asm));
let Pattern = pattern;
list<Predicate> Predicates = [IsARM];
}
-class AI<dag ops, string asm, list<dag> pattern>
- : I<ops, AddrModeNone, Size4Bytes, IndexModeNone, asm, "", pattern>;
-class AI1<dag ops, string asm, list<dag> pattern>
- : I<ops, AddrMode1, Size4Bytes, IndexModeNone, asm, "", pattern>;
-class AI2<dag ops, string asm, list<dag> pattern>
- : I<ops, AddrMode2, Size4Bytes, IndexModeNone, asm, "", pattern>;
-class AI3<dag ops, string asm, list<dag> pattern>
- : I<ops, AddrMode3, Size4Bytes, IndexModeNone, asm, "", pattern>;
-class AI4<dag ops, string asm, list<dag> pattern>
- : I<ops, AddrMode4, Size4Bytes, IndexModeNone, asm, "", pattern>;
-class AIx2<dag ops, string asm, list<dag> pattern>
- : I<ops, AddrModeNone, Size8Bytes, IndexModeNone, asm, "", pattern>;
-class AI1x2<dag ops, string asm, list<dag> pattern>
- : I<ops, AddrMode1, Size8Bytes, IndexModeNone, asm, "", pattern>;
+class AI<dag ops, string opc, string asm, list<dag> pattern>
+ : I<ops, AddrModeNone, Size4Bytes, IndexModeNone, opc, asm, "", pattern>;
+class AI1<dag ops, string opc, string asm, list<dag> pattern>
+ : I<ops, AddrMode1, Size4Bytes, IndexModeNone, opc, asm, "", pattern>;
+class AI2<dag ops, string opc, string asm, list<dag> pattern>
+ : I<ops, AddrMode2, Size4Bytes, IndexModeNone, opc, asm, "", pattern>;
+class AI3<dag ops, string opc, string asm, list<dag> pattern>
+ : I<ops, AddrMode3, Size4Bytes, IndexModeNone, opc, asm, "", pattern>;
+class AI4<dag ops, string opc, string asm, list<dag> pattern>
+ : I<ops, AddrMode4, Size4Bytes, IndexModeNone, opc, asm, "", pattern>;
+class AI1x2<dag ops, string opc, string asm, list<dag> pattern>
+ : I<ops, AddrMode1, Size8Bytes, IndexModeNone, opc, asm, "", pattern>;
// Pre-indexed ops
-class AI2pr<dag ops, string asm, string cstr, list<dag> pattern>
- : I<ops, AddrMode2, Size4Bytes, IndexModePre, asm, cstr, pattern>;
-class AI3pr<dag ops, string asm, string cstr, list<dag> pattern>
- : I<ops, AddrMode3, Size4Bytes, IndexModePre, asm, cstr, pattern>;
+class AI2pr<dag ops, string opc, string asm, string cstr, list<dag> pattern>
+ : I<ops, AddrMode2, Size4Bytes, IndexModePre, opc, asm, cstr, pattern>;
+class AI3pr<dag ops, string opc, string asm, string cstr, list<dag> pattern>
+ : I<ops, AddrMode3, Size4Bytes, IndexModePre, opc, asm, cstr, pattern>;
// Post-indexed ops
-class AI2po<dag ops, string asm, string cstr, list<dag> pattern>
- : I<ops, AddrMode2, Size4Bytes, IndexModePost, asm, cstr, pattern>;
-class AI3po<dag ops, string asm, string cstr, list<dag> pattern>
- : I<ops, AddrMode3, Size4Bytes, IndexModePost, asm, cstr, pattern>;
+class AI2po<dag ops, string opc, string asm, string cstr, list<dag> pattern>
+ : I<ops, AddrMode2, Size4Bytes, IndexModePost, opc, asm, cstr, pattern>;
+class AI3po<dag ops, string opc, string asm, string cstr, list<dag> pattern>
+ : I<ops, AddrMode3, Size4Bytes, IndexModePost, opc, asm, cstr, pattern>;
// BR_JT instructions
-class JTI<dag ops, string asm, list<dag> pattern>
- : I<ops, AddrModeNone, SizeSpecial, IndexModeNone, asm, "", pattern>;
-class JTI1<dag ops, string asm, list<dag> pattern>
- : I<ops, AddrMode1, SizeSpecial, IndexModeNone, asm, "", pattern>;
-class JTI2<dag ops, string asm, list<dag> pattern>
- : I<ops, AddrMode2, SizeSpecial, IndexModeNone, asm, "", pattern>;
+class JTI<dag ops, string opc, string asm, list<dag> pattern>
+ : I<ops, AddrModeNone, SizeSpecial, IndexModeNone, opc, asm, "", pattern>;
+class JTI1<dag ops, string opc, string asm, list<dag> pattern>
+ : I<ops, AddrMode1, SizeSpecial, IndexModeNone, opc, asm, "", pattern>;
+class JTI2<dag ops, string opc, string asm, list<dag> pattern>
+ : I<ops, AddrMode2, SizeSpecial, IndexModeNone, opc, asm, "", pattern>;
class BinOpFrag<dag res> : PatFrag<(ops node:$LHS, node:$RHS), res>;
@@ -416,13 +417,13 @@
/// binop that produces a value.
multiclass AI1_bin_irs<string opc, PatFrag opnode> {
def ri : AI1<(ops GPR:$dst, GPR:$a, so_imm:$b),
- !strconcat(opc, " $dst, $a, $b"),
+ opc, " $dst, $a, $b",
[(set GPR:$dst, (opnode GPR:$a, so_imm:$b))]>;
def rr : AI1<(ops GPR:$dst, GPR:$a, GPR:$b),
- !strconcat(opc, " $dst, $a, $b"),
+ opc, " $dst, $a, $b",
[(set GPR:$dst, (opnode GPR:$a, GPR:$b))]>;
def rs : AI1<(ops GPR:$dst, GPR:$a, so_reg:$b),
- !strconcat(opc, " $dst, $a, $b"),
+ opc, " $dst, $a, $b",
[(set GPR:$dst, (opnode GPR:$a, so_reg:$b))]>;
}
@@ -430,23 +431,23 @@
/// Similar to AI1_bin_irs except the instruction does not produce a result.
multiclass AI1_bin0_irs<string opc, PatFrag opnode> {
def ri : AI1<(ops GPR:$a, so_imm:$b),
- !strconcat(opc, " $a, $b"),
+ opc, " $a, $b",
[(opnode GPR:$a, so_imm:$b)]>;
def rr : AI1<(ops GPR:$a, GPR:$b),
- !strconcat(opc, " $a, $b"),
+ opc, " $a, $b",
[(opnode GPR:$a, GPR:$b)]>;
def rs : AI1<(ops GPR:$a, so_reg:$b),
- !strconcat(opc, " $a, $b"),
+ opc, " $a, $b",
[(opnode GPR:$a, so_reg:$b)]>;
}
/// AI1_bin_is - Defines a set of (op r, {so_imm|so_reg}) patterns for a binop.
multiclass AI1_bin_is<string opc, PatFrag opnode> {
def ri : AI1<(ops GPR:$dst, GPR:$a, so_imm:$b),
- !strconcat(opc, " $dst, $a, $b"),
+ opc, " $dst, $a, $b",
[(set GPR:$dst, (opnode GPR:$a, so_imm:$b))]>;
def rs : AI1<(ops GPR:$dst, GPR:$a, so_reg:$b),
- !strconcat(opc, " $dst, $a, $b"),
+ opc, " $dst, $a, $b",
[(set GPR:$dst, (opnode GPR:$a, so_reg:$b))]>;
}
@@ -454,13 +455,13 @@
/// ops.
multiclass AI1_unary_irs<string opc, PatFrag opnode> {
def i : AI1<(ops GPR:$dst, so_imm:$a),
- !strconcat(opc, " $dst, $a"),
+ opc, " $dst, $a",
[(set GPR:$dst, (opnode so_imm:$a))]>;
def r : AI1<(ops GPR:$dst, GPR:$a),
- !strconcat(opc, " $dst, $a"),
+ opc, " $dst, $a",
[(set GPR:$dst, (opnode GPR:$a))]>;
def s : AI1<(ops GPR:$dst, so_reg:$a),
- !strconcat(opc, " $dst, $a"),
+ opc, " $dst, $a",
[(set GPR:$dst, (opnode so_reg:$a))]>;
}
@@ -468,10 +469,10 @@
/// register and one whose operand is a register rotated by 8/16/24.
multiclass AI_unary_rrot<string opc, PatFrag opnode> {
def r : AI<(ops GPR:$dst, GPR:$Src),
- !strconcat(opc, " $dst, $Src"),
+ opc, " $dst, $Src",
[(set GPR:$dst, (opnode GPR:$Src))]>, Requires<[IsARM, HasV6]>;
def r_rot : AI<(ops GPR:$dst, GPR:$Src, i32imm:$rot),
- !strconcat(opc, " $dst, $Src, ror $rot"),
+ opc, " $dst, $Src, ror $rot",
[(set GPR:$dst, (opnode (rotr GPR:$Src, rot_imm:$rot)))]>,
Requires<[IsARM, HasV6]>;
}
@@ -480,16 +481,39 @@
/// register and one whose operand is a register rotated by 8/16/24.
multiclass AI_bin_rrot<string opc, PatFrag opnode> {
def rr : AI<(ops GPR:$dst, GPR:$LHS, GPR:$RHS),
- !strconcat(opc, " $dst, $LHS, $RHS"),
+ opc, " $dst, $LHS, $RHS",
[(set GPR:$dst, (opnode GPR:$LHS, GPR:$RHS))]>,
Requires<[IsARM, HasV6]>;
def rr_rot : AI<(ops GPR:$dst, GPR:$LHS, GPR:$RHS, i32imm:$rot),
- !strconcat(opc, " $dst, $LHS, $RHS, ror $rot"),
+ opc, " $dst, $LHS, $RHS, ror $rot",
[(set GPR:$dst, (opnode GPR:$LHS,
(rotr GPR:$RHS, rot_imm:$rot)))]>,
Requires<[IsARM, HasV6]>;
}
+// Special cases.
+class XI<dag oprnds, AddrMode am, SizeFlagVal sz, IndexMode im,
+ string asm, string cstr, list<dag> pattern>
+ // FIXME: Set all opcodes to 0 for now.
+ : InstARM<0, am, sz, im, cstr> {
+ let OperandList = oprnds;
+ let AsmString = asm;
+ let Pattern = pattern;
+ list<Predicate> Predicates = [IsARM];
+}
+
+class AXI<dag ops, string asm, list<dag> pattern>
+ : XI<ops, AddrModeNone, Size4Bytes, IndexModeNone, asm, "", pattern>;
+class AXI1<dag ops, string asm, list<dag> pattern>
+ : XI<ops, AddrMode1, Size4Bytes, IndexModeNone, asm, "", pattern>;
+class AXI2<dag ops, string asm, list<dag> pattern>
+ : XI<ops, AddrMode2, Size4Bytes, IndexModeNone, asm, "", pattern>;
+class AXI4<dag ops, string asm, list<dag> pattern>
+ : XI<ops, AddrMode4, Size4Bytes, IndexModeNone, asm, "", pattern>;
+
+class AXIx2<dag ops, string asm, list<dag> pattern>
+ : XI<ops, AddrModeNone, Size8Bytes, IndexModeNone, asm, "", pattern>;
+
//===----------------------------------------------------------------------===//
// Instructions
@@ -499,7 +523,7 @@
// Miscellaneous Instructions.
//
def IMPLICIT_DEF_GPR :
-PseudoInst<(ops GPR:$rD),
+PseudoInst<(ops GPR:$rD, pred:$p),
"@ IMPLICIT_DEF_GPR $rD",
[(set GPR:$rD, (undef))]>;
@@ -513,12 +537,12 @@
"${instid:label} ${cpidx:cpentry}", []>;
def ADJCALLSTACKUP :
-PseudoInst<(ops i32imm:$amt),
+PseudoInst<(ops i32imm:$amt, pred:$p),
"@ ADJCALLSTACKUP $amt",
[(ARMcallseq_end imm:$amt)]>, Imp<[SP],[SP]>;
def ADJCALLSTACKDOWN :
-PseudoInst<(ops i32imm:$amt),
+PseudoInst<(ops i32imm:$amt, pred:$p),
"@ ADJCALLSTACKDOWN $amt",
[(ARMcallseq_start imm:$amt)]>, Imp<[SP],[SP]>;
@@ -527,12 +551,12 @@
".loc $file, $line, $col",
[(dwarf_loc (i32 imm:$line), (i32 imm:$col), (i32 imm:$file))]>;
-def PICADD : AI1<(ops GPR:$dst, GPR:$a, pclabel:$cp),
- "$cp:\n\tadd $dst, pc, $a",
- [(set GPR:$dst, (ARMpic_add GPR:$a, imm:$cp))]>;
+def PICADD : AXI1<(ops GPR:$dst, GPR:$a, pclabel:$cp, pred:$p),
+ "$cp:\n\tadd$p $dst, pc, $a",
+ [(set GPR:$dst, (ARMpic_add GPR:$a, imm:$cp))]>;
let AddedComplexity = 10 in
-def PICLD : AI2<(ops GPR:$dst, addrmodepc:$addr),
- "${addr:label}:\n\tldr $dst, $addr",
+def PICLD : AXI2<(ops GPR:$dst, addrmodepc:$addr, pred:$p),
+ "${addr:label}:\n\tldr$p $dst, $addr",
[(set GPR:$dst, (load addrmodepc:$addr))]>;
//===----------------------------------------------------------------------===//
@@ -540,53 +564,53 @@
//
let isReturn = 1, isTerminator = 1 in
- def BX_RET : AI<(ops), "bx lr", [(ARMretflag)]>;
+ def BX_RET : AI<(ops), "bx", " lr", [(ARMretflag)]>;
// FIXME: remove when we have a way to marking a MI with these properties.
let isLoad = 1, isReturn = 1, isTerminator = 1 in
- def LDM_RET : AI4<(ops addrmode4:$addr, reglist:$dst1, variable_ops),
- "ldm${addr:submode} $addr, $dst1",
+ def LDM_RET : AXI4<(ops addrmode4:$addr, pred:$p, reglist:$dst1, variable_ops),
+ "ldm${p}${addr:submode} $addr, $dst1",
[]>;
let isCall = 1, noResults = 1,
Defs = [R0, R1, R2, R3, R12, LR,
D0, D1, D2, D3, D4, D5, D6, D7] in {
- def BL : AI<(ops i32imm:$func, variable_ops),
- "bl ${func:call}",
- [(ARMcall tglobaladdr:$func)]>;
+ def BL : AXI<(ops i32imm:$func, pred:$p, variable_ops),
+ "bl$p ${func:call}",
+ [(ARMcall tglobaladdr:$func)]>;
// ARMv5T and above
- def BLX : AI<(ops GPR:$dst, variable_ops),
- "blx $dst",
- [(ARMcall GPR:$dst)]>, Requires<[IsARM, HasV5T]>;
+ def BLX : AXI<(ops GPR:$dst, pred:$p, variable_ops),
+ "blx$p $dst",
+ [(ARMcall GPR:$dst)]>, Requires<[IsARM, HasV5T]>;
let Uses = [LR] in {
// ARMv4T
- def BX : AIx2<(ops GPR:$dst, variable_ops),
- "mov lr, pc\n\tbx $dst",
+ def BX : AXIx2<(ops GPR:$dst, pred:$p, variable_ops),
+ "mov$p lr, pc\n\tbx$p $dst",
[(ARMcall_nolink GPR:$dst)]>;
}
}
-let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
- def B : AI<(ops brtarget:$dst), "b $dst",
- [(br bb:$dst)]>;
+let isBranch = 1, isTerminator = 1, noResults = 1, isBarrier = 1 in {
+ // B can be changed into a Bcc, but it is not "predicated".
+ def B : AXI<(ops brtarget:$dst), "b $dst",
+ [(br bb:$dst)]>;
+
+ def Bcc : AXI<(ops brtarget:$dst, ccop:$cc), "b$cc $dst",
+ [(ARMbrcond bb:$dst, imm:$cc)]>;
def BR_JTr : JTI<(ops GPR:$dst, jtblock_operand:$jt, i32imm:$id),
- "mov pc, $dst \n$jt",
+ "mov", " pc, $dst \n$jt",
[(ARMbrjt GPR:$dst, tjumptable:$jt, imm:$id)]>;
def BR_JTm : JTI2<(ops addrmode2:$dst, jtblock_operand:$jt, i32imm:$id),
- "ldr pc, $dst \n$jt",
+ "ldr", " pc, $dst \n$jt",
[(ARMbrjt (i32 (load addrmode2:$dst)), tjumptable:$jt,
imm:$id)]>;
def BR_JTadd : JTI1<(ops GPR:$dst, GPR:$idx, jtblock_operand:$jt, i32imm:$id),
- "add pc, $dst, $idx \n$jt",
+ "add", " pc, $dst, $idx \n$jt",
[(ARMbrjt (add GPR:$dst, GPR:$idx), tjumptable:$jt,
imm:$id)]>;
}
-let isBranch = 1, isTerminator = 1, noResults = 1, isBarrier = 1 in
- def Bcc : AI<(ops brtarget:$dst, ccop:$cc), "b$cc $dst",
- [(ARMbrcond bb:$dst, imm:$cc)]>;
-
//===----------------------------------------------------------------------===//
// Load / store Instructions.
//
@@ -594,117 +618,117 @@
// Load
let isLoad = 1 in {
def LDR : AI2<(ops GPR:$dst, addrmode2:$addr),
- "ldr $dst, $addr",
+ "ldr", " $dst, $addr",
[(set GPR:$dst, (load addrmode2:$addr))]>;
// Special LDR for loads from non-pc-relative constpools.
let isReMaterializable = 1 in
def LDRcp : AI2<(ops GPR:$dst, addrmode2:$addr),
- "ldr $dst, $addr", []>;
+ "ldr", " $dst, $addr", []>;
// Loads with zero extension
def LDRH : AI3<(ops GPR:$dst, addrmode3:$addr),
- "ldrh $dst, $addr",
+ "ldrh", " $dst, $addr",
[(set GPR:$dst, (zextloadi16 addrmode3:$addr))]>;
def LDRB : AI2<(ops GPR:$dst, addrmode2:$addr),
- "ldrb $dst, $addr",
+ "ldrb", " $dst, $addr",
[(set GPR:$dst, (zextloadi8 addrmode2:$addr))]>;
// Loads with sign extension
def LDRSH : AI3<(ops GPR:$dst, addrmode3:$addr),
- "ldrsh $dst, $addr",
+ "ldrsh", " $dst, $addr",
[(set GPR:$dst, (sextloadi16 addrmode3:$addr))]>;
def LDRSB : AI3<(ops GPR:$dst, addrmode3:$addr),
- "ldrsb $dst, $addr",
+ "ldrsb", " $dst, $addr",
[(set GPR:$dst, (sextloadi8 addrmode3:$addr))]>;
// Load doubleword
def LDRD : AI3<(ops GPR:$dst, addrmode3:$addr),
- "ldrd $dst, $addr",
+ "ldrd", " $dst, $addr",
[]>, Requires<[IsARM, HasV5T]>;
// Indexed loads
def LDR_PRE : AI2pr<(ops GPR:$dst, GPR:$base_wb, addrmode2:$addr),
- "ldr $dst, $addr!", "$addr.base = $base_wb", []>;
+ "ldr", " $dst, $addr!", "$addr.base = $base_wb", []>;
def LDR_POST : AI2po<(ops GPR:$dst, GPR:$base_wb, GPR:$base, am2offset:$offset),
- "ldr $dst, [$base], $offset", "$base = $base_wb", []>;
+ "ldr", " $dst, [$base], $offset", "$base = $base_wb", []>;
def LDRH_PRE : AI3pr<(ops GPR:$dst, GPR:$base_wb, addrmode3:$addr),
- "ldrh $dst, $addr!", "$addr.base = $base_wb", []>;
+ "ldrh", " $dst, $addr!", "$addr.base = $base_wb", []>;
def LDRH_POST : AI3po<(ops GPR:$dst, GPR:$base_wb, GPR:$base,am3offset:$offset),
- "ldrh $dst, [$base], $offset", "$base = $base_wb", []>;
+ "ldrh", " $dst, [$base], $offset", "$base = $base_wb", []>;
def LDRB_PRE : AI2pr<(ops GPR:$dst, GPR:$base_wb, addrmode2:$addr),
- "ldrb $dst, $addr!", "$addr.base = $base_wb", []>;
+ "ldrb", " $dst, $addr!", "$addr.base = $base_wb", []>;
def LDRB_POST : AI2po<(ops GPR:$dst, GPR:$base_wb, GPR:$base,am2offset:$offset),
- "ldrb $dst, [$base], $offset", "$base = $base_wb", []>;
+ "ldrb", " $dst, [$base], $offset", "$base = $base_wb", []>;
def LDRSH_PRE : AI3pr<(ops GPR:$dst, GPR:$base_wb, addrmode3:$addr),
- "ldrsh $dst, $addr!", "$addr.base = $base_wb", []>;
+ "ldrsh", " $dst, $addr!", "$addr.base = $base_wb", []>;
def LDRSH_POST: AI3po<(ops GPR:$dst, GPR:$base_wb, GPR:$base,am3offset:$offset),
- "ldrsh $dst, [$base], $offset", "$base = $base_wb", []>;
+ "ldrsh", " $dst, [$base], $offset", "$base = $base_wb", []>;
def LDRSB_PRE : AI3pr<(ops GPR:$dst, GPR:$base_wb, addrmode3:$addr),
- "ldrsb $dst, $addr!", "$addr.base = $base_wb", []>;
+ "ldrsb", " $dst, $addr!", "$addr.base = $base_wb", []>;
def LDRSB_POST: AI3po<(ops GPR:$dst, GPR:$base_wb, GPR:$base,am3offset:$offset),
- "ldrsb $dst, [$base], $offset", "$base = $base_wb", []>;
+ "ldrsb", " $dst, [$base], $offset", "$base = $base_wb", []>;
} // isLoad
// Store
let isStore = 1 in {
def STR : AI2<(ops GPR:$src, addrmode2:$addr),
- "str $src, $addr",
+ "str", " $src, $addr",
[(store GPR:$src, addrmode2:$addr)]>;
// Stores with truncate
def STRH : AI3<(ops GPR:$src, addrmode3:$addr),
- "strh $src, $addr",
+ "strh", " $src, $addr",
[(truncstorei16 GPR:$src, addrmode3:$addr)]>;
def STRB : AI2<(ops GPR:$src, addrmode2:$addr),
- "strb $src, $addr",
+ "strb", " $src, $addr",
[(truncstorei8 GPR:$src, addrmode2:$addr)]>;
// Store doubleword
def STRD : AI3<(ops GPR:$src, addrmode3:$addr),
- "strd $src, $addr",
+ "strd", " $src, $addr",
[]>, Requires<[IsARM, HasV5T]>;
// Indexed stores
def STR_PRE : AI2pr<(ops GPR:$base_wb, GPR:$src, GPR:$base, am2offset:$offset),
- "str $src, [$base, $offset]!", "$base = $base_wb",
+ "str", " $src, [$base, $offset]!", "$base = $base_wb",
[(set GPR:$base_wb,
(pre_store GPR:$src, GPR:$base, am2offset:$offset))]>;
def STR_POST : AI2po<(ops GPR:$base_wb, GPR:$src, GPR:$base,am2offset:$offset),
- "str $src, [$base], $offset", "$base = $base_wb",
+ "str", " $src, [$base], $offset", "$base = $base_wb",
[(set GPR:$base_wb,
(post_store GPR:$src, GPR:$base, am2offset:$offset))]>;
def STRH_PRE : AI3pr<(ops GPR:$base_wb, GPR:$src, GPR:$base,am3offset:$offset),
- "strh $src, [$base, $offset]!", "$base = $base_wb",
+ "strh", " $src, [$base, $offset]!", "$base = $base_wb",
[(set GPR:$base_wb,
(pre_truncsti16 GPR:$src, GPR:$base,am3offset:$offset))]>;
def STRH_POST: AI3po<(ops GPR:$base_wb, GPR:$src, GPR:$base,am3offset:$offset),
- "strh $src, [$base], $offset", "$base = $base_wb",
+ "strh", " $src, [$base], $offset", "$base = $base_wb",
[(set GPR:$base_wb, (post_truncsti16 GPR:$src,
GPR:$base, am3offset:$offset))]>;
def STRB_PRE : AI2pr<(ops GPR:$base_wb, GPR:$src, GPR:$base,am2offset:$offset),
- "strb $src, [$base, $offset]!", "$base = $base_wb",
+ "strb", " $src, [$base, $offset]!", "$base = $base_wb",
[(set GPR:$base_wb, (pre_truncsti8 GPR:$src,
GPR:$base, am2offset:$offset))]>;
def STRB_POST: AI2po<(ops GPR:$base_wb, GPR:$src, GPR:$base,am2offset:$offset),
- "strb $src, [$base], $offset", "$base = $base_wb",
+ "strb", " $src, [$base], $offset", "$base = $base_wb",
[(set GPR:$base_wb, (post_truncsti8 GPR:$src,
GPR:$base, am2offset:$offset))]>;
} // isStore
@@ -714,42 +738,41 @@
//
let isLoad = 1 in
-def LDM : AI4<(ops addrmode4:$addr, reglist:$dst1, variable_ops),
- "ldm${addr:submode} $addr, $dst1",
- []>;
+def LDM : AXI4<(ops addrmode4:$addr, pred:$p, reglist:$dst1, variable_ops),
+ "ldm${p}${addr:submode} $addr, $dst1",
+ []>;
let isStore = 1 in
-def STM : AI4<(ops addrmode4:$addr, reglist:$src1, variable_ops),
- "stm${addr:submode} $addr, $src1",
- []>;
+def STM : AXI4<(ops addrmode4:$addr, pred:$p, reglist:$src1, variable_ops),
+ "stm${p}${addr:submode} $addr, $src1",
+ []>;
//===----------------------------------------------------------------------===//
// Move Instructions.
//
def MOVr : AI1<(ops GPR:$dst, GPR:$src),
- "mov $dst, $src", []>;
+ "mov", " $dst, $src", []>;
def MOVs : AI1<(ops GPR:$dst, so_reg:$src),
- "mov $dst, $src", [(set GPR:$dst, so_reg:$src)]>;
+ "mov", " $dst, $src", [(set GPR:$dst, so_reg:$src)]>;
let isReMaterializable = 1 in
def MOVi : AI1<(ops GPR:$dst, so_imm:$src),
- "mov $dst, $src", [(set GPR:$dst, so_imm:$src)]>;
+ "mov", " $dst, $src", [(set GPR:$dst, so_imm:$src)]>;
// These aren't really mov instructions, but we have to define them this way
// due to flag operands.
def MOVsrl_flag : AI1<(ops GPR:$dst, GPR:$src),
- "movs $dst, $src, lsr #1",
+ "movs", " $dst, $src, lsr #1",
[(set GPR:$dst, (ARMsrl_flag GPR:$src))]>;
def MOVsra_flag : AI1<(ops GPR:$dst, GPR:$src),
- "movs $dst, $src, asr #1",
+ "movs", " $dst, $src, asr #1",
[(set GPR:$dst, (ARMsra_flag GPR:$src))]>;
def MOVrx : AI1<(ops GPR:$dst, GPR:$src),
- "mov $dst, $src, rrx",
+ "mov", " $dst, $src, rrx",
[(set GPR:$dst, (ARMrrx GPR:$src))]>;
-
//===----------------------------------------------------------------------===//
// Extend Instructions.
//
@@ -831,12 +854,12 @@
defm BIC : AI1_bin_irs<"bic", BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
def MVNr : AI<(ops GPR:$dst, GPR:$src),
- "mvn $dst, $src", [(set GPR:$dst, (not GPR:$src))]>;
+ "mvn", " $dst, $src", [(set GPR:$dst, (not GPR:$src))]>;
def MVNs : AI<(ops GPR:$dst, so_reg:$src),
- "mvn $dst, $src", [(set GPR:$dst, (not so_reg:$src))]>;
+ "mvn", " $dst, $src", [(set GPR:$dst, (not so_reg:$src))]>;
let isReMaterializable = 1 in
def MVNi : AI<(ops GPR:$dst, so_imm:$imm),
- "mvn $dst, $imm", [(set GPR:$dst, so_imm_not:$imm)]>;
+ "mvn", " $dst, $imm", [(set GPR:$dst, so_imm_not:$imm)]>;
def : ARMPat<(and GPR:$src, so_imm_not:$imm),
(BICri GPR:$src, so_imm_not:$imm)>;
@@ -848,13 +871,13 @@
// AI_orr - Defines a (op r, r) pattern.
class AI_orr<string opc, SDNode opnode>
: AI<(ops GPR:$dst, GPR:$a, GPR:$b),
- !strconcat(opc, " $dst, $a, $b"),
+ opc, " $dst, $a, $b",
[(set GPR:$dst, (opnode GPR:$a, GPR:$b))]>;
// AI_oorr - Defines a (op (op r, r), r) pattern.
class AI_oorr<string opc, SDNode opnode1, SDNode opnode2>
: AI<(ops GPR:$dst, GPR:$a, GPR:$b, GPR:$c),
- !strconcat(opc, " $dst, $a, $b, $c"),
+ opc, " $dst, $a, $b, $c",
[(set GPR:$dst, (opnode1 (opnode2 GPR:$a, GPR:$b), GPR:$c))]>;
def MUL : AI_orr<"mul", mul>;
@@ -862,24 +885,24 @@
// Extra precision multiplies with low / high results
def SMULL : AI<(ops GPR:$ldst, GPR:$hdst, GPR:$a, GPR:$b),
- "smull $ldst, $hdst, $a, $b",
+ "smull", " $ldst, $hdst, $a, $b",
[]>;
def UMULL : AI<(ops GPR:$ldst, GPR:$hdst, GPR:$a, GPR:$b),
- "umull $ldst, $hdst, $a, $b",
+ "umull", " $ldst, $hdst, $a, $b",
[]>;
// Multiply + accumulate
def SMLAL : AI<(ops GPR:$ldst, GPR:$hdst, GPR:$a, GPR:$b),
- "smlal $ldst, $hdst, $a, $b",
+ "smlal", " $ldst, $hdst, $a, $b",
[]>;
def UMLAL : AI<(ops GPR:$ldst, GPR:$hdst, GPR:$a, GPR:$b),
- "umlal $ldst, $hdst, $a, $b",
+ "umlal", " $ldst, $hdst, $a, $b",
[]>;
def UMAAL : AI<(ops GPR:$ldst, GPR:$hdst, GPR:$a, GPR:$b),
- "umaal $ldst, $hdst, $a, $b",
+ "umaal", " $ldst, $hdst, $a, $b",
[]>, Requires<[IsARM, HasV6]>;
// Most significant word multiply
@@ -888,38 +911,38 @@
def SMMLS : AI<(ops GPR:$dst, GPR:$a, GPR:$b, GPR:$c),
- "smmls $dst, $a, $b, $c",
+ "smmls", " $dst, $a, $b, $c",
[(set GPR:$dst, (sub GPR:$c, (mulhs GPR:$a, GPR:$b)))]>,
Requires<[IsARM, HasV6]>;
multiclass AI_smul<string opc, PatFrag opnode> {
def BB : AI<(ops GPR:$dst, GPR:$a, GPR:$b),
- !strconcat(opc, "bb $dst, $a, $b"),
+ !strconcat(opc, "bb"), " $dst, $a, $b",
[(set GPR:$dst, (opnode (sext_inreg GPR:$a, i16),
(sext_inreg GPR:$b, i16)))]>,
Requires<[IsARM, HasV5TE]>;
def BT : AI<(ops GPR:$dst, GPR:$a, GPR:$b),
- !strconcat(opc, "bt $dst, $a, $b"),
+ !strconcat(opc, "bt"), " $dst, $a, $b",
[(set GPR:$dst, (opnode (sext_inreg GPR:$a, i16),
(sra GPR:$b, 16)))]>,
Requires<[IsARM, HasV5TE]>;
def TB : AI<(ops GPR:$dst, GPR:$a, GPR:$b),
- !strconcat(opc, "tb $dst, $a, $b"),
+ !strconcat(opc, "tb"), " $dst, $a, $b",
[(set GPR:$dst, (opnode (sra GPR:$a, 16),
(sext_inreg GPR:$b, i16)))]>,
Requires<[IsARM, HasV5TE]>;
def TT : AI<(ops GPR:$dst, GPR:$a, GPR:$b),
- !strconcat(opc, "tt $dst, $a, $b"),
+ !strconcat(opc, "tt"), " $dst, $a, $b",
[(set GPR:$dst, (opnode (sra GPR:$a, 16),
(sra GPR:$b, 16)))]>,
Requires<[IsARM, HasV5TE]>;
def WB : AI<(ops GPR:$dst, GPR:$a, GPR:$b),
- !strconcat(opc, "wb $dst, $a, $b"),
+ !strconcat(opc, "wb"), " $dst, $a, $b",
[(set GPR:$dst, (sra (opnode GPR:$a,
(sext_inreg GPR:$b, i16)), 16))]>,
Requires<[IsARM, HasV5TE]>;
def WT : AI<(ops GPR:$dst, GPR:$a, GPR:$b),
- !strconcat(opc, "wt $dst, $a, $b"),
+ !strconcat(opc, "wt"), " $dst, $a, $b",
[(set GPR:$dst, (sra (opnode GPR:$a,
(sra GPR:$b, 16)), 16))]>,
Requires<[IsARM, HasV5TE]>;
@@ -927,34 +950,34 @@
multiclass AI_smla<string opc, PatFrag opnode> {
def BB : AI<(ops GPR:$dst, GPR:$a, GPR:$b, GPR:$acc),
- !strconcat(opc, "bb $dst, $a, $b, $acc"),
+ !strconcat(opc, "bb"), " $dst, $a, $b, $acc",
[(set GPR:$dst, (add GPR:$acc,
(opnode (sext_inreg GPR:$a, i16),
(sext_inreg GPR:$b, i16))))]>,
Requires<[IsARM, HasV5TE]>;
def BT : AI<(ops GPR:$dst, GPR:$a, GPR:$b, GPR:$acc),
- !strconcat(opc, "bt $dst, $a, $b, $acc"),
+ !strconcat(opc, "bt"), " $dst, $a, $b, $acc",
[(set GPR:$dst, (add GPR:$acc, (opnode (sext_inreg GPR:$a, i16),
(sra GPR:$b, 16))))]>,
Requires<[IsARM, HasV5TE]>;
def TB : AI<(ops GPR:$dst, GPR:$a, GPR:$b, GPR:$acc),
- !strconcat(opc, "tb $dst, $a, $b, $acc"),
+ !strconcat(opc, "tb"), " $dst, $a, $b, $acc",
[(set GPR:$dst, (add GPR:$acc, (opnode (sra GPR:$a, 16),
(sext_inreg GPR:$b, i16))))]>,
Requires<[IsARM, HasV5TE]>;
def TT : AI<(ops GPR:$dst, GPR:$a, GPR:$b, GPR:$acc),
- !strconcat(opc, "tt $dst, $a, $b, $acc"),
+ !strconcat(opc, "tt"), " $dst, $a, $b, $acc",
[(set GPR:$dst, (add GPR:$acc, (opnode (sra GPR:$a, 16),
(sra GPR:$b, 16))))]>,
Requires<[IsARM, HasV5TE]>;
def WB : AI<(ops GPR:$dst, GPR:$a, GPR:$b, GPR:$acc),
- !strconcat(opc, "wb $dst, $a, $b, $acc"),
+ !strconcat(opc, "wb"), " $dst, $a, $b, $acc",
[(set GPR:$dst, (add GPR:$acc, (sra (opnode GPR:$a,
(sext_inreg GPR:$b, i16)), 16)))]>,
Requires<[IsARM, HasV5TE]>;
def WT : AI<(ops GPR:$dst, GPR:$a, GPR:$b, GPR:$acc),
- !strconcat(opc, "wt $dst, $a, $b, $acc"),
+ !strconcat(opc, "wt"), " $dst, $a, $b, $acc",
[(set GPR:$dst, (add GPR:$acc, (sra (opnode GPR:$a,
(sra GPR:$b, 16)), 16)))]>,
Requires<[IsARM, HasV5TE]>;
@@ -971,15 +994,15 @@
//
def CLZ : AI<(ops GPR:$dst, GPR:$src),
- "clz $dst, $src",
+ "clz", " $dst, $src",
[(set GPR:$dst, (ctlz GPR:$src))]>, Requires<[IsARM, HasV5T]>;
def REV : AI<(ops GPR:$dst, GPR:$src),
- "rev $dst, $src",
+ "rev", " $dst, $src",
[(set GPR:$dst, (bswap GPR:$src))]>, Requires<[IsARM, HasV6]>;
def REV16 : AI<(ops GPR:$dst, GPR:$src),
- "rev16 $dst, $src",
+ "rev16", " $dst, $src",
[(set GPR:$dst,
(or (and (srl GPR:$src, 8), 0xFF),
(or (and (shl GPR:$src, 8), 0xFF00),
@@ -988,7 +1011,7 @@
Requires<[IsARM, HasV6]>;
def REVSH : AI<(ops GPR:$dst, GPR:$src),
- "revsh $dst, $src",
+ "revsh", " $dst, $src",
[(set GPR:$dst,
(sext_inreg
(or (srl (and GPR:$src, 0xFF00), 8),
@@ -996,7 +1019,7 @@
Requires<[IsARM, HasV6]>;
def PKHBT : AI<(ops GPR:$dst, GPR:$src1, GPR:$src2, i32imm:$shamt),
- "pkhbt $dst, $src1, $src2, LSL $shamt",
+ "pkhbt", " $dst, $src1, $src2, LSL $shamt",
[(set GPR:$dst, (or (and GPR:$src1, 0xFFFF),
(and (shl GPR:$src2, (i32 imm:$shamt)),
0xFFFF0000)))]>,
@@ -1010,7 +1033,7 @@
def PKHTB : AI<(ops GPR:$dst, GPR:$src1, GPR:$src2, i32imm:$shamt),
- "pkhtb $dst, $src1, $src2, ASR $shamt",
+ "pkhtb", " $dst, $src1, $src2, ASR $shamt",
[(set GPR:$dst, (or (and GPR:$src1, 0xFFFF0000),
(and (sra GPR:$src2, imm16_31:$shamt),
0xFFFF)))]>, Requires<[IsARM, HasV6]>;
@@ -1046,36 +1069,36 @@
// Conditional moves
-def MOVCCr : AI<(ops GPR:$dst, GPR:$false, GPR:$true, ccop:$cc),
- "mov$cc $dst, $true",
- [(set GPR:$dst, (ARMcmov GPR:$false, GPR:$true, imm:$cc))]>,
- RegConstraint<"$false = $dst">;
-
-def MOVCCs : AI<(ops GPR:$dst, GPR:$false, so_reg:$true, ccop:$cc),
- "mov$cc $dst, $true",
- [(set GPR:$dst, (ARMcmov GPR:$false, so_reg:$true,imm:$cc))]>,
- RegConstraint<"$false = $dst">;
-
-def MOVCCi : AI<(ops GPR:$dst, GPR:$false, so_imm:$true, ccop:$cc),
- "mov$cc $dst, $true",
- [(set GPR:$dst, (ARMcmov GPR:$false, so_imm:$true,imm:$cc))]>,
- RegConstraint<"$false = $dst">;
+def MOVCCr : AXI<(ops GPR:$dst, GPR:$false, GPR:$true, ccop:$cc),
+ "mov$cc $dst, $true",
+ [(set GPR:$dst, (ARMcmov GPR:$false, GPR:$true, imm:$cc))]>,
+ RegConstraint<"$false = $dst">;
+
+def MOVCCs : AXI<(ops GPR:$dst, GPR:$false, so_reg:$true, ccop:$cc),
+ "mov$cc $dst, $true",
+ [(set GPR:$dst, (ARMcmov GPR:$false, so_reg:$true,imm:$cc))]>,
+ RegConstraint<"$false = $dst">;
+
+def MOVCCi : AXI<(ops GPR:$dst, GPR:$false, so_imm:$true, ccop:$cc),
+ "mov$cc $dst, $true",
+ [(set GPR:$dst, (ARMcmov GPR:$false, so_imm:$true,imm:$cc))]>,
+ RegConstraint<"$false = $dst">;
// LEApcrel - Load a pc-relative address into a register without offending the
// assembler.
-def LEApcrel : AI1<(ops GPR:$dst, i32imm:$label),
+def LEApcrel : AXI1<(ops GPR:$dst, i32imm:$label, pred:$p),
!strconcat(!strconcat(".set PCRELV${:uid}, ($label-(",
"${:private}PCRELL${:uid}+8))\n"),
!strconcat("${:private}PCRELL${:uid}:\n\t",
- "add $dst, pc, #PCRELV${:uid}")),
+ "add$p $dst, pc, #PCRELV${:uid}")),
[]>;
-def LEApcrelJT : AI1<(ops GPR:$dst, i32imm:$label, i32imm:$id),
+def LEApcrelJT : AXI1<(ops GPR:$dst, i32imm:$label, i32imm:$id, pred:$p),
!strconcat(!strconcat(".set PCRELV${:uid}, (${label}_${id:no_hash}-(",
"${:private}PCRELL${:uid}+8))\n"),
!strconcat("${:private}PCRELL${:uid}:\n\t",
- "add $dst, pc, #PCRELV${:uid}")),
+ "add$p $dst, pc, #PCRELV${:uid}")),
[]>;
//===----------------------------------------------------------------------===//
// TLS Instructions
@@ -1084,8 +1107,8 @@
// __aeabi_read_tp preserves the registers r1-r3.
let isCall = 1,
Defs = [R0, R12, LR] in {
- def TPsoft : AI<(ops),
- "bl __aeabi_read_tp",
+ def TPsoft : AI<(ops),
+ "bl", " __aeabi_read_tp",
[(set R0, ARMthread_pointer)]>;
}
@@ -1104,7 +1127,7 @@
// Two piece so_imms.
let isReMaterializable = 1 in
def MOVi2pieces : AI1x2<(ops GPR:$dst, so_imm2part:$src),
- "mov $dst, $src",
+ "mov", " $dst, $src",
[(set GPR:$dst, so_imm2part:$src)]>;
def : ARMPat<(or GPR:$LHS, so_imm2part:$RHS),
Index: llvm/lib/Target/ARM/ARMInstrThumb.td
diff -u llvm/lib/Target/ARM/ARMInstrThumb.td:1.26 llvm/lib/Target/ARM/ARMInstrThumb.td:1.27
--- llvm/lib/Target/ARM/ARMInstrThumb.td:1.26 Tue May 8 16:08:43 2007
+++ llvm/lib/Target/ARM/ARMInstrThumb.td Mon May 14 20:29:07 2007
@@ -32,7 +32,9 @@
class ThumbI<dag ops, AddrMode am, SizeFlagVal sz,
string asm, string cstr, list<dag> pattern>
// FIXME: Set all opcodes to 0 for now.
- : InstARM<0, am, sz, IndexModeNone, ops, asm, cstr> {
+ : InstARM<0, am, sz, IndexModeNone, cstr> {
+ let OperandList = ops;
+ let AsmString = asm;
let Pattern = pattern;
list<Predicate> Predicates = [IsThumb];
}
@@ -157,6 +159,16 @@
// Miscellaneous Instructions.
//
+def tADJCALLSTACKUP :
+PseudoInst<(ops i32imm:$amt),
+ "@ tADJCALLSTACKUP $amt",
+ [(ARMcallseq_end imm:$amt)]>, Imp<[SP],[SP]>, Requires<[IsThumb]>;
+
+def tADJCALLSTACKDOWN :
+PseudoInst<(ops i32imm:$amt),
+ "@ tADJCALLSTACKDOWN $amt",
+ [(ARMcallseq_start imm:$amt)]>, Imp<[SP],[SP]>, Requires<[IsThumb]>;
+
def tPICADD : TIt<(ops GPR:$dst, GPR:$lhs, pclabel:$cp),
"$cp:\n\tadd $dst, pc",
[(set GPR:$dst, (ARMpic_add GPR:$lhs, imm:$cp))]>;
Index: llvm/lib/Target/ARM/ARMInstrVFP.td
diff -u llvm/lib/Target/ARM/ARMInstrVFP.td:1.5 llvm/lib/Target/ARM/ARMInstrVFP.td:1.6
--- llvm/lib/Target/ARM/ARMInstrVFP.td:1.5 Tue May 8 16:08:43 2007
+++ llvm/lib/Target/ARM/ARMInstrVFP.td Mon May 14 20:29:07 2007
@@ -16,25 +16,49 @@
//
// ARM Float Instruction
-class ASI<dag ops, string asm, list<dag> pattern> : AI<ops, asm, pattern> {
+class ASI<dag ops, string opc, string asm, list<dag> pattern>
+ : AI<ops, opc, asm, pattern> {
// TODO: Mark the instructions with the appropriate subtarget info.
}
-class ASI5<dag ops, string asm, list<dag> pattern>
- : I<ops, AddrMode5, Size4Bytes, IndexModeNone, asm, "", pattern> {
+class ASI5<dag ops, string opc, string asm, list<dag> pattern>
+ : I<ops, AddrMode5, Size4Bytes, IndexModeNone, opc, asm, "", pattern> {
// TODO: Mark the instructions with the appropriate subtarget info.
}
// ARM Double Instruction
-class ADI<dag ops, string asm, list<dag> pattern> : AI<ops, asm, pattern> {
+class ADI<dag ops, string opc, string asm, list<dag> pattern>
+ : AI<ops, opc, asm, pattern> {
// TODO: Mark the instructions with the appropriate subtarget info.
}
-class ADI5<dag ops, string asm, list<dag> pattern>
- : I<ops, AddrMode5, Size4Bytes, IndexModeNone, asm, "", pattern> {
+class ADI5<dag ops, string opc, string asm, list<dag> pattern>
+ : I<ops, AddrMode5, Size4Bytes, IndexModeNone, opc, asm, "", pattern> {
// TODO: Mark the instructions with the appropriate subtarget info.
}
+// Special cases.
+class AXSI<dag ops, string asm, list<dag> pattern>
+ : XI<ops, AddrModeNone, Size4Bytes, IndexModeNone, asm, "", pattern> {
+ // TODO: Mark the instructions with the appropriate subtarget info.
+}
+
+class AXSI5<dag ops, string asm, list<dag> pattern>
+ : XI<ops, AddrMode5, Size4Bytes, IndexModeNone, asm, "", pattern> {
+ // TODO: Mark the instructions with the appropriate subtarget info.
+}
+
+class AXDI<dag ops, string asm, list<dag> pattern>
+ : XI<ops, AddrModeNone, Size4Bytes, IndexModeNone, asm, "", pattern> {
+ // TODO: Mark the instructions with the appropriate subtarget info.
+}
+
+class AXDI5<dag ops, string asm, list<dag> pattern>
+ : XI<ops, AddrMode5, Size4Bytes, IndexModeNone, asm, "", pattern> {
+ // TODO: Mark the instructions with the appropriate subtarget info.
+}
+
+
def SDT_FTOI :
SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
def SDT_ITOF :
@@ -60,21 +84,21 @@
let isLoad = 1 in {
def FLDD : ADI5<(ops DPR:$dst, addrmode5:$addr),
- "fldd $dst, $addr",
+ "fldd", " $dst, $addr",
[(set DPR:$dst, (load addrmode5:$addr))]>;
def FLDS : ASI5<(ops SPR:$dst, addrmode5:$addr),
- "flds $dst, $addr",
+ "flds", " $dst, $addr",
[(set SPR:$dst, (load addrmode5:$addr))]>;
} // isLoad
let isStore = 1 in {
def FSTD : ADI5<(ops DPR:$src, addrmode5:$addr),
- "fstd $src, $addr",
+ "fstd", " $src, $addr",
[(store DPR:$src, addrmode5:$addr)]>;
def FSTS : ASI5<(ops SPR:$src, addrmode5:$addr),
- "fsts $src, $addr",
+ "fsts", " $src, $addr",
[(store SPR:$src, addrmode5:$addr)]>;
} // isStore
@@ -83,22 +107,22 @@
//
let isLoad = 1 in {
-def FLDMD : ADI5<(ops addrmode5:$addr, reglist:$dst1, variable_ops),
- "fldm${addr:submode}d ${addr:base}, $dst1",
- []>;
-
-def FLDMS : ASI5<(ops addrmode5:$addr, reglist:$dst1, variable_ops),
- "fldm${addr:submode}s ${addr:base}, $dst1",
- []>;
+def FLDMD : AXDI5<(ops addrmode5:$addr, pred:$p, reglist:$dst1, variable_ops),
+ "fldm${p}${addr:submode}d ${addr:base}, $dst1",
+ []>;
+
+def FLDMS : AXSI5<(ops addrmode5:$addr, pred:$p, reglist:$dst1, variable_ops),
+ "fldm${p}${addr:submode}s ${addr:base}, $dst1",
+ []>;
} // isLoad
let isStore = 1 in {
-def FSTMD : ADI5<(ops addrmode5:$addr, reglist:$src1, variable_ops),
- "fstm${addr:submode}d ${addr:base}, $src1",
+def FSTMD : AXDI5<(ops addrmode5:$addr, pred:$p, reglist:$src1, variable_ops),
+ "fstm${p}${addr:submode}d ${addr:base}, $src1",
[]>;
-def FSTMS : ASI5<(ops addrmode5:$addr, reglist:$src1, variable_ops),
- "fstm${addr:submode}s ${addr:base}, $src1",
+def FSTMS : AXSI5<(ops addrmode5:$addr, pred:$p, reglist:$src1, variable_ops),
+ "fstm${p}${addr:submode}s ${addr:base}, $src1",
[]>;
} // isStore
@@ -109,43 +133,43 @@
//
def FADDD : ADI<(ops DPR:$dst, DPR:$a, DPR:$b),
- "faddd $dst, $a, $b",
+ "faddd", " $dst, $a, $b",
[(set DPR:$dst, (fadd DPR:$a, DPR:$b))]>;
def FADDS : ASI<(ops SPR:$dst, SPR:$a, SPR:$b),
- "fadds $dst, $a, $b",
+ "fadds", " $dst, $a, $b",
[(set SPR:$dst, (fadd SPR:$a, SPR:$b))]>;
def FCMPED : ADI<(ops DPR:$a, DPR:$b),
- "fcmped $a, $b",
+ "fcmped", " $a, $b",
[(arm_cmpfp DPR:$a, DPR:$b)]>;
def FCMPES : ASI<(ops SPR:$a, SPR:$b),
- "fcmpes $a, $b",
+ "fcmpes", " $a, $b",
[(arm_cmpfp SPR:$a, SPR:$b)]>;
def FDIVD : ADI<(ops DPR:$dst, DPR:$a, DPR:$b),
- "fdivd $dst, $a, $b",
+ "fdivd", " $dst, $a, $b",
[(set DPR:$dst, (fdiv DPR:$a, DPR:$b))]>;
def FDIVS : ASI<(ops SPR:$dst, SPR:$a, SPR:$b),
- "fdivs $dst, $a, $b",
+ "fdivs", " $dst, $a, $b",
[(set SPR:$dst, (fdiv SPR:$a, SPR:$b))]>;
def FMULD : ADI<(ops DPR:$dst, DPR:$a, DPR:$b),
- "fmuld $dst, $a, $b",
+ "fmuld", " $dst, $a, $b",
[(set DPR:$dst, (fmul DPR:$a, DPR:$b))]>;
def FMULS : ASI<(ops SPR:$dst, SPR:$a, SPR:$b),
- "fmuls $dst, $a, $b",
+ "fmuls", " $dst, $a, $b",
[(set SPR:$dst, (fmul SPR:$a, SPR:$b))]>;
def FNMULD : ADI<(ops DPR:$dst, DPR:$a, DPR:$b),
- "fnmuld $dst, $a, $b",
+ "fnmuld", " $dst, $a, $b",
[(set DPR:$dst, (fneg (fmul DPR:$a, DPR:$b)))]>;
def FNMULS : ASI<(ops SPR:$dst, SPR:$a, SPR:$b),
- "fnmuls $dst, $a, $b",
+ "fnmuls", " $dst, $a, $b",
[(set SPR:$dst, (fneg (fmul SPR:$a, SPR:$b)))]>;
// Match reassociated forms only if not sign dependent rounding.
@@ -156,11 +180,11 @@
def FSUBD : ADI<(ops DPR:$dst, DPR:$a, DPR:$b),
- "fsubd $dst, $a, $b",
+ "fsubd", " $dst, $a, $b",
[(set DPR:$dst, (fsub DPR:$a, DPR:$b))]>;
def FSUBS : ASI<(ops SPR:$dst, SPR:$a, SPR:$b),
- "fsubs $dst, $a, $b",
+ "fsubs", " $dst, $a, $b",
[(set SPR:$dst, (fsub SPR:$a, SPR:$b))]>;
//===----------------------------------------------------------------------===//
@@ -168,82 +192,82 @@
//
def FABSD : ADI<(ops DPR:$dst, DPR:$a),
- "fabsd $dst, $a",
+ "fabsd", " $dst, $a",
[(set DPR:$dst, (fabs DPR:$a))]>;
def FABSS : ASI<(ops SPR:$dst, SPR:$a),
- "fabss $dst, $a",
+ "fabss", " $dst, $a",
[(set SPR:$dst, (fabs SPR:$a))]>;
def FCMPEZD : ADI<(ops DPR:$a),
- "fcmpezd $a",
+ "fcmpezd", " $a",
[(arm_cmpfp0 DPR:$a)]>;
def FCMPEZS : ASI<(ops SPR:$a),
- "fcmpezs $a",
+ "fcmpezs", " $a",
[(arm_cmpfp0 SPR:$a)]>;
def FCVTDS : ADI<(ops DPR:$dst, SPR:$a),
- "fcvtds $dst, $a",
+ "fcvtds", " $dst, $a",
[(set DPR:$dst, (fextend SPR:$a))]>;
def FCVTSD : ADI<(ops SPR:$dst, DPR:$a),
- "fcvtsd $dst, $a",
+ "fcvtsd", " $dst, $a",
[(set SPR:$dst, (fround DPR:$a))]>;
def FCPYD : ADI<(ops DPR:$dst, DPR:$a),
- "fcpyd $dst, $a",
+ "fcpyd", " $dst, $a",
[/*(set DPR:$dst, DPR:$a)*/]>;
def FCPYS : ASI<(ops SPR:$dst, SPR:$a),
- "fcpys $dst, $a",
+ "fcpys", " $dst, $a",
[/*(set SPR:$dst, SPR:$a)*/]>;
def FNEGD : ADI<(ops DPR:$dst, DPR:$a),
- "fnegd $dst, $a",
+ "fnegd", " $dst, $a",
[(set DPR:$dst, (fneg DPR:$a))]>;
def FNEGS : ASI<(ops SPR:$dst, SPR:$a),
- "fnegs $dst, $a",
+ "fnegs", " $dst, $a",
[(set SPR:$dst, (fneg SPR:$a))]>;
def FSQRTD : ADI<(ops DPR:$dst, DPR:$a),
- "fsqrtd $dst, $a",
+ "fsqrtd", " $dst, $a",
[(set DPR:$dst, (fsqrt DPR:$a))]>;
def FSQRTS : ASI<(ops SPR:$dst, SPR:$a),
- "fsqrts $dst, $a",
+ "fsqrts", " $dst, $a",
[(set SPR:$dst, (fsqrt SPR:$a))]>;
//===----------------------------------------------------------------------===//
// FP <-> GPR Copies. Int <-> FP Conversions.
//
-def IMPLICIT_DEF_SPR : PseudoInst<(ops SPR:$rD),
+def IMPLICIT_DEF_SPR : PseudoInst<(ops SPR:$rD, pred:$p),
"@ IMPLICIT_DEF_SPR $rD",
[(set SPR:$rD, (undef))]>;
-def IMPLICIT_DEF_DPR : PseudoInst<(ops DPR:$rD),
+def IMPLICIT_DEF_DPR : PseudoInst<(ops DPR:$rD, pred:$p),
"@ IMPLICIT_DEF_DPR $rD",
[(set DPR:$rD, (undef))]>;
def FMRS : ASI<(ops GPR:$dst, SPR:$src),
- "fmrs $dst, $src",
+ "fmrs", " $dst, $src",
[(set GPR:$dst, (bitconvert SPR:$src))]>;
def FMSR : ASI<(ops SPR:$dst, GPR:$src),
- "fmsr $dst, $src",
+ "fmsr", " $dst, $src",
[(set SPR:$dst, (bitconvert GPR:$src))]>;
def FMRRD : ADI<(ops GPR:$dst1, GPR:$dst2, DPR:$src),
- "fmrrd $dst1, $dst2, $src",
+ "fmrrd", " $dst1, $dst2, $src",
[/* FIXME: Can't write pattern for multiple result instr*/]>;
// FMDHR: GPR -> SPR
// FMDLR: GPR -> SPR
def FMDRR : ADI<(ops DPR:$dst, GPR:$src1, GPR:$src2),
- "fmdrr $dst, $src1, $src2",
+ "fmdrr", " $dst, $src1, $src2",
[(set DPR:$dst, (arm_fmdrr GPR:$src1, GPR:$src2))]>;
// FMRDH: SPR -> GPR
@@ -254,7 +278,7 @@
// FMSRR: GPR -> SPR
-def FMSTAT : ASI<(ops), "fmstat", [(arm_fmstat)]>;
+def FMSTAT : ASI<(ops), "fmstat", "", [(arm_fmstat)]>;
// FMXR: GPR -> VFP System reg
@@ -262,38 +286,38 @@
// Int to FP:
def FSITOD : ADI<(ops DPR:$dst, SPR:$a),
- "fsitod $dst, $a",
+ "fsitod", " $dst, $a",
[(set DPR:$dst, (arm_sitof SPR:$a))]>;
def FSITOS : ASI<(ops SPR:$dst, SPR:$a),
- "fsitos $dst, $a",
+ "fsitos", " $dst, $a",
[(set SPR:$dst, (arm_sitof SPR:$a))]>;
def FUITOD : ADI<(ops DPR:$dst, SPR:$a),
- "fuitod $dst, $a",
+ "fuitod", " $dst, $a",
[(set DPR:$dst, (arm_uitof SPR:$a))]>;
def FUITOS : ASI<(ops SPR:$dst, SPR:$a),
- "fuitos $dst, $a",
+ "fuitos", " $dst, $a",
[(set SPR:$dst, (arm_uitof SPR:$a))]>;
// FP to Int:
// Always set Z bit in the instruction, i.e. "round towards zero" variants.
def FTOSIZD : ADI<(ops SPR:$dst, DPR:$a),
- "ftosizd $dst, $a",
+ "ftosizd", " $dst, $a",
[(set SPR:$dst, (arm_ftosi DPR:$a))]>;
def FTOSIZS : ASI<(ops SPR:$dst, SPR:$a),
- "ftosizs $dst, $a",
+ "ftosizs", " $dst, $a",
[(set SPR:$dst, (arm_ftosi SPR:$a))]>;
def FTOUIZD : ADI<(ops SPR:$dst, DPR:$a),
- "ftouizd $dst, $a",
+ "ftouizd", " $dst, $a",
[(set SPR:$dst, (arm_ftoui DPR:$a))]>;
def FTOUIZS : ASI<(ops SPR:$dst, SPR:$a),
- "ftouizs $dst, $a",
+ "ftouizs", " $dst, $a",
[(set SPR:$dst, (arm_ftoui SPR:$a))]>;
//===----------------------------------------------------------------------===//
@@ -301,42 +325,42 @@
//
def FMACD : ADI<(ops DPR:$dst, DPR:$dstin, DPR:$a, DPR:$b),
- "fmacd $dst, $a, $b",
+ "fmacd", " $dst, $a, $b",
[(set DPR:$dst, (fadd (fmul DPR:$a, DPR:$b), DPR:$dstin))]>,
RegConstraint<"$dstin = $dst">;
def FMACS : ASI<(ops SPR:$dst, SPR:$dstin, SPR:$a, SPR:$b),
- "fmacs $dst, $a, $b",
+ "fmacs", " $dst, $a, $b",
[(set SPR:$dst, (fadd (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
RegConstraint<"$dstin = $dst">;
def FMSCD : ADI<(ops DPR:$dst, DPR:$dstin, DPR:$a, DPR:$b),
- "fmscd $dst, $a, $b",
+ "fmscd", " $dst, $a, $b",
[(set DPR:$dst, (fsub (fmul DPR:$a, DPR:$b), DPR:$dstin))]>,
RegConstraint<"$dstin = $dst">;
def FMSCS : ASI<(ops SPR:$dst, SPR:$dstin, SPR:$a, SPR:$b),
- "fmscs $dst, $a, $b",
+ "fmscs", " $dst, $a, $b",
[(set SPR:$dst, (fsub (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
RegConstraint<"$dstin = $dst">;
def FNMACD : ADI<(ops DPR:$dst, DPR:$dstin, DPR:$a, DPR:$b),
- "fnmacd $dst, $a, $b",
+ "fnmacd", " $dst, $a, $b",
[(set DPR:$dst, (fadd (fneg (fmul DPR:$a, DPR:$b)), DPR:$dstin))]>,
RegConstraint<"$dstin = $dst">;
def FNMACS : ASI<(ops SPR:$dst, SPR:$dstin, SPR:$a, SPR:$b),
- "fnmacs $dst, $a, $b",
+ "fnmacs", " $dst, $a, $b",
[(set SPR:$dst, (fadd (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
RegConstraint<"$dstin = $dst">;
def FNMSCD : ADI<(ops DPR:$dst, DPR:$dstin, DPR:$a, DPR:$b),
- "fnmscd $dst, $a, $b",
+ "fnmscd", " $dst, $a, $b",
[(set DPR:$dst, (fsub (fneg (fmul DPR:$a, DPR:$b)), DPR:$dstin))]>,
RegConstraint<"$dstin = $dst">;
def FNMSCS : ASI<(ops SPR:$dst, SPR:$dstin, SPR:$a, SPR:$b),
- "fnmscs $dst, $a, $b",
+ "fnmscs", " $dst, $a, $b",
[(set SPR:$dst, (fsub (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
RegConstraint<"$dstin = $dst">;
@@ -344,22 +368,22 @@
// FP Conditional moves.
//
-def FCPYDcc : ADI<(ops DPR:$dst, DPR:$false, DPR:$true, ccop:$cc),
+def FCPYDcc : AXDI<(ops DPR:$dst, DPR:$false, DPR:$true, ccop:$cc),
"fcpyd$cc $dst, $true",
[(set DPR:$dst, (ARMcmov DPR:$false, DPR:$true, imm:$cc))]>,
RegConstraint<"$false = $dst">;
-def FCPYScc : ASI<(ops SPR:$dst, SPR:$false, SPR:$true, ccop:$cc),
+def FCPYScc : AXSI<(ops SPR:$dst, SPR:$false, SPR:$true, ccop:$cc),
"fcpys$cc $dst, $true",
[(set SPR:$dst, (ARMcmov SPR:$false, SPR:$true, imm:$cc))]>,
RegConstraint<"$false = $dst">;
-def FNEGDcc : ADI<(ops DPR:$dst, DPR:$false, DPR:$true, ccop:$cc),
+def FNEGDcc : AXDI<(ops DPR:$dst, DPR:$false, DPR:$true, ccop:$cc),
"fnegd$cc $dst, $true",
[(set DPR:$dst, (ARMcneg DPR:$false, DPR:$true, imm:$cc))]>,
RegConstraint<"$false = $dst">;
-def FNEGScc : ASI<(ops SPR:$dst, SPR:$false, SPR:$true, ccop:$cc),
+def FNEGScc : AXSI<(ops SPR:$dst, SPR:$false, SPR:$true, ccop:$cc),
"fnegs$cc $dst, $true",
[(set SPR:$dst, (ARMcneg SPR:$false, SPR:$true, imm:$cc))]>,
RegConstraint<"$false = $dst">;
Index: llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
diff -u llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp:1.11 llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp:1.12
--- llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp:1.11 Wed May 2 20:11:53 2007
+++ llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp Mon May 14 20:29:07 2007
@@ -66,8 +66,8 @@
SmallVector<MachineBasicBlock::iterator, 4>
MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, unsigned Base,
- int Opcode, unsigned Size, unsigned Scratch,
- MemOpQueue &MemOps);
+ int Opcode, unsigned Size, ARMCC::CondCodes Pred,
+ unsigned Scratch, MemOpQueue &MemOps);
void AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps);
bool LoadStoreMultipleOpti(MachineBasicBlock &MBB);
@@ -112,7 +112,7 @@
/// It returns true if the transformation is done.
static bool mergeOps(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
int Offset, unsigned Base, bool BaseKill, int Opcode,
- unsigned Scratch,
+ ARMCC::CondCodes Pred, unsigned Scratch,
SmallVector<std::pair<unsigned, bool>, 8> &Regs,
const TargetInstrInfo *TII) {
// Only a single register to load / store. Don't bother.
@@ -156,7 +156,7 @@
return false; // Probably not worth it then.
BuildMI(MBB, MBBI, TII->get(BaseOpc), NewBase)
- .addReg(Base, false, false, BaseKill).addImm(ImmedOffset);
+ .addReg(Base, false, false, BaseKill).addImm(ImmedOffset).addImm(Pred);
Base = NewBase;
BaseKill = true; // New base is always killed right after its use.
}
@@ -166,9 +166,10 @@
Opcode = getLoadStoreMultipleOpcode(Opcode);
MachineInstrBuilder MIB = (isAM4)
? BuildMI(MBB, MBBI, TII->get(Opcode)).addReg(Base, false, false, BaseKill)
- .addImm(ARM_AM::getAM4ModeImm(Mode))
+ .addImm(ARM_AM::getAM4ModeImm(Mode)).addImm(Pred)
: BuildMI(MBB, MBBI, TII->get(Opcode)).addReg(Base, false, false, BaseKill)
- .addImm(ARM_AM::getAM5Opc(Mode, false, isDPR ? NumRegs<<1 : NumRegs));
+ .addImm(ARM_AM::getAM5Opc(Mode, false, isDPR ? NumRegs<<1 : NumRegs))
+ .addImm(Pred);
for (unsigned i = 0; i != NumRegs; ++i)
MIB = MIB.addReg(Regs[i].first, isDef, false, Regs[i].second);
@@ -180,9 +181,9 @@
SmallVector<MachineBasicBlock::iterator, 4>
ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
unsigned Base, int Opcode, unsigned Size,
- unsigned Scratch, MemOpQueue &MemOps) {
+ ARMCC::CondCodes Pred, unsigned Scratch,
+ MemOpQueue &MemOps) {
SmallVector<MachineBasicBlock::iterator, 4> Merges;
- SmallVector<std::pair<unsigned,bool>, 8> Regs;
bool isAM4 = Opcode == ARM::LDR || Opcode == ARM::STR;
int Offset = MemOps[SIndex].Offset;
int SOffset = Offset;
@@ -191,6 +192,8 @@
unsigned PReg = MemOps[SIndex].MBBI->getOperand(0).getReg();
unsigned PRegNum = ARMRegisterInfo::getRegisterNumbering(PReg);
bool isKill = MemOps[SIndex].MBBI->getOperand(0).isKill();
+
+ SmallVector<std::pair<unsigned,bool>, 8> Regs;
Regs.push_back(std::make_pair(PReg, isKill));
for (unsigned i = SIndex+1, e = MemOps.size(); i != e; ++i) {
int NewOffset = MemOps[i].Offset;
@@ -206,7 +209,8 @@
PRegNum = RegNum;
} else {
// Can't merge this in. Try merging the earlier ones first.
- if (mergeOps(MBB, ++Loc, SOffset, Base, false, Opcode,Scratch,Regs,TII)) {
+ if (mergeOps(MBB, ++Loc, SOffset, Base, false, Opcode, Pred, Scratch,
+ Regs, TII)) {
Merges.push_back(prior(Loc));
for (unsigned j = SIndex; j < i; ++j) {
MBB.erase(MemOps[j].MBBI);
@@ -214,7 +218,7 @@
}
}
SmallVector<MachineBasicBlock::iterator, 4> Merges2 =
- MergeLDR_STR(MBB, i, Base, Opcode, Size, Scratch, MemOps);
+ MergeLDR_STR(MBB, i, Base, Opcode, Size, Pred, Scratch, MemOps);
Merges.append(Merges2.begin(), Merges2.end());
return Merges;
}
@@ -226,7 +230,8 @@
}
bool BaseKill = Loc->findRegisterUseOperandIdx(Base, true) != -1;
- if (mergeOps(MBB, ++Loc, SOffset, Base, BaseKill, Opcode,Scratch,Regs, TII)) {
+ if (mergeOps(MBB, ++Loc, SOffset, Base, BaseKill, Opcode, Pred, Scratch,
+ Regs, TII)) {
Merges.push_back(prior(Loc));
for (unsigned i = SIndex, e = MemOps.size(); i != e; ++i) {
MBB.erase(MemOps[i].MBBI);
@@ -237,20 +242,29 @@
return Merges;
}
+/// getInstrPredicate - If instruction is predicated, returns its predicate
+/// condition, otherwise returns AL.
+static ARMCC::CondCodes getInstrPredicate(MachineInstr *MI) {
+ MachineOperand *PredMO = MI->findFirstPredOperand();
+ return PredMO ? (ARMCC::CondCodes)PredMO->getImmedValue() : ARMCC::AL;
+}
+
static inline bool isMatchingDecrement(MachineInstr *MI, unsigned Base,
- unsigned Bytes) {
+ unsigned Bytes, ARMCC::CondCodes Pred) {
return (MI && MI->getOpcode() == ARM::SUBri &&
MI->getOperand(0).getReg() == Base &&
MI->getOperand(1).getReg() == Base &&
- ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes);
+ ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes &&
+ getInstrPredicate(MI) == Pred);
}
static inline bool isMatchingIncrement(MachineInstr *MI, unsigned Base,
- unsigned Bytes) {
+ unsigned Bytes, ARMCC::CondCodes Pred) {
return (MI && MI->getOpcode() == ARM::ADDri &&
MI->getOperand(0).getReg() == Base &&
MI->getOperand(1).getReg() == Base &&
- ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes);
+ ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes &&
+ getInstrPredicate(MI) == Pred);
}
static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
@@ -266,7 +280,7 @@
return 8;
case ARM::LDM:
case ARM::STM:
- return (MI->getNumOperands() - 2) * 4;
+ return (MI->getNumOperands() - 3) * 4;
case ARM::FLDMS:
case ARM::FSTMS:
case ARM::FLDMD:
@@ -292,6 +306,7 @@
MachineInstr *MI = MBBI;
unsigned Base = MI->getOperand(0).getReg();
unsigned Bytes = getLSMultipleTransferSize(MI);
+ ARMCC::CondCodes Pred = getInstrPredicate(MI);
int Opcode = MI->getOpcode();
bool isAM4 = Opcode == ARM::LDM || Opcode == ARM::STM;
@@ -301,7 +316,7 @@
// Can't use the updating AM4 sub-mode if the base register is also a dest
// register. e.g. ldmdb r0!, {r0, r1, r2}. The behavior is undefined.
- for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i) {
+ for (unsigned i = 3, e = MI->getNumOperands(); i != e; ++i) {
if (MI->getOperand(i).getReg() == Base)
return false;
}
@@ -310,12 +325,12 @@
if (MBBI != MBB.begin()) {
MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
if (Mode == ARM_AM::ia &&
- isMatchingDecrement(PrevMBBI, Base, Bytes)) {
+ isMatchingDecrement(PrevMBBI, Base, Bytes, Pred)) {
MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::db, true));
MBB.erase(PrevMBBI);
return true;
} else if (Mode == ARM_AM::ib &&
- isMatchingDecrement(PrevMBBI, Base, Bytes)) {
+ isMatchingDecrement(PrevMBBI, Base, Bytes, Pred)) {
MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::da, true));
MBB.erase(PrevMBBI);
return true;
@@ -325,12 +340,12 @@
if (MBBI != MBB.end()) {
MachineBasicBlock::iterator NextMBBI = next(MBBI);
if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) &&
- isMatchingIncrement(NextMBBI, Base, Bytes)) {
+ isMatchingIncrement(NextMBBI, Base, Bytes, Pred)) {
MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
MBB.erase(NextMBBI);
return true;
} else if ((Mode == ARM_AM::da || Mode == ARM_AM::db) &&
- isMatchingDecrement(NextMBBI, Base, Bytes)) {
+ isMatchingDecrement(NextMBBI, Base, Bytes, Pred)) {
MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
MBB.erase(NextMBBI);
return true;
@@ -346,7 +361,7 @@
if (MBBI != MBB.begin()) {
MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
if (Mode == ARM_AM::ia &&
- isMatchingDecrement(PrevMBBI, Base, Bytes)) {
+ isMatchingDecrement(PrevMBBI, Base, Bytes, Pred)) {
MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::db, true, Offset));
MBB.erase(PrevMBBI);
return true;
@@ -356,7 +371,7 @@
if (MBBI != MBB.end()) {
MachineBasicBlock::iterator NextMBBI = next(MBBI);
if (Mode == ARM_AM::ia &&
- isMatchingIncrement(NextMBBI, Base, Bytes)) {
+ isMatchingIncrement(NextMBBI, Base, Bytes, Pred)) {
MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::ia, true, Offset));
MBB.erase(NextMBBI);
}
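A hedged sketch of why the extra Pred argument matters for the base-update folding above; the wrapper name is invented, while the two helpers it calls are the ones defined earlier in this file. A base-register add/sub may only be folded into a load/store multiple when both instructions execute under the same condition, otherwise a conditional increment would silently become unconditional.

  // Sketch only: fold the increment only when its predicate matches the
  // predicate of the LDM/STM it would be merged into.
  static bool canFoldBaseIncrement(MachineInstr *Incr, MachineInstr *LdStMI,
                                   unsigned Base, unsigned Bytes) {
    return isMatchingIncrement(Incr, Base, Bytes, getInstrPredicate(LdStMI));
  }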
@@ -414,16 +429,17 @@
if (isLd && MI->getOperand(0).getReg() == Base)
return false;
+ ARMCC::CondCodes Pred = getInstrPredicate(MI);
bool DoMerge = false;
ARM_AM::AddrOpc AddSub = ARM_AM::add;
unsigned NewOpc = 0;
if (MBBI != MBB.begin()) {
MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
- if (isMatchingDecrement(PrevMBBI, Base, Bytes)) {
+ if (isMatchingDecrement(PrevMBBI, Base, Bytes, Pred)) {
DoMerge = true;
AddSub = ARM_AM::sub;
NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
- } else if (isAM2 && isMatchingIncrement(PrevMBBI, Base, Bytes)) {
+ } else if (isAM2 && isMatchingIncrement(PrevMBBI, Base, Bytes, Pred)) {
DoMerge = true;
NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
}
@@ -433,11 +449,11 @@
if (!DoMerge && MBBI != MBB.end()) {
MachineBasicBlock::iterator NextMBBI = next(MBBI);
- if (isAM2 && isMatchingDecrement(NextMBBI, Base, Bytes)) {
+ if (isAM2 && isMatchingDecrement(NextMBBI, Base, Bytes, Pred)) {
DoMerge = true;
AddSub = ARM_AM::sub;
NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
- } else if (isMatchingIncrement(NextMBBI, Base, Bytes)) {
+ } else if (isMatchingIncrement(NextMBBI, Base, Bytes, Pred)) {
DoMerge = true;
NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
}
@@ -457,20 +473,22 @@
// LDR_PRE, LDR_POST;
BuildMI(MBB, MBBI, TII->get(NewOpc), MI->getOperand(0).getReg())
.addReg(Base, true)
- .addReg(Base).addReg(0).addImm(Offset);
+ .addReg(Base).addReg(0).addImm(Offset).addImm(Pred);
else
+ // FLDMS, FLDMD
BuildMI(MBB, MBBI, TII->get(NewOpc)).addReg(Base, false, false, BaseKill)
- .addImm(Offset).addReg(MI->getOperand(0).getReg(), true);
+ .addImm(Offset).addImm(Pred).addReg(MI->getOperand(0).getReg(), true);
} else {
MachineOperand &MO = MI->getOperand(0);
if (isAM2)
// STR_PRE, STR_POST;
BuildMI(MBB, MBBI, TII->get(NewOpc), Base)
.addReg(MO.getReg(), false, false, MO.isKill())
- .addReg(Base).addReg(0).addImm(Offset);
+ .addReg(Base).addReg(0).addImm(Offset).addImm(Pred);
else
- BuildMI(MBB, MBBI, TII->get(NewOpc)).addReg(Base)
- .addImm(Offset).addReg(MO.getReg(), false, false, MO.isKill());
+ // FSTMS, FSTMD
+ BuildMI(MBB, MBBI, TII->get(NewOpc)).addReg(Base).addImm(Offset)
+ .addImm(Pred).addReg(MO.getReg(), false, false, MO.isKill());
}
MBB.erase(MBBI);
@@ -521,6 +539,7 @@
unsigned CurrBase = 0;
int CurrOpc = -1;
unsigned CurrSize = 0;
+ ARMCC::CondCodes CurrPred = ARMCC::AL;
unsigned Position = 0;
RS->enterBasicBlock(&MBB);
@@ -536,8 +555,9 @@
bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
unsigned Size = getLSMultipleTransferSize(MBBI);
unsigned Base = MBBI->getOperand(1).getReg();
- unsigned OffIdx = MBBI->getNumOperands()-1;
- unsigned OffField = MBBI->getOperand(OffIdx).getImm();
+ ARMCC::CondCodes Pred = getInstrPredicate(MBBI);
+ const TargetInstrDescriptor *TID = MBBI->getInstrDescriptor();
+ unsigned OffField = MBBI->getOperand(TID->numOperands-2).getImm();
int Offset = isAM2
? ARM_AM::getAM2Offset(OffField) : ARM_AM::getAM5Offset(OffField) * 4;
if (isAM2) {
@@ -562,6 +582,7 @@
CurrBase = Base;
CurrOpc = Opcode;
CurrSize = Size;
+ CurrPred = Pred;
MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
NumMemOps++;
Advance = true;
@@ -571,7 +592,7 @@
Advance = true;
}
- if (CurrOpc == Opcode && CurrBase == Base) {
+ if (CurrOpc == Opcode && CurrBase == Base && CurrPred == Pred) {
// Continue adding to the queue.
if (Offset > MemOps.back().Offset) {
MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
@@ -617,7 +638,8 @@
// Merge ops.
SmallVector<MachineBasicBlock::iterator,4> MBBII =
- MergeLDR_STR(MBB, 0, CurrBase, CurrOpc, CurrSize, Scratch, MemOps);
+ MergeLDR_STR(MBB, 0, CurrBase, CurrOpc, CurrSize, CurrPred,
+ Scratch, MemOps);
// Try folding preceding/trailing base inc/dec into the generated
// LDM/STM ops.
@@ -639,6 +661,8 @@
CurrBase = 0;
CurrOpc = -1;
+ CurrSize = 0;
+ CurrPred = ARMCC::AL;
if (NumMemOps) {
MemOps.clear();
NumMemOps = 0;
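For orientation, a hedged sketch of the merge criterion the pass enforces after this change; the helper below is illustrative only and not part of the patch. Two memory operations are candidates for the same LDM/STM run only when opcode, base register and predicate all agree.

  // Sketch only: conditional and unconditional accesses never share a queue.
  // Assumes operand 1 is the base register, as it is for the LDR/STR and
  // FLDS/FSTS/FLDD/FSTD forms this pass handles.
  static bool sameMergeClass(MachineInstr *A, MachineInstr *B) {
    return A->getOpcode() == B->getOpcode() &&
           A->getOperand(1).getReg() == B->getOperand(1).getReg() &&
           getInstrPredicate(A) == getInstrPredicate(B);
  }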
Index: llvm/lib/Target/ARM/ARMRegisterInfo.cpp
diff -u llvm/lib/Target/ARM/ARMRegisterInfo.cpp:1.95 llvm/lib/Target/ARM/ARMRegisterInfo.cpp:1.96
--- llvm/lib/Target/ARM/ARMRegisterInfo.cpp:1.95 Mon May 7 18:15:16 2007
+++ llvm/lib/Target/ARM/ARMRegisterInfo.cpp Mon May 14 20:29:07 2007
@@ -144,14 +144,14 @@
.addFrameIndex(FI).addImm(0);
else
BuildMI(MBB, I, TII.get(ARM::STR)).addReg(SrcReg, false, false, true)
- .addFrameIndex(FI).addReg(0).addImm(0);
+ .addFrameIndex(FI).addReg(0).addImm(0).addImm((int64_t)ARMCC::AL);
} else if (RC == ARM::DPRRegisterClass) {
BuildMI(MBB, I, TII.get(ARM::FSTD)).addReg(SrcReg, false, false, true)
- .addFrameIndex(FI).addImm(0);
+ .addFrameIndex(FI).addImm(0).addImm((int64_t)ARMCC::AL);
} else {
assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
BuildMI(MBB, I, TII.get(ARM::FSTS)).addReg(SrcReg, false, false, true)
- .addFrameIndex(FI).addImm(0);
+ .addFrameIndex(FI).addImm(0).addImm((int64_t)ARMCC::AL);
}
}
@@ -167,14 +167,14 @@
.addFrameIndex(FI).addImm(0);
else
BuildMI(MBB, I, TII.get(ARM::LDR), DestReg)
- .addFrameIndex(FI).addReg(0).addImm(0);
+ .addFrameIndex(FI).addReg(0).addImm(0).addImm((int64_t)ARMCC::AL);
} else if (RC == ARM::DPRRegisterClass) {
BuildMI(MBB, I, TII.get(ARM::FLDD), DestReg)
- .addFrameIndex(FI).addImm(0);
+ .addFrameIndex(FI).addImm(0).addImm((int64_t)ARMCC::AL);
} else {
assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
BuildMI(MBB, I, TII.get(ARM::FLDS), DestReg)
- .addFrameIndex(FI).addImm(0);
+ .addFrameIndex(FI).addImm(0).addImm((int64_t)ARMCC::AL);
}
}
@@ -185,12 +185,17 @@
if (RC == ARM::GPRRegisterClass) {
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- BuildMI(MBB, I, TII.get(AFI->isThumbFunction() ? ARM::tMOVr : ARM::MOVr),
- DestReg).addReg(SrcReg);
+ if (AFI->isThumbFunction())
+ BuildMI(MBB, I, TII.get(ARM::tMOVr), DestReg).addReg(SrcReg);
+ else
+ BuildMI(MBB, I, TII.get(ARM::MOVr), DestReg).addReg(SrcReg)
+ .addImm((int64_t)ARMCC::AL);
} else if (RC == ARM::SPRRegisterClass)
- BuildMI(MBB, I, TII.get(ARM::FCPYS), DestReg).addReg(SrcReg);
+ BuildMI(MBB, I, TII.get(ARM::FCPYS), DestReg).addReg(SrcReg)
+ .addImm((int64_t)ARMCC::AL);
else if (RC == ARM::DPRRegisterClass)
- BuildMI(MBB, I, TII.get(ARM::FCPYD), DestReg).addReg(SrcReg);
+ BuildMI(MBB, I, TII.get(ARM::FCPYD), DestReg).addReg(SrcReg)
+ .addImm((int64_t)ARMCC::AL);
else
abort();
}
@@ -199,7 +204,7 @@
/// specified immediate.
static void emitLoadConstPool(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
- unsigned DestReg, int Val,
+ unsigned DestReg, ARMCC::CondCodes Pred, int Val,
const TargetInstrInfo &TII, bool isThumb) {
MachineFunction &MF = *MBB.getParent();
MachineConstantPool *ConstantPool = MF.getConstantPool();
@@ -209,7 +214,7 @@
BuildMI(MBB, MBBI, TII.get(ARM::tLDRcp), DestReg).addConstantPoolIndex(Idx);
else
BuildMI(MBB, MBBI, TII.get(ARM::LDRcp), DestReg).addConstantPoolIndex(Idx)
- .addReg(0).addImm(0);
+ .addReg(0).addImm(0).addImm((unsigned)Pred);
}
void ARMRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
@@ -217,7 +222,9 @@
unsigned DestReg,
const MachineInstr *Orig) const {
if (Orig->getOpcode() == ARM::MOVi2pieces) {
- emitLoadConstPool(MBB, I, DestReg, Orig->getOperand(1).getImmedValue(),
+ emitLoadConstPool(MBB, I, DestReg,
+ (ARMCC::CondCodes)Orig->getOperand(2).getImmedValue(),
+ Orig->getOperand(1).getImmedValue(),
TII, false);
return;
}
@@ -247,14 +254,15 @@
switch (Opc) {
default: break;
case ARM::MOVr: {
+ unsigned Pred = MI->getOperand(2).getImmedValue();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
NewMI = BuildMI(TII.get(ARM::STR)).addReg(SrcReg).addFrameIndex(FI)
- .addReg(0).addImm(0);
+ .addReg(0).addImm(0).addImm(Pred);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
NewMI = BuildMI(TII.get(ARM::LDR), DstReg).addFrameIndex(FI).addReg(0)
- .addImm(0);
+ .addImm(0).addImm(Pred);
}
break;
}
@@ -277,24 +285,28 @@
break;
}
case ARM::FCPYS: {
+ unsigned Pred = MI->getOperand(2).getImmedValue();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
NewMI = BuildMI(TII.get(ARM::FSTS)).addReg(SrcReg).addFrameIndex(FI)
- .addImm(0);
+ .addImm(0).addImm(Pred);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
- NewMI = BuildMI(TII.get(ARM::FLDS), DstReg).addFrameIndex(FI).addImm(0);
+ NewMI = BuildMI(TII.get(ARM::FLDS), DstReg).addFrameIndex(FI)
+ .addImm(0).addImm(Pred);
}
break;
}
case ARM::FCPYD: {
+ unsigned Pred = MI->getOperand(2).getImmedValue();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
NewMI = BuildMI(TII.get(ARM::FSTD)).addReg(SrcReg).addFrameIndex(FI)
- .addImm(0);
+ .addImm(0).addImm(Pred);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
- NewMI = BuildMI(TII.get(ARM::FLDD), DstReg).addFrameIndex(FI).addImm(0);
+ NewMI = BuildMI(TII.get(ARM::FLDD), DstReg).addFrameIndex(FI)
+ .addImm(0).addImm(Pred);
}
break;
}
@@ -415,6 +427,7 @@
void emitARMRegPlusImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
unsigned DestReg, unsigned BaseReg,
+ ARMCC::CondCodes Pred,
int NumBytes, const TargetInstrInfo &TII) {
bool isSub = NumBytes < 0;
if (isSub) NumBytes = -NumBytes;
@@ -433,7 +446,8 @@
// Build the new ADD / SUB.
BuildMI(MBB, MBBI, TII.get(isSub ? ARM::SUBri : ARM::ADDri), DestReg)
- .addReg(BaseReg, false, false, true).addImm(SOImmVal);
+ .addReg(BaseReg, false, false, true).addImm(SOImmVal)
+ .addImm((unsigned)Pred);
BaseReg = DestReg;
}
}
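A hedged usage sketch of the widened emitARMRegPlusImmediate above; the wrapper, registers and offset are invented for the example. Threading the surrounding instruction's predicate through keeps the emitted ADDri/SUBri conditional when the original instruction was.

  // Sketch only: form a frame address in r12 under the caller's predicate.
  static void emitFrameAddress(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator &MBBI,
                               ARMCC::CondCodes Pred, int Offset,
                               const TargetInstrInfo &TII) {
    emitARMRegPlusImmediate(MBB, MBBI, ARM::R12, ARM::R11, Pred, Offset, TII);
  }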
@@ -498,7 +512,7 @@
BuildMI(MBB, MBBI, TII.get(ARM::tNEG), LdReg)
.addReg(LdReg, false, false, true);
} else
- emitLoadConstPool(MBB, MBBI, LdReg, NumBytes, TII, true);
+ emitLoadConstPool(MBB, MBBI, LdReg, ARMCC::AL, NumBytes, TII, true);
// Emit add / sub.
int Opc = (isSub) ? ARM::tSUBrr : (isHigh ? ARM::tADDhirr : ARM::tADDrr);
@@ -618,11 +632,12 @@
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
- int NumBytes, bool isThumb, const TargetInstrInfo &TII) {
+ ARMCC::CondCodes Pred, int NumBytes, bool isThumb,
+ const TargetInstrInfo &TII) {
if (isThumb)
emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes, TII);
else
- emitARMRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes, TII);
+ emitARMRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, Pred, NumBytes, TII);
}
void ARMRegisterInfo::
@@ -643,11 +658,15 @@
Amount = (Amount+Align-1)/Align*Align;
// Replace the pseudo instruction with a new instruction...
- if (Old->getOpcode() == ARM::ADJCALLSTACKDOWN) {
- emitSPUpdate(MBB, I, -Amount, AFI->isThumbFunction(), TII);
+ unsigned Opc = Old->getOpcode();
+ bool isThumb = AFI->isThumbFunction();
+ ARMCC::CondCodes Pred = isThumb
+ ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(1).getImmedValue();
+ if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
+ emitSPUpdate(MBB, I, Pred, -Amount, isThumb, TII);
} else {
- assert(Old->getOpcode() == ARM::ADJCALLSTACKUP);
- emitSPUpdate(MBB, I, Amount, AFI->isThumbFunction(), TII);
+ assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
+ emitSPUpdate(MBB, I, Pred, Amount, isThumb, TII);
}
}
}
@@ -925,7 +944,7 @@
if (FrameReg == ARM::SP)
emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,Offset,false,TII);
else {
- emitLoadConstPool(MBB, II, TmpReg, Offset, TII, true);
+ emitLoadConstPool(MBB, II, TmpReg, ARMCC::AL, Offset, TII, true);
UseRR = true;
}
} else
@@ -960,7 +979,7 @@
if (FrameReg == ARM::SP)
emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,Offset,false,TII);
else {
- emitLoadConstPool(MBB, II, TmpReg, Offset, TII, true);
+ emitLoadConstPool(MBB, II, TmpReg, ARMCC::AL, Offset, TII, true);
UseRR = true;
}
} else
@@ -990,7 +1009,10 @@
if (ScratchReg == 0)
// No register is "free". Scavenge a register.
ScratchReg = RS->scavengeRegister(&ARM::GPRRegClass, II, SPAdj);
- emitARMRegPlusImmediate(MBB, II, ScratchReg, FrameReg,
+ MachineOperand *MO = MI.findFirstPredOperand();
+ ARMCC::CondCodes Pred = MO ?
+ (ARMCC::CondCodes)MO->getImmedValue() : ARMCC::AL;
+ emitARMRegPlusImmediate(MBB, II, ScratchReg, FrameReg, Pred,
isSub ? -Offset : Offset, TII);
MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
}
@@ -1293,11 +1315,11 @@
int FramePtrSpillFI = 0;
if (VARegSaveSize)
- emitSPUpdate(MBB, MBBI, -VARegSaveSize, isThumb, TII);
+ emitSPUpdate(MBB, MBBI, ARMCC::AL, -VARegSaveSize, isThumb, TII);
if (!AFI->hasStackFrame()) {
if (NumBytes != 0)
- emitSPUpdate(MBB, MBBI, -NumBytes, isThumb, TII);
+ emitSPUpdate(MBB, MBBI, ARMCC::AL, -NumBytes, isThumb, TII);
return;
}
@@ -1337,24 +1359,27 @@
if (!isThumb) {
// Build the new SUBri to adjust SP for integer callee-save spill area 1.
- emitSPUpdate(MBB, MBBI, -GPRCS1Size, isThumb, TII);
+ emitSPUpdate(MBB, MBBI, ARMCC::AL, -GPRCS1Size, isThumb, TII);
movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, 1, STI);
} else if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tPUSH)
++MBBI;
// Darwin ABI requires FP to point to the stack slot that contains the
// previous FP.
- if (STI.isTargetDarwin() || hasFP(MF))
- BuildMI(MBB, MBBI, TII.get(isThumb ? ARM::tADDrSPi : ARM::ADDri), FramePtr)
+ if (STI.isTargetDarwin() || hasFP(MF)) {
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, TII.get(isThumb ? ARM::tADDrSPi : ARM::ADDri),FramePtr)
.addFrameIndex(FramePtrSpillFI).addImm(0);
+ if (!isThumb) MIB.addImm(ARMCC::AL);
+ }
if (!isThumb) {
// Build the new SUBri to adjust SP for integer callee-save spill area 2.
- emitSPUpdate(MBB, MBBI, -GPRCS2Size, false, TII);
+ emitSPUpdate(MBB, MBBI, ARMCC::AL, -GPRCS2Size, false, TII);
// Build the new SUBri to adjust SP for FP callee-save spill area.
movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, 2, STI);
- emitSPUpdate(MBB, MBBI, -DPRCSSize, false, TII);
+ emitSPUpdate(MBB, MBBI, ARMCC::AL, -DPRCSSize, false, TII);
}
// Determine starting offsets of spill areas.
@@ -1371,7 +1396,7 @@
// Insert it after all the callee-save spills.
if (!isThumb)
movePastCSLoadStoreOps(MBB, MBBI, ARM::FSTD, 3, STI);
- emitSPUpdate(MBB, MBBI, -NumBytes, isThumb, TII);
+ emitSPUpdate(MBB, MBBI, ARMCC::AL, -NumBytes, isThumb, TII);
}
if(STI.isTargetELF() && hasFP(MF)) {
@@ -1414,7 +1439,7 @@
int NumBytes = (int)MFI->getStackSize();
if (!AFI->hasStackFrame()) {
if (NumBytes != 0)
- emitSPUpdate(MBB, MBBI, NumBytes, isThumb, TII);
+ emitSPUpdate(MBB, MBBI, ARMCC::AL, NumBytes, isThumb, TII);
} else {
// Unwind MBBI to point to first LDR / FLDD.
const unsigned *CSRegs = getCalleeSavedRegs();
@@ -1444,9 +1469,9 @@
&MBB.front() != MBBI &&
prior(MBBI)->getOpcode() == ARM::tPOP) {
MachineBasicBlock::iterator PMBBI = prior(MBBI);
- emitSPUpdate(MBB, PMBBI, NumBytes, isThumb, TII);
+ emitSPUpdate(MBB, PMBBI, ARMCC::AL, NumBytes, isThumb, TII);
} else
- emitSPUpdate(MBB, MBBI, NumBytes, isThumb, TII);
+ emitSPUpdate(MBB, MBBI, ARMCC::AL, NumBytes, isThumb, TII);
}
} else {
// Darwin ABI requires FP to point to the stack slot that contains the
@@ -1461,24 +1486,28 @@
hasFP(MF))
if (NumBytes)
BuildMI(MBB, MBBI, TII.get(ARM::SUBri), ARM::SP).addReg(FramePtr)
- .addImm(NumBytes);
+ .addImm(NumBytes).addImm((unsigned)ARMCC::AL);
else
- BuildMI(MBB, MBBI, TII.get(ARM::MOVr), ARM::SP).addReg(FramePtr);
+ BuildMI(MBB, MBBI, TII.get(ARM::MOVr), ARM::SP).addReg(FramePtr)
+ .addImm((unsigned)ARMCC::AL);
} else if (NumBytes) {
- emitSPUpdate(MBB, MBBI, NumBytes, false, TII);
+ emitSPUpdate(MBB, MBBI, ARMCC::AL, NumBytes, false, TII);
}
// Move SP to start of integer callee save spill area 2.
movePastCSLoadStoreOps(MBB, MBBI, ARM::FLDD, 3, STI);
- emitSPUpdate(MBB, MBBI, AFI->getDPRCalleeSavedAreaSize(), false, TII);
+ emitSPUpdate(MBB, MBBI, ARMCC::AL, AFI->getDPRCalleeSavedAreaSize(),
+ false, TII);
// Move SP to start of integer callee save spill area 1.
movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, 2, STI);
- emitSPUpdate(MBB, MBBI, AFI->getGPRCalleeSavedArea2Size(), false, TII);
+ emitSPUpdate(MBB, MBBI, ARMCC::AL, AFI->getGPRCalleeSavedArea2Size(),
+ false, TII);
// Move SP to SP upon entry to the function.
movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, 1, STI);
- emitSPUpdate(MBB, MBBI, AFI->getGPRCalleeSavedArea1Size(), false, TII);
+ emitSPUpdate(MBB, MBBI, ARMCC::AL, AFI->getGPRCalleeSavedArea1Size(),
+ false, TII);
}
}
@@ -1488,7 +1517,7 @@
// FIXME: Verify this is still ok when R3 is no longer being reserved.
BuildMI(MBB, MBBI, TII.get(ARM::tPOP)).addReg(ARM::R3);
- emitSPUpdate(MBB, MBBI, VARegSaveSize, isThumb, TII);
+ emitSPUpdate(MBB, MBBI, ARMCC::AL, VARegSaveSize, isThumb, TII);
if (isThumb) {
BuildMI(MBB, MBBI, TII.get(ARM::tBX_RET_vararg)).addReg(ARM::R3);
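Taken together, a hedged sketch of the pattern the patch applies throughout the backend; the copy helper below is illustrative, not from the patch. Every BuildMI of a predicable ARM instruction now has to append the predicate immediate, normally ARMCC::AL for unconditional code.

  // Sketch only: an unconditional GPR copy after this change. Leaving off
  // the trailing .addImm would leave the new predicate operand unfilled.
  static void copyGPR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                      const TargetInstrInfo &TII,
                      unsigned DestReg, unsigned SrcReg) {
    BuildMI(MBB, I, TII.get(ARM::MOVr), DestReg)
      .addReg(SrcReg)
      .addImm((int64_t)ARMCC::AL);  // explicit always-execute predicate
  }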