[llvm-commits] [llvm] r66456 - in /llvm/branches/Apple/Dib: lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp lib/Target/ARM/ARMSubtarget.cpp lib/Target/ARM/ARMTargetMachine.cpp lib/Target/X86/X86ISelLowering.cpp lib/Target/X86/X86ISelLowering.h lib/Target/X86/X86Instr64bit.td lib/Target/X86/X86InstrInfo.cpp lib/Target/X86/X86InstrInfo.td test/CodeGen/ARM/uxtb.ll test/CodeGen/X86/2009-03-05-burr-list-crash.ll test/CodeGen/X86/peep-test-0.ll test/CodeGen/X86/peep-test-1.ll test/CodeGen/X86/peep-test.ll
Bill Wendling
isanbard at gmail.com
Mon Mar 9 13:52:42 PDT 2009
Author: void
Date: Mon Mar 9 15:52:42 2009
New Revision: 66456
URL: http://llvm.org/viewvc/llvm-project?rev=66456&view=rev
Log:
--- Merging (from foreign repository) r66365 into '.':
U test/CodeGen/ARM/uxtb.ll
U lib/Target/ARM/ARMSubtarget.cpp
Recognize triplets starting with armv5-, armv6-, etc., and set the ARM arch
version accordingly.
--- Merging (from foreign repository) r66435 into '.':
G lib/Target/ARM/ARMSubtarget.cpp
U lib/Target/ARM/ARMTargetMachine.cpp
ARM target now also recognizes triplets like thumbv6-apple-darwin and sets thumb
mode and arch subversion. Eventually thumb triplets will go away and be replaced
with function notes.
Added:
llvm/branches/Apple/Dib/test/CodeGen/X86/2009-03-05-burr-list-crash.ll
llvm/branches/Apple/Dib/test/CodeGen/X86/peep-test-0.ll
llvm/branches/Apple/Dib/test/CodeGen/X86/peep-test-1.ll
llvm/branches/Apple/Dib/test/CodeGen/X86/peep-test.ll
Modified:
llvm/branches/Apple/Dib/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
llvm/branches/Apple/Dib/lib/Target/ARM/ARMSubtarget.cpp
llvm/branches/Apple/Dib/lib/Target/ARM/ARMTargetMachine.cpp
llvm/branches/Apple/Dib/lib/Target/X86/X86ISelLowering.cpp
llvm/branches/Apple/Dib/lib/Target/X86/X86ISelLowering.h
llvm/branches/Apple/Dib/lib/Target/X86/X86Instr64bit.td
llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.cpp
llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.td
llvm/branches/Apple/Dib/test/CodeGen/ARM/uxtb.ll
Modified: llvm/branches/Apple/Dib/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp?rev=66456&r1=66455&r2=66456&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp (original)
+++ llvm/branches/Apple/Dib/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp Mon Mar 9 15:52:42 2009
@@ -402,7 +402,7 @@
NewSU->isCommutable = true;
ComputeLatency(NewSU);
- SDep ChainPred;
+ SmallVector<SDep, 4> ChainPreds;
SmallVector<SDep, 4> ChainSuccs;
SmallVector<SDep, 4> LoadPreds;
SmallVector<SDep, 4> NodePreds;
@@ -410,7 +410,7 @@
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl())
- ChainPred = *I;
+ ChainPreds.push_back(*I);
else if (I->getSUnit()->getNode() &&
I->getSUnit()->getNode()->isOperandOf(LoadNode))
LoadPreds.push_back(*I);
@@ -425,17 +425,17 @@
NodeSuccs.push_back(*I);
}
- if (ChainPred.getSUnit()) {
- RemovePred(SU, ChainPred);
+ for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
+ const SDep &Pred = ChainPreds[i];
+ RemovePred(SU, Pred);
if (isNewLoad)
- AddPred(LoadSU, ChainPred);
+ AddPred(LoadSU, Pred);
}
for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
const SDep &Pred = LoadPreds[i];
RemovePred(SU, Pred);
- if (isNewLoad) {
+ if (isNewLoad)
AddPred(LoadSU, Pred);
- }
}
for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
const SDep &Pred = NodePreds[i];
Modified: llvm/branches/Apple/Dib/lib/Target/ARM/ARMSubtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Target/ARM/ARMSubtarget.cpp?rev=66456&r1=66455&r2=66456&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/lib/Target/ARM/ARMSubtarget.cpp (original)
+++ llvm/branches/Apple/Dib/lib/Target/ARM/ARMSubtarget.cpp Mon Mar 9 15:52:42 2009
@@ -35,8 +35,31 @@
// Set the boolean corresponding to the current target triple, or the default
// if one cannot be determined, to true.
const std::string& TT = M.getTargetTriple();
- if (TT.length() > 5) {
+ unsigned Len = TT.length();
+ unsigned Idx = 0;
+ if (Len >= 5 && TT.substr(0, 4) == "armv")
+ Idx = 4;
+ else if (Len >= 6 && TT.substr(0, 6) == "thumb") {
+ IsThumb = true;
+ if (Len >= 7 && TT[5] == 'v')
+ Idx = 6;
+ }
+ if (Idx) {
+ unsigned SubVer = TT[Idx];
+ if (SubVer > '4' && SubVer <= '9') {
+ if (SubVer >= '6')
+ ARMArchVersion = V6;
+ else if (SubVer == '5') {
+ ARMArchVersion = V5T;
+ if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == 'e')
+ ARMArchVersion = V5TE;
+ }
+ }
+ }
+
+ if (Len >= 10) {
if (TT.find("-darwin") != std::string::npos)
+ // arm-darwin
TargetType = isDarwin;
} else if (TT.empty()) {
#if defined(__APPLE__)
Modified: llvm/branches/Apple/Dib/lib/Target/ARM/ARMTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Target/ARM/ARMTargetMachine.cpp?rev=66456&r1=66455&r2=66456&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/lib/Target/ARM/ARMTargetMachine.cpp (original)
+++ llvm/branches/Apple/Dib/lib/Target/ARM/ARMTargetMachine.cpp Mon Mar 9 15:52:42 2009
@@ -53,7 +53,9 @@
unsigned ThumbTargetMachine::getModuleMatchQuality(const Module &M) {
std::string TT = M.getTargetTriple();
- if (TT.size() >= 6 && std::string(TT.begin(), TT.begin()+6) == "thumb-")
+ // Match thumb-foo-bar, as well as things like thumbv5blah-*
+ if (TT.size() >= 6 &&
+ (TT.substr(0, 6) == "thumb-" || TT.substr(0, 6) == "thumbv"))
return 20;
// If the target triple is something non-thumb, we don't match.
@@ -105,7 +107,8 @@
unsigned ARMTargetMachine::getModuleMatchQuality(const Module &M) {
std::string TT = M.getTargetTriple();
- if (TT.size() >= 4 && // Match arm-foo-bar, as well as things like armv5blah-*
+ // Match arm-foo-bar, as well as things like armv5blah-*
+ if (TT.size() >= 4 &&
(TT.substr(0, 4) == "arm-" || TT.substr(0, 4) == "armv"))
return 20;
// If the target triple is something non-arm, we don't match.
Modified: llvm/branches/Apple/Dib/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Target/X86/X86ISelLowering.cpp?rev=66456&r1=66455&r2=66456&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/branches/Apple/Dib/lib/Target/X86/X86ISelLowering.cpp Mon Mar 9 15:52:42 2009
@@ -5345,6 +5345,113 @@
return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
}
+/// Emit nodes that will be selected as "test Op0,Op0", or something
+/// equivalent.
+SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
+ SelectionDAG &DAG) {
+ DebugLoc dl = Op.getDebugLoc();
+
+ // CF and OF aren't always set the way we want. Determine which
+ // of these we need.
+ bool NeedCF = false;
+ bool NeedOF = false;
+ switch (X86CC) {
+ case X86::COND_A: case X86::COND_AE:
+ case X86::COND_B: case X86::COND_BE:
+ NeedCF = true;
+ break;
+ case X86::COND_G: case X86::COND_GE:
+ case X86::COND_L: case X86::COND_LE:
+ case X86::COND_O: case X86::COND_NO:
+ NeedOF = true;
+ break;
+ default: break;
+ }
+
+ // See if we can use the EFLAGS value from the operand instead of
+ // doing a separate TEST. TEST always sets OF and CF to 0, so unless
+ // we prove that the arithmetic won't overflow, we can't use OF or CF.
+ if (Op.getResNo() == 0 && !NeedOF && !NeedCF) {
+ unsigned Opcode = 0;
+ unsigned NumOperands = 0;
+ switch (Op.getNode()->getOpcode()) {
+ case ISD::ADD:
+ // Due to an isel shortcoming, be conservative if this add is likely to
+ // be selected as part of a load-modify-store instruction. When the root
+ // node in a match is a store, isel doesn't know how to remap non-chain
+ // non-flag uses of other nodes in the match, such as the ADD in this
+ // case. This leads to the ADD being left around and reselected, with
+ // the result being two adds in the output.
+ for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
+ UE = Op.getNode()->use_end(); UI != UE; ++UI)
+ if (UI->getOpcode() == ISD::STORE)
+ goto default_case;
+ if (ConstantSDNode *C =
+ dyn_cast<ConstantSDNode>(Op.getNode()->getOperand(1))) {
+ // An add of one will be selected as an INC.
+ if (C->getAPIntValue() == 1) {
+ Opcode = X86ISD::INC;
+ NumOperands = 1;
+ break;
+ }
+ // An add of negative one (subtract of one) will be selected as a DEC.
+ if (C->getAPIntValue().isAllOnesValue()) {
+ Opcode = X86ISD::DEC;
+ NumOperands = 1;
+ break;
+ }
+ }
+ // Otherwise use a regular EFLAGS-setting add.
+ Opcode = X86ISD::ADD;
+ NumOperands = 2;
+ break;
+ case ISD::SUB:
+ // Due to the ISEL shortcoming noted above, be conservative if this sub is
+ // likely to be selected as part of a load-modify-store instruction.
+ for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
+ UE = Op.getNode()->use_end(); UI != UE; ++UI)
+ if (UI->getOpcode() == ISD::STORE)
+ goto default_case;
+ // Otherwise use a regular EFLAGS-setting sub.
+ Opcode = X86ISD::SUB;
+ NumOperands = 2;
+ break;
+ case X86ISD::ADD:
+ case X86ISD::SUB:
+ case X86ISD::INC:
+ case X86ISD::DEC:
+ return SDValue(Op.getNode(), 1);
+ default:
+ default_case:
+ break;
+ }
+ if (Opcode != 0) {
+ const MVT *VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::i32);
+ SmallVector<SDValue, 4> Ops;
+ for (unsigned i = 0; i != NumOperands; ++i)
+ Ops.push_back(Op.getOperand(i));
+ SDValue New = DAG.getNode(Opcode, dl, VTs, 2, &Ops[0], NumOperands);
+ DAG.ReplaceAllUsesWith(Op, New);
+ return SDValue(New.getNode(), 1);
+ }
+ }
+
+ return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
+ DAG.getConstant(0, Op.getValueType()));
+}
+
+/// Emit nodes that will be selected as "cmp Op0,Op1", or something
+/// equivalent.
+SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
+ SelectionDAG &DAG) {
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1))
+ if (C->getAPIntValue() == 0)
+ return EmitTest(Op0, X86CC, DAG);
+
+ DebugLoc dl = Op0.getDebugLoc();
+ return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
+}
+
SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) {
assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
SDValue Op0 = Op.getOperand(0);
@@ -5407,7 +5514,7 @@
bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();
unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
- SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
+ SDValue Cond = EmitCmp(Op0, Op1, X86CC, DAG);
return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
DAG.getConstant(X86CC, MVT::i8), Cond);
}
@@ -5526,8 +5633,20 @@
}
// isX86LogicalCmp - Return true if opcode is a X86 logical comparison.
-static bool isX86LogicalCmp(unsigned Opc) {
- return Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI;
+static bool isX86LogicalCmp(SDValue Op) {
+ unsigned Opc = Op.getNode()->getOpcode();
+ if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI)
+ return true;
+ if (Op.getResNo() == 1 &&
+ Opc == X86ISD::ADD ||
+ Opc == X86ISD::SUB ||
+ Opc == X86ISD::SMUL ||
+ Opc == X86ISD::UMUL ||
+ Opc == X86ISD::INC ||
+ Opc == X86ISD::DEC)
+ return true;
+
+ return false;
}
SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) {
@@ -5553,7 +5672,7 @@
!isScalarFPTypeInSSEReg(VT)) // FPStack?
IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
- if ((isX86LogicalCmp(Opc) && !IllegalFPCMov) || Opc == X86ISD::BT) { // FIXME
+ if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) || Opc == X86ISD::BT) { // FIXME
Cond = Cmp;
addTest = false;
}
@@ -5561,8 +5680,7 @@
if (addTest) {
CC = DAG.getConstant(X86::COND_NE, MVT::i8);
- Cond= DAG.getNode(X86ISD::CMP, dl, MVT::i32, Cond,
- DAG.getConstant(0, MVT::i8));
+ Cond = EmitTest(Cond, X86::COND_NE, DAG);
}
const MVT *VTs = DAG.getNodeValueTypes(Op.getValueType(),
@@ -5630,7 +5748,7 @@
SDValue Cmp = Cond.getOperand(1);
unsigned Opc = Cmp.getOpcode();
// FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
- if (isX86LogicalCmp(Opc) || Opc == X86ISD::BT) {
+ if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
Cond = Cmp;
addTest = false;
} else {
@@ -5649,13 +5767,12 @@
unsigned CondOpc;
if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
SDValue Cmp = Cond.getOperand(0).getOperand(1);
- unsigned Opc = Cmp.getOpcode();
if (CondOpc == ISD::OR) {
// Also, recognize the pattern generated by an FCMP_UNE. We can emit
// two branches instead of an explicit OR instruction with a
// separate test.
if (Cmp == Cond.getOperand(1).getOperand(1) &&
- isX86LogicalCmp(Opc)) {
+ isX86LogicalCmp(Cmp)) {
CC = Cond.getOperand(0).getOperand(0);
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cmp);
@@ -5670,7 +5787,7 @@
// have a fall-through edge, because this requires an explicit
// jmp when the condition is false.
if (Cmp == Cond.getOperand(1).getOperand(1) &&
- isX86LogicalCmp(Opc) &&
+ isX86LogicalCmp(Cmp) &&
Op.getNode()->hasOneUse()) {
X86::CondCode CCode =
(X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
@@ -5713,8 +5830,7 @@
if (addTest) {
CC = DAG.getConstant(X86::COND_NE, MVT::i8);
- Cond= DAG.getNode(X86ISD::CMP, dl, MVT::i32, Cond,
- DAG.getConstant(0, MVT::i8));
+ Cond = EmitTest(Cond, X86::COND_NE, DAG);
}
return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cond);
@@ -6647,6 +6763,14 @@
switch (Op.getOpcode()) {
default: assert(0 && "Unknown ovf instruction!");
case ISD::SADDO:
+ // An add of one will be selected as an INC. Note that INC doesn't
+ // set CF, so we can't do this for UADDO.
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (C->getAPIntValue() == 1) {
+ BaseOp = X86ISD::INC;
+ Cond = X86::COND_O;
+ break;
+ }
BaseOp = X86ISD::ADD;
Cond = X86::COND_O;
break;
@@ -6655,6 +6779,14 @@
Cond = X86::COND_B;
break;
case ISD::SSUBO:
+ // A subtract of one will be selected as a DEC. Note that DEC doesn't
+ // set CF, so we can't do this for USUBO.
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (C->getAPIntValue() == 1) {
+ BaseOp = X86ISD::DEC;
+ Cond = X86::COND_O;
+ break;
+ }
BaseOp = X86ISD::SUB;
Cond = X86::COND_O;
break;
@@ -6995,6 +7127,8 @@
case X86ISD::SUB: return "X86ISD::SUB";
case X86ISD::SMUL: return "X86ISD::SMUL";
case X86ISD::UMUL: return "X86ISD::UMUL";
+ case X86ISD::INC: return "X86ISD::INC";
+ case X86ISD::DEC: return "X86ISD::DEC";
}
}
@@ -7793,6 +7927,8 @@
case X86ISD::SUB:
case X86ISD::SMUL:
case X86ISD::UMUL:
+ case X86ISD::INC:
+ case X86ISD::DEC:
// These nodes' second result is a boolean.
if (Op.getResNo() == 0)
break;
Modified: llvm/branches/Apple/Dib/lib/Target/X86/X86ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Target/X86/X86ISelLowering.h?rev=66456&r1=66455&r2=66456&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/lib/Target/X86/X86ISelLowering.h (original)
+++ llvm/branches/Apple/Dib/lib/Target/X86/X86ISelLowering.h Mon Mar 9 15:52:42 2009
@@ -235,9 +235,9 @@
PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ,
PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ,
- // ADD, SUB, SMUL, UMUL - Arithmetic operations with overflow/carry
- // intrinsics.
- ADD, SUB, SMUL, UMUL
+ // ADD, SUB, SMUL, UMUL, etc. - Arithmetic operations with FLAGS results.
+ ADD, SUB, SMUL, UMUL,
+ INC, DEC
};
}
@@ -659,6 +659,15 @@
MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(MachineInstr *BInstr,
MachineBasicBlock *BB,
unsigned cmovOpc);
+
+ /// Emit nodes that will be selected as "test Op0,Op0", or something
+ /// equivalent, for use with the given x86 condition code.
+ SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG);
+
+ /// Emit nodes that will be selected as "cmp Op0,Op1", or something
+ /// equivalent, for use with the given x86 condition code.
+ SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
+ SelectionDAG &DAG);
};
namespace X86 {
Modified: llvm/branches/Apple/Dib/lib/Target/X86/X86Instr64bit.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Target/X86/X86Instr64bit.td?rev=66456&r1=66455&r2=66456&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/lib/Target/X86/X86Instr64bit.td (original)
+++ llvm/branches/Apple/Dib/lib/Target/X86/X86Instr64bit.td Mon Mar 9 15:52:42 2009
@@ -538,36 +538,46 @@
let Defs = [EFLAGS], CodeSize = 2 in {
let isTwoAddress = 1 in
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
- [(set GR64:$dst, (ineg GR64:$src))]>;
+ [(set GR64:$dst, (ineg GR64:$src)),
+ (implicit EFLAGS)]>;
def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
- [(store (ineg (loadi64 addr:$dst)), addr:$dst)]>;
+ [(store (ineg (loadi64 addr:$dst)), addr:$dst),
+ (implicit EFLAGS)]>;
let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
- [(set GR64:$dst, (add GR64:$src, 1))]>;
+ [(set GR64:$dst, (add GR64:$src, 1)),
+ (implicit EFLAGS)]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
- [(store (add (loadi64 addr:$dst), 1), addr:$dst)]>;
+ [(store (add (loadi64 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>;
let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
- [(set GR64:$dst, (add GR64:$src, -1))]>;
+ [(set GR64:$dst, (add GR64:$src, -1)),
+ (implicit EFLAGS)]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
- [(store (add (loadi64 addr:$dst), -1), addr:$dst)]>;
+ [(store (add (loadi64 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>;
// In 64-bit mode, single byte INC and DEC cannot be encoded.
let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
- [(set GR16:$dst, (add GR16:$src, 1))]>,
+ [(set GR16:$dst, (add GR16:$src, 1)),
+ (implicit EFLAGS)]>,
OpSize, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
- [(set GR32:$dst, (add GR32:$src, 1))]>,
+ [(set GR32:$dst, (add GR32:$src, 1)),
+ (implicit EFLAGS)]>,
Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
- [(set GR16:$dst, (add GR16:$src, -1))]>,
+ [(set GR16:$dst, (add GR16:$src, -1)),
+ (implicit EFLAGS)]>,
OpSize, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
- [(set GR32:$dst, (add GR32:$src, -1))]>,
+ [(set GR32:$dst, (add GR32:$src, -1)),
+ (implicit EFLAGS)]>,
Requires<[In64BitMode]>;
} // isConvertibleToThreeAddress
@@ -575,16 +585,20 @@
// how to unfold them.
let isTwoAddress = 0, CodeSize = 2 in {
def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
- [(store (add (loadi16 addr:$dst), 1), addr:$dst)]>,
+ [(store (add (loadi16 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>,
OpSize, Requires<[In64BitMode]>;
def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
- [(store (add (loadi32 addr:$dst), 1), addr:$dst)]>,
+ [(store (add (loadi32 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>,
Requires<[In64BitMode]>;
def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
- [(store (add (loadi16 addr:$dst), -1), addr:$dst)]>,
+ [(store (add (loadi16 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>,
OpSize, Requires<[In64BitMode]>;
def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
- [(store (add (loadi32 addr:$dst), -1), addr:$dst)]>,
+ [(store (add (loadi32 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>,
Requires<[In64BitMode]>;
}
} // Defs = [EFLAGS], CodeSize
@@ -780,86 +794,107 @@
def AND64rr : RI<0x21, MRMDestReg,
(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"and{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, GR64:$src2))]>;
+ [(set GR64:$dst, (and GR64:$src1, GR64:$src2)),
+ (implicit EFLAGS)]>;
def AND64rm : RI<0x23, MRMSrcMem,
(outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"and{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, (load addr:$src2)))]>;
+ [(set GR64:$dst, (and GR64:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
def AND64ri8 : RIi8<0x83, MRM4r,
(outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"and{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2))]>;
+ [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2)),
+ (implicit EFLAGS)]>;
def AND64ri32 : RIi32<0x81, MRM4r,
(outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"and{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2))]>;
+ [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2)),
+ (implicit EFLAGS)]>;
} // isTwoAddress
def AND64mr : RI<0x21, MRMDestMem,
(outs), (ins i64mem:$dst, GR64:$src),
"and{q}\t{$src, $dst|$dst, $src}",
- [(store (and (load addr:$dst), GR64:$src), addr:$dst)]>;
+ [(store (and (load addr:$dst), GR64:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def AND64mi8 : RIi8<0x83, MRM4m,
(outs), (ins i64mem:$dst, i64i8imm :$src),
"and{q}\t{$src, $dst|$dst, $src}",
- [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
+ [(store (and (load addr:$dst), i64immSExt8:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def AND64mi32 : RIi32<0x81, MRM4m,
(outs), (ins i64mem:$dst, i64i32imm:$src),
"and{q}\t{$src, $dst|$dst, $src}",
- [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
+ [(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
+ (implicit EFLAGS)]>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, GR64:$src2))]>;
+ [(set GR64:$dst, (or GR64:$src1, GR64:$src2)),
+ (implicit EFLAGS)]>;
def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, (load addr:$src2)))]>;
+ [(set GR64:$dst, (or GR64:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2))]>;
+ [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2)),
+ (implicit EFLAGS)]>;
def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2))]>;
+ [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2)),
+ (implicit EFLAGS)]>;
} // isTwoAddress
def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"or{q}\t{$src, $dst|$dst, $src}",
- [(store (or (load addr:$dst), GR64:$src), addr:$dst)]>;
+ [(store (or (load addr:$dst), GR64:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def OR64mi8 : RIi8<0x83, MRM1m, (outs), (ins i64mem:$dst, i64i8imm:$src),
"or{q}\t{$src, $dst|$dst, $src}",
- [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
+ [(store (or (load addr:$dst), i64immSExt8:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
"or{q}\t{$src, $dst|$dst, $src}",
- [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
+ [(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
+ (implicit EFLAGS)]>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (xor GR64:$src1, GR64:$src2))]>;
+ [(set GR64:$dst, (xor GR64:$src1, GR64:$src2)),
+ (implicit EFLAGS)]>;
def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2)))]>;
+ [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2))]>;
+ [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2)),
+ (implicit EFLAGS)]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
(outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2))]>;
+ [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2)),
+ (implicit EFLAGS)]>;
} // isTwoAddress
def XOR64mr : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"xor{q}\t{$src, $dst|$dst, $src}",
- [(store (xor (load addr:$dst), GR64:$src), addr:$dst)]>;
+ [(store (xor (load addr:$dst), GR64:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def XOR64mi8 : RIi8<0x83, MRM6m, (outs), (ins i64mem:$dst, i64i8imm :$src),
"xor{q}\t{$src, $dst|$dst, $src}",
- [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst)]>;
+ [(store (xor (load addr:$dst), i64immSExt8:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
"xor{q}\t{$src, $dst|$dst, $src}",
- [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst)]>;
+ [(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
+ (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
//===----------------------------------------------------------------------===//
@@ -1596,101 +1631,135 @@
(SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
//===----------------------------------------------------------------------===//
-// Overflow Patterns
+// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//
-// Register-Register Addition with Overflow
-def : Pat<(parallel (X86add_ovf GR64:$src1, GR64:$src2),
+// Register-Register Addition with EFLAGS result
+def : Pat<(parallel (X86add_flag GR64:$src1, GR64:$src2),
(implicit EFLAGS)),
(ADD64rr GR64:$src1, GR64:$src2)>;
-// Register-Integer Addition with Overflow
-def : Pat<(parallel (X86add_ovf GR64:$src1, i64immSExt8:$src2),
+// Register-Integer Addition with EFLAGS result
+def : Pat<(parallel (X86add_flag GR64:$src1, i64immSExt8:$src2),
(implicit EFLAGS)),
(ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(parallel (X86add_ovf GR64:$src1, i64immSExt32:$src2),
+def : Pat<(parallel (X86add_flag GR64:$src1, i64immSExt32:$src2),
(implicit EFLAGS)),
(ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
-// Register-Memory Addition with Overflow
-def : Pat<(parallel (X86add_ovf GR64:$src1, (load addr:$src2)),
+// Register-Memory Addition with EFLAGS result
+def : Pat<(parallel (X86add_flag GR64:$src1, (loadi64 addr:$src2)),
(implicit EFLAGS)),
(ADD64rm GR64:$src1, addr:$src2)>;
-// Memory-Register Addition with Overflow
-def : Pat<(parallel (store (X86add_ovf (load addr:$dst), GR64:$src2),
+// Memory-Register Addition with EFLAGS result
+def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), GR64:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD64mr addr:$dst, GR64:$src2)>;
-def : Pat<(parallel (store (X86add_ovf (load addr:$dst), i64immSExt8:$src2),
+def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), i64immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD64mi8 addr:$dst, i64immSExt8:$src2)>;
-def : Pat<(parallel (store (X86add_ovf (load addr:$dst), i64immSExt32:$src2),
+def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), i64immSExt32:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD64mi32 addr:$dst, i64immSExt32:$src2)>;
-// Register-Register Subtraction with Overflow
-def : Pat<(parallel (X86sub_ovf GR64:$src1, GR64:$src2),
+// Register-Register Subtraction with EFLAGS result
+def : Pat<(parallel (X86sub_flag GR64:$src1, GR64:$src2),
(implicit EFLAGS)),
(SUB64rr GR64:$src1, GR64:$src2)>;
-// Register-Memory Subtraction with Overflow
-def : Pat<(parallel (X86sub_ovf GR64:$src1, (load addr:$src2)),
+// Register-Memory Subtraction with EFLAGS result
+def : Pat<(parallel (X86sub_flag GR64:$src1, (loadi64 addr:$src2)),
(implicit EFLAGS)),
(SUB64rm GR64:$src1, addr:$src2)>;
-// Register-Integer Subtraction with Overflow
-def : Pat<(parallel (X86sub_ovf GR64:$src1, i64immSExt8:$src2),
+// Register-Integer Subtraction with EFLAGS result
+def : Pat<(parallel (X86sub_flag GR64:$src1, i64immSExt8:$src2),
(implicit EFLAGS)),
(SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(parallel (X86sub_ovf GR64:$src1, i64immSExt32:$src2),
+def : Pat<(parallel (X86sub_flag GR64:$src1, i64immSExt32:$src2),
(implicit EFLAGS)),
(SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
-// Memory-Register Subtraction with Overflow
-def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), GR64:$src2),
+// Memory-Register Subtraction with EFLAGS result
+def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), GR64:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB64mr addr:$dst, GR64:$src2)>;
-// Memory-Integer Subtraction with Overflow
-def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), i64immSExt8:$src2),
+// Memory-Integer Subtraction with EFLAGS result
+def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), i64immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB64mi8 addr:$dst, i64immSExt8:$src2)>;
-def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), i64immSExt32:$src2),
+def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), i64immSExt32:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB64mi32 addr:$dst, i64immSExt32:$src2)>;
-// Register-Register Signed Integer Multiplication with Overflow
-def : Pat<(parallel (X86smul_ovf GR64:$src1, GR64:$src2),
+// Register-Register Signed Integer Multiplication with EFLAGS result
+def : Pat<(parallel (X86smul_flag GR64:$src1, GR64:$src2),
(implicit EFLAGS)),
(IMUL64rr GR64:$src1, GR64:$src2)>;
-// Register-Memory Signed Integer Multiplication with Overflow
-def : Pat<(parallel (X86smul_ovf GR64:$src1, (load addr:$src2)),
+// Register-Memory Signed Integer Multiplication with EFLAGS result
+def : Pat<(parallel (X86smul_flag GR64:$src1, (loadi64 addr:$src2)),
(implicit EFLAGS)),
(IMUL64rm GR64:$src1, addr:$src2)>;
-// Register-Integer Signed Integer Multiplication with Overflow
-def : Pat<(parallel (X86smul_ovf GR64:$src1, i64immSExt8:$src2),
+// Register-Integer Signed Integer Multiplication with EFLAGS result
+def : Pat<(parallel (X86smul_flag GR64:$src1, i64immSExt8:$src2),
(implicit EFLAGS)),
(IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(parallel (X86smul_ovf GR64:$src1, i64immSExt32:$src2),
+def : Pat<(parallel (X86smul_flag GR64:$src1, i64immSExt32:$src2),
(implicit EFLAGS)),
(IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
-// Memory-Integer Signed Integer Multiplication with Overflow
-def : Pat<(parallel (X86smul_ovf (load addr:$src1), i64immSExt8:$src2),
+// Memory-Integer Signed Integer Multiplication with EFLAGS result
+def : Pat<(parallel (X86smul_flag (loadi64 addr:$src1), i64immSExt8:$src2),
(implicit EFLAGS)),
(IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
-def : Pat<(parallel (X86smul_ovf (load addr:$src1), i64immSExt32:$src2),
+def : Pat<(parallel (X86smul_flag (loadi64 addr:$src1), i64immSExt32:$src2),
(implicit EFLAGS)),
(IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
+// INC and DEC with EFLAGS result. Note that these do not set CF.
+def : Pat<(parallel (X86inc_flag GR16:$src), (implicit EFLAGS)),
+ (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
+def : Pat<(parallel (store (i16 (X86inc_flag (loadi16 addr:$dst))), addr:$dst),
+ (implicit EFLAGS)),
+ (INC64_16m addr:$dst)>, Requires<[In64BitMode]>;
+def : Pat<(parallel (X86dec_flag GR16:$src), (implicit EFLAGS)),
+ (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
+def : Pat<(parallel (store (i16 (X86dec_flag (loadi16 addr:$dst))), addr:$dst),
+ (implicit EFLAGS)),
+ (DEC64_16m addr:$dst)>, Requires<[In64BitMode]>;
+
+def : Pat<(parallel (X86inc_flag GR32:$src), (implicit EFLAGS)),
+ (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
+def : Pat<(parallel (store (i32 (X86inc_flag (loadi32 addr:$dst))), addr:$dst),
+ (implicit EFLAGS)),
+ (INC64_32m addr:$dst)>, Requires<[In64BitMode]>;
+def : Pat<(parallel (X86dec_flag GR32:$src), (implicit EFLAGS)),
+ (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
+def : Pat<(parallel (store (i32 (X86dec_flag (loadi32 addr:$dst))), addr:$dst),
+ (implicit EFLAGS)),
+ (DEC64_32m addr:$dst)>, Requires<[In64BitMode]>;
+
+def : Pat<(parallel (X86inc_flag GR64:$src), (implicit EFLAGS)),
+ (INC64r GR64:$src)>;
+def : Pat<(parallel (store (i64 (X86inc_flag (loadi64 addr:$dst))), addr:$dst),
+ (implicit EFLAGS)),
+ (INC64m addr:$dst)>;
+def : Pat<(parallel (X86dec_flag GR64:$src), (implicit EFLAGS)),
+ (DEC64r GR64:$src)>;
+def : Pat<(parallel (store (i64 (X86dec_flag (loadi64 addr:$dst))), addr:$dst),
+ (implicit EFLAGS)),
+ (DEC64m addr:$dst)>;
+
//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions
//===----------------------------------------------------------------------===//
Modified: llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.cpp?rev=66456&r1=66455&r2=66456&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.cpp Mon Mar 9 15:52:42 2009
@@ -2404,6 +2404,7 @@
const TargetOperandInfo &TOI = TID.OpInfo[Index];
const TargetRegisterClass *RC = TOI.isLookupPtrRegClass()
? RI.getPointerRegClass() : RI.getRegClass(TOI.RegClass);
+ unsigned NumDefs = TID.NumDefs;
std::vector<SDValue> AddrOps;
std::vector<SDValue> BeforeOps;
std::vector<SDValue> AfterOps;
@@ -2411,11 +2412,11 @@
unsigned NumOps = N->getNumOperands();
for (unsigned i = 0; i != NumOps-1; ++i) {
SDValue Op = N->getOperand(i);
- if (i >= Index && i < Index+4)
+ if (i >= Index-NumDefs && i < Index-NumDefs+4)
AddrOps.push_back(Op);
- else if (i < Index)
+ else if (i < Index-NumDefs)
BeforeOps.push_back(Op);
- else if (i > Index)
+ else if (i > Index-NumDefs)
AfterOps.push_back(Op);
}
SDValue Chain = N->getOperand(NumOps-1);
Modified: llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.td?rev=66456&r1=66455&r2=66456&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.td (original)
+++ llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.td Mon Mar 9 15:52:42 2009
@@ -27,11 +27,13 @@
[SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
-def SDTUnaryArithOvf : SDTypeProfile<1, 1,
- [SDTCisInt<0>]>;
-def SDTBinaryArithOvf : SDTypeProfile<1, 2,
- [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
- SDTCisInt<0>]>;
+// Unary and binary operator instructions that set EFLAGS as a side-effect.
+def SDTUnaryArithWithFlags : SDTypeProfile<1, 1,
+ [SDTCisInt<0>]>;
+def SDTBinaryArithWithFlags : SDTypeProfile<1, 2,
+ [SDTCisSameAs<0, 1>,
+ SDTCisSameAs<0, 2>,
+ SDTCisInt<0>]>;
def SDTX86BrCond : SDTypeProfile<0, 3,
[SDTCisVT<0, OtherVT>,
@@ -148,10 +150,12 @@
def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
[SDNPHasChain, SDNPOptInFlag]>;
-def X86add_ovf : SDNode<"X86ISD::ADD", SDTBinaryArithOvf>;
-def X86sub_ovf : SDNode<"X86ISD::SUB", SDTBinaryArithOvf>;
-def X86smul_ovf : SDNode<"X86ISD::SMUL", SDTBinaryArithOvf>;
-def X86umul_ovf : SDNode<"X86ISD::UMUL", SDTUnaryArithOvf>;
+def X86add_flag : SDNode<"X86ISD::ADD", SDTBinaryArithWithFlags>;
+def X86sub_flag : SDNode<"X86ISD::SUB", SDTBinaryArithWithFlags>;
+def X86smul_flag : SDNode<"X86ISD::SMUL", SDTBinaryArithWithFlags>;
+def X86umul_flag : SDNode<"X86ISD::UMUL", SDTUnaryArithWithFlags>;
+def X86inc_flag : SDNode<"X86ISD::INC", SDTUnaryArithWithFlags>;
+def X86dec_flag : SDNode<"X86ISD::DEC", SDTUnaryArithWithFlags>;
//===----------------------------------------------------------------------===//
// X86 Operand Definitions.
@@ -1230,19 +1234,24 @@
let CodeSize = 2 in {
let Defs = [EFLAGS] in {
def NEG8r : I<0xF6, MRM3r, (outs GR8 :$dst), (ins GR8 :$src), "neg{b}\t$dst",
- [(set GR8:$dst, (ineg GR8:$src))]>;
+ [(set GR8:$dst, (ineg GR8:$src)),
+ (implicit EFLAGS)]>;
def NEG16r : I<0xF7, MRM3r, (outs GR16:$dst), (ins GR16:$src), "neg{w}\t$dst",
- [(set GR16:$dst, (ineg GR16:$src))]>, OpSize;
+ [(set GR16:$dst, (ineg GR16:$src)),
+ (implicit EFLAGS)]>, OpSize;
def NEG32r : I<0xF7, MRM3r, (outs GR32:$dst), (ins GR32:$src), "neg{l}\t$dst",
- [(set GR32:$dst, (ineg GR32:$src))]>;
+ [(set GR32:$dst, (ineg GR32:$src)),
+ (implicit EFLAGS)]>;
let isTwoAddress = 0 in {
def NEG8m : I<0xF6, MRM3m, (outs), (ins i8mem :$dst), "neg{b}\t$dst",
- [(store (ineg (loadi8 addr:$dst)), addr:$dst)]>;
+ [(store (ineg (loadi8 addr:$dst)), addr:$dst),
+ (implicit EFLAGS)]>;
def NEG16m : I<0xF7, MRM3m, (outs), (ins i16mem:$dst), "neg{w}\t$dst",
- [(store (ineg (loadi16 addr:$dst)), addr:$dst)]>, OpSize;
+ [(store (ineg (loadi16 addr:$dst)), addr:$dst),
+ (implicit EFLAGS)]>, OpSize;
def NEG32m : I<0xF7, MRM3m, (outs), (ins i32mem:$dst), "neg{l}\t$dst",
- [(store (ineg (loadi32 addr:$dst)), addr:$dst)]>;
-
+ [(store (ineg (loadi32 addr:$dst)), addr:$dst),
+ (implicit EFLAGS)]>;
}
} // Defs = [EFLAGS]
@@ -1269,44 +1278,56 @@
let Defs = [EFLAGS] in {
let CodeSize = 2 in
def INC8r : I<0xFE, MRM0r, (outs GR8 :$dst), (ins GR8 :$src), "inc{b}\t$dst",
- [(set GR8:$dst, (add GR8:$src, 1))]>;
+ [(set GR8:$dst, (add GR8:$src, 1)),
+ (implicit EFLAGS)]>;
let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst",
- [(set GR16:$dst, (add GR16:$src, 1))]>,
+ [(set GR16:$dst, (add GR16:$src, 1)),
+ (implicit EFLAGS)]>,
OpSize, Requires<[In32BitMode]>;
def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst",
- [(set GR32:$dst, (add GR32:$src, 1))]>, Requires<[In32BitMode]>;
+ [(set GR32:$dst, (add GR32:$src, 1)),
+ (implicit EFLAGS)]>, Requires<[In32BitMode]>;
}
let isTwoAddress = 0, CodeSize = 2 in {
def INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst), "inc{b}\t$dst",
- [(store (add (loadi8 addr:$dst), 1), addr:$dst)]>;
+ [(store (add (loadi8 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>;
def INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
- [(store (add (loadi16 addr:$dst), 1), addr:$dst)]>,
+ [(store (add (loadi16 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>,
OpSize, Requires<[In32BitMode]>;
def INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
- [(store (add (loadi32 addr:$dst), 1), addr:$dst)]>,
+ [(store (add (loadi32 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>,
Requires<[In32BitMode]>;
}
let CodeSize = 2 in
def DEC8r : I<0xFE, MRM1r, (outs GR8 :$dst), (ins GR8 :$src), "dec{b}\t$dst",
- [(set GR8:$dst, (add GR8:$src, -1))]>;
+ [(set GR8:$dst, (add GR8:$src, -1)),
+ (implicit EFLAGS)]>;
let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst",
- [(set GR16:$dst, (add GR16:$src, -1))]>,
+ [(set GR16:$dst, (add GR16:$src, -1)),
+ (implicit EFLAGS)]>,
OpSize, Requires<[In32BitMode]>;
def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst",
- [(set GR32:$dst, (add GR32:$src, -1))]>, Requires<[In32BitMode]>;
+ [(set GR32:$dst, (add GR32:$src, -1)),
+ (implicit EFLAGS)]>, Requires<[In32BitMode]>;
}
let isTwoAddress = 0, CodeSize = 2 in {
def DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst), "dec{b}\t$dst",
- [(store (add (loadi8 addr:$dst), -1), addr:$dst)]>;
+ [(store (add (loadi8 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>;
def DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
- [(store (add (loadi16 addr:$dst), -1), addr:$dst)]>,
+ [(store (add (loadi16 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>,
OpSize, Requires<[In32BitMode]>;
def DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
- [(store (add (loadi32 addr:$dst), -1), addr:$dst)]>,
+ [(store (add (loadi32 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>,
Requires<[In32BitMode]>;
}
} // Defs = [EFLAGS]
@@ -1317,155 +1338,193 @@
def AND8rr : I<0x20, MRMDestReg,
(outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
"and{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (and GR8:$src1, GR8:$src2))]>;
+ [(set GR8:$dst, (and GR8:$src1, GR8:$src2)),
+ (implicit EFLAGS)]>;
def AND16rr : I<0x21, MRMDestReg,
(outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
"and{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (and GR16:$src1, GR16:$src2))]>, OpSize;
+ [(set GR16:$dst, (and GR16:$src1, GR16:$src2)),
+ (implicit EFLAGS)]>, OpSize;
def AND32rr : I<0x21, MRMDestReg,
(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"and{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (and GR32:$src1, GR32:$src2))]>;
+ [(set GR32:$dst, (and GR32:$src1, GR32:$src2)),
+ (implicit EFLAGS)]>;
}
def AND8rm : I<0x22, MRMSrcMem,
(outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2),
"and{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (and GR8:$src1, (load addr:$src2)))]>;
+ [(set GR8:$dst, (and GR8:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
def AND16rm : I<0x23, MRMSrcMem,
(outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
"and{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (and GR16:$src1, (load addr:$src2)))]>, OpSize;
+ [(set GR16:$dst, (and GR16:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>, OpSize;
def AND32rm : I<0x23, MRMSrcMem,
(outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
"and{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (and GR32:$src1, (load addr:$src2)))]>;
+ [(set GR32:$dst, (and GR32:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
def AND8ri : Ii8<0x80, MRM4r,
(outs GR8 :$dst), (ins GR8 :$src1, i8imm :$src2),
"and{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (and GR8:$src1, imm:$src2))]>;
+ [(set GR8:$dst, (and GR8:$src1, imm:$src2)),
+ (implicit EFLAGS)]>;
def AND16ri : Ii16<0x81, MRM4r,
(outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
"and{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (and GR16:$src1, imm:$src2))]>, OpSize;
+ [(set GR16:$dst, (and GR16:$src1, imm:$src2)),
+ (implicit EFLAGS)]>, OpSize;
def AND32ri : Ii32<0x81, MRM4r,
(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
"and{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (and GR32:$src1, imm:$src2))]>;
+ [(set GR32:$dst, (and GR32:$src1, imm:$src2)),
+ (implicit EFLAGS)]>;
def AND16ri8 : Ii8<0x83, MRM4r,
(outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"and{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (and GR16:$src1, i16immSExt8:$src2))]>,
+ [(set GR16:$dst, (and GR16:$src1, i16immSExt8:$src2)),
+ (implicit EFLAGS)]>,
OpSize;
def AND32ri8 : Ii8<0x83, MRM4r,
(outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"and{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (and GR32:$src1, i32immSExt8:$src2))]>;
+ [(set GR32:$dst, (and GR32:$src1, i32immSExt8:$src2)),
+ (implicit EFLAGS)]>;
let isTwoAddress = 0 in {
def AND8mr : I<0x20, MRMDestMem,
(outs), (ins i8mem :$dst, GR8 :$src),
"and{b}\t{$src, $dst|$dst, $src}",
- [(store (and (load addr:$dst), GR8:$src), addr:$dst)]>;
+ [(store (and (load addr:$dst), GR8:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def AND16mr : I<0x21, MRMDestMem,
(outs), (ins i16mem:$dst, GR16:$src),
"and{w}\t{$src, $dst|$dst, $src}",
- [(store (and (load addr:$dst), GR16:$src), addr:$dst)]>,
+ [(store (and (load addr:$dst), GR16:$src), addr:$dst),
+ (implicit EFLAGS)]>,
OpSize;
def AND32mr : I<0x21, MRMDestMem,
(outs), (ins i32mem:$dst, GR32:$src),
"and{l}\t{$src, $dst|$dst, $src}",
- [(store (and (load addr:$dst), GR32:$src), addr:$dst)]>;
+ [(store (and (load addr:$dst), GR32:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def AND8mi : Ii8<0x80, MRM4m,
(outs), (ins i8mem :$dst, i8imm :$src),
"and{b}\t{$src, $dst|$dst, $src}",
- [(store (and (loadi8 addr:$dst), imm:$src), addr:$dst)]>;
+ [(store (and (loadi8 addr:$dst), imm:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def AND16mi : Ii16<0x81, MRM4m,
(outs), (ins i16mem:$dst, i16imm:$src),
"and{w}\t{$src, $dst|$dst, $src}",
- [(store (and (loadi16 addr:$dst), imm:$src), addr:$dst)]>,
+ [(store (and (loadi16 addr:$dst), imm:$src), addr:$dst),
+ (implicit EFLAGS)]>,
OpSize;
def AND32mi : Ii32<0x81, MRM4m,
(outs), (ins i32mem:$dst, i32imm:$src),
"and{l}\t{$src, $dst|$dst, $src}",
- [(store (and (loadi32 addr:$dst), imm:$src), addr:$dst)]>;
+ [(store (and (loadi32 addr:$dst), imm:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def AND16mi8 : Ii8<0x83, MRM4m,
(outs), (ins i16mem:$dst, i16i8imm :$src),
"and{w}\t{$src, $dst|$dst, $src}",
- [(store (and (load addr:$dst), i16immSExt8:$src), addr:$dst)]>,
+ [(store (and (load addr:$dst), i16immSExt8:$src), addr:$dst),
+ (implicit EFLAGS)]>,
OpSize;
def AND32mi8 : Ii8<0x83, MRM4m,
(outs), (ins i32mem:$dst, i32i8imm :$src),
"and{l}\t{$src, $dst|$dst, $src}",
- [(store (and (load addr:$dst), i32immSExt8:$src), addr:$dst)]>;
+ [(store (and (load addr:$dst), i32immSExt8:$src), addr:$dst),
+ (implicit EFLAGS)]>;
}
let isCommutable = 1 in { // X = OR Y, Z --> X = OR Z, Y
def OR8rr : I<0x08, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
"or{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (or GR8:$src1, GR8:$src2))]>;
+ [(set GR8:$dst, (or GR8:$src1, GR8:$src2)),
+ (implicit EFLAGS)]>;
def OR16rr : I<0x09, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
"or{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (or GR16:$src1, GR16:$src2))]>, OpSize;
+ [(set GR16:$dst, (or GR16:$src1, GR16:$src2)),
+ (implicit EFLAGS)]>, OpSize;
def OR32rr : I<0x09, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"or{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (or GR32:$src1, GR32:$src2))]>;
+ [(set GR32:$dst, (or GR32:$src1, GR32:$src2)),
+ (implicit EFLAGS)]>;
}
def OR8rm : I<0x0A, MRMSrcMem , (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2),
"or{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (or GR8:$src1, (load addr:$src2)))]>;
+ [(set GR8:$dst, (or GR8:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
def OR16rm : I<0x0B, MRMSrcMem , (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
"or{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (or GR16:$src1, (load addr:$src2)))]>, OpSize;
+ [(set GR16:$dst, (or GR16:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>, OpSize;
def OR32rm : I<0x0B, MRMSrcMem , (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
"or{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (or GR32:$src1, (load addr:$src2)))]>;
+ [(set GR32:$dst, (or GR32:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
def OR8ri : Ii8 <0x80, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
"or{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (or GR8:$src1, imm:$src2))]>;
+ [(set GR8:$dst, (or GR8:$src1, imm:$src2)),
+ (implicit EFLAGS)]>;
def OR16ri : Ii16<0x81, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
"or{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (or GR16:$src1, imm:$src2))]>, OpSize;
+ [(set GR16:$dst, (or GR16:$src1, imm:$src2)),
+ (implicit EFLAGS)]>, OpSize;
def OR32ri : Ii32<0x81, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
"or{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (or GR32:$src1, imm:$src2))]>;
+ [(set GR32:$dst, (or GR32:$src1, imm:$src2)),
+ (implicit EFLAGS)]>;
def OR16ri8 : Ii8<0x83, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"or{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (or GR16:$src1, i16immSExt8:$src2))]>, OpSize;
+ [(set GR16:$dst, (or GR16:$src1, i16immSExt8:$src2)),
+ (implicit EFLAGS)]>, OpSize;
def OR32ri8 : Ii8<0x83, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"or{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (or GR32:$src1, i32immSExt8:$src2))]>;
+ [(set GR32:$dst, (or GR32:$src1, i32immSExt8:$src2)),
+ (implicit EFLAGS)]>;
let isTwoAddress = 0 in {
def OR8mr : I<0x08, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
"or{b}\t{$src, $dst|$dst, $src}",
- [(store (or (load addr:$dst), GR8:$src), addr:$dst)]>;
+ [(store (or (load addr:$dst), GR8:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def OR16mr : I<0x09, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"or{w}\t{$src, $dst|$dst, $src}",
- [(store (or (load addr:$dst), GR16:$src), addr:$dst)]>, OpSize;
+ [(store (or (load addr:$dst), GR16:$src), addr:$dst),
+ (implicit EFLAGS)]>, OpSize;
def OR32mr : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"or{l}\t{$src, $dst|$dst, $src}",
- [(store (or (load addr:$dst), GR32:$src), addr:$dst)]>;
+ [(store (or (load addr:$dst), GR32:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def OR8mi : Ii8<0x80, MRM1m, (outs), (ins i8mem :$dst, i8imm:$src),
"or{b}\t{$src, $dst|$dst, $src}",
- [(store (or (loadi8 addr:$dst), imm:$src), addr:$dst)]>;
+ [(store (or (loadi8 addr:$dst), imm:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def OR16mi : Ii16<0x81, MRM1m, (outs), (ins i16mem:$dst, i16imm:$src),
"or{w}\t{$src, $dst|$dst, $src}",
- [(store (or (loadi16 addr:$dst), imm:$src), addr:$dst)]>,
+ [(store (or (loadi16 addr:$dst), imm:$src), addr:$dst),
+ (implicit EFLAGS)]>,
OpSize;
def OR32mi : Ii32<0x81, MRM1m, (outs), (ins i32mem:$dst, i32imm:$src),
"or{l}\t{$src, $dst|$dst, $src}",
- [(store (or (loadi32 addr:$dst), imm:$src), addr:$dst)]>;
+ [(store (or (loadi32 addr:$dst), imm:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def OR16mi8 : Ii8<0x83, MRM1m, (outs), (ins i16mem:$dst, i16i8imm:$src),
"or{w}\t{$src, $dst|$dst, $src}",
- [(store (or (load addr:$dst), i16immSExt8:$src), addr:$dst)]>,
+ [(store (or (load addr:$dst), i16immSExt8:$src), addr:$dst),
+ (implicit EFLAGS)]>,
OpSize;
def OR32mi8 : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$src),
"or{l}\t{$src, $dst|$dst, $src}",
- [(store (or (load addr:$dst), i32immSExt8:$src), addr:$dst)]>;
+ [(store (or (load addr:$dst), i32immSExt8:$src), addr:$dst),
+ (implicit EFLAGS)]>;
} // isTwoAddress = 0
@@ -1473,89 +1532,108 @@
def XOR8rr : I<0x30, MRMDestReg,
(outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
"xor{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (xor GR8:$src1, GR8:$src2))]>;
+ [(set GR8:$dst, (xor GR8:$src1, GR8:$src2)),
+ (implicit EFLAGS)]>;
def XOR16rr : I<0x31, MRMDestReg,
(outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
"xor{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (xor GR16:$src1, GR16:$src2))]>, OpSize;
+ [(set GR16:$dst, (xor GR16:$src1, GR16:$src2)),
+ (implicit EFLAGS)]>, OpSize;
def XOR32rr : I<0x31, MRMDestReg,
(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"xor{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (xor GR32:$src1, GR32:$src2))]>;
+ [(set GR32:$dst, (xor GR32:$src1, GR32:$src2)),
+ (implicit EFLAGS)]>;
} // isCommutable = 1
def XOR8rm : I<0x32, MRMSrcMem ,
(outs GR8 :$dst), (ins GR8:$src1, i8mem :$src2),
"xor{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (xor GR8:$src1, (load addr:$src2)))]>;
+ [(set GR8:$dst, (xor GR8:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
def XOR16rm : I<0x33, MRMSrcMem ,
(outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
"xor{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (xor GR16:$src1, (load addr:$src2)))]>,
+ [(set GR16:$dst, (xor GR16:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>,
OpSize;
def XOR32rm : I<0x33, MRMSrcMem ,
(outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
"xor{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (xor GR32:$src1, (load addr:$src2)))]>;
+ [(set GR32:$dst, (xor GR32:$src1, (load addr:$src2))),
+ (implicit EFLAGS)]>;
def XOR8ri : Ii8<0x80, MRM6r,
(outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
"xor{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (xor GR8:$src1, imm:$src2))]>;
+ [(set GR8:$dst, (xor GR8:$src1, imm:$src2)),
+ (implicit EFLAGS)]>;
def XOR16ri : Ii16<0x81, MRM6r,
(outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
"xor{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (xor GR16:$src1, imm:$src2))]>, OpSize;
+ [(set GR16:$dst, (xor GR16:$src1, imm:$src2)),
+ (implicit EFLAGS)]>, OpSize;
def XOR32ri : Ii32<0x81, MRM6r,
(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
"xor{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (xor GR32:$src1, imm:$src2))]>;
+ [(set GR32:$dst, (xor GR32:$src1, imm:$src2)),
+ (implicit EFLAGS)]>;
def XOR16ri8 : Ii8<0x83, MRM6r,
(outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"xor{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (xor GR16:$src1, i16immSExt8:$src2))]>,
+ [(set GR16:$dst, (xor GR16:$src1, i16immSExt8:$src2)),
+ (implicit EFLAGS)]>,
OpSize;
def XOR32ri8 : Ii8<0x83, MRM6r,
(outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"xor{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (xor GR32:$src1, i32immSExt8:$src2))]>;
+ [(set GR32:$dst, (xor GR32:$src1, i32immSExt8:$src2)),
+ (implicit EFLAGS)]>;
let isTwoAddress = 0 in {
def XOR8mr : I<0x30, MRMDestMem,
(outs), (ins i8mem :$dst, GR8 :$src),
"xor{b}\t{$src, $dst|$dst, $src}",
- [(store (xor (load addr:$dst), GR8:$src), addr:$dst)]>;
+ [(store (xor (load addr:$dst), GR8:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def XOR16mr : I<0x31, MRMDestMem,
(outs), (ins i16mem:$dst, GR16:$src),
"xor{w}\t{$src, $dst|$dst, $src}",
- [(store (xor (load addr:$dst), GR16:$src), addr:$dst)]>,
+ [(store (xor (load addr:$dst), GR16:$src), addr:$dst),
+ (implicit EFLAGS)]>,
OpSize;
def XOR32mr : I<0x31, MRMDestMem,
(outs), (ins i32mem:$dst, GR32:$src),
"xor{l}\t{$src, $dst|$dst, $src}",
- [(store (xor (load addr:$dst), GR32:$src), addr:$dst)]>;
+ [(store (xor (load addr:$dst), GR32:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def XOR8mi : Ii8<0x80, MRM6m,
(outs), (ins i8mem :$dst, i8imm :$src),
"xor{b}\t{$src, $dst|$dst, $src}",
- [(store (xor (loadi8 addr:$dst), imm:$src), addr:$dst)]>;
+ [(store (xor (loadi8 addr:$dst), imm:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def XOR16mi : Ii16<0x81, MRM6m,
(outs), (ins i16mem:$dst, i16imm:$src),
"xor{w}\t{$src, $dst|$dst, $src}",
- [(store (xor (loadi16 addr:$dst), imm:$src), addr:$dst)]>,
+ [(store (xor (loadi16 addr:$dst), imm:$src), addr:$dst),
+ (implicit EFLAGS)]>,
OpSize;
def XOR32mi : Ii32<0x81, MRM6m,
(outs), (ins i32mem:$dst, i32imm:$src),
"xor{l}\t{$src, $dst|$dst, $src}",
- [(store (xor (loadi32 addr:$dst), imm:$src), addr:$dst)]>;
+ [(store (xor (loadi32 addr:$dst), imm:$src), addr:$dst),
+ (implicit EFLAGS)]>;
def XOR16mi8 : Ii8<0x83, MRM6m,
(outs), (ins i16mem:$dst, i16i8imm :$src),
"xor{w}\t{$src, $dst|$dst, $src}",
- [(store (xor (load addr:$dst), i16immSExt8:$src), addr:$dst)]>,
+ [(store (xor (load addr:$dst), i16immSExt8:$src), addr:$dst),
+ (implicit EFLAGS)]>,
OpSize;
def XOR32mi8 : Ii8<0x83, MRM6m,
(outs), (ins i32mem:$dst, i32i8imm :$src),
"xor{l}\t{$src, $dst|$dst, $src}",
- [(store (xor (load addr:$dst), i32immSExt8:$src), addr:$dst)]>;
+ [(store (xor (load addr:$dst), i32immSExt8:$src), addr:$dst),
+ (implicit EFLAGS)]>;
} // isTwoAddress = 0
} // Defs = [EFLAGS]
@@ -3413,217 +3491,251 @@
(SHLD16mri8 addr:$dst, GR16:$src2, (i8 imm:$amt1))>;
//===----------------------------------------------------------------------===//
-// Overflow Patterns
+// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//
-// Register-Register Addition with Overflow
-def : Pat<(parallel (X86add_ovf GR8:$src1, GR8:$src2),
+// Register-Register Addition with EFLAGS result
+def : Pat<(parallel (X86add_flag GR8:$src1, GR8:$src2),
(implicit EFLAGS)),
(ADD8rr GR8:$src1, GR8:$src2)>;
-// Register-Register Addition with Overflow
-def : Pat<(parallel (X86add_ovf GR16:$src1, GR16:$src2),
+// Register-Register Addition with EFLAGS result
+def : Pat<(parallel (X86add_flag GR16:$src1, GR16:$src2),
(implicit EFLAGS)),
(ADD16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(parallel (X86add_ovf GR32:$src1, GR32:$src2),
+def : Pat<(parallel (X86add_flag GR32:$src1, GR32:$src2),
(implicit EFLAGS)),
(ADD32rr GR32:$src1, GR32:$src2)>;
-// Register-Memory Addition with Overflow
-def : Pat<(parallel (X86add_ovf GR8:$src1, (load addr:$src2)),
+// Register-Memory Addition with EFLAGS result
+def : Pat<(parallel (X86add_flag GR8:$src1, (loadi8 addr:$src2)),
(implicit EFLAGS)),
(ADD8rm GR8:$src1, addr:$src2)>;
-def : Pat<(parallel (X86add_ovf GR16:$src1, (load addr:$src2)),
+def : Pat<(parallel (X86add_flag GR16:$src1, (loadi16 addr:$src2)),
(implicit EFLAGS)),
(ADD16rm GR16:$src1, addr:$src2)>;
-def : Pat<(parallel (X86add_ovf GR32:$src1, (load addr:$src2)),
+def : Pat<(parallel (X86add_flag GR32:$src1, (loadi32 addr:$src2)),
(implicit EFLAGS)),
(ADD32rm GR32:$src1, addr:$src2)>;
-// Register-Integer Addition with Overflow
-def : Pat<(parallel (X86add_ovf GR8:$src1, imm:$src2),
+// Register-Integer Addition with EFLAGS result
+def : Pat<(parallel (X86add_flag GR8:$src1, imm:$src2),
(implicit EFLAGS)),
(ADD8ri GR8:$src1, imm:$src2)>;
-// Register-Integer Addition with Overflow
-def : Pat<(parallel (X86add_ovf GR16:$src1, imm:$src2),
+// Register-Integer Addition with EFLAGS result
+def : Pat<(parallel (X86add_flag GR16:$src1, imm:$src2),
(implicit EFLAGS)),
(ADD16ri GR16:$src1, imm:$src2)>;
-def : Pat<(parallel (X86add_ovf GR32:$src1, imm:$src2),
+def : Pat<(parallel (X86add_flag GR32:$src1, imm:$src2),
(implicit EFLAGS)),
(ADD32ri GR32:$src1, imm:$src2)>;
-def : Pat<(parallel (X86add_ovf GR16:$src1, i16immSExt8:$src2),
+def : Pat<(parallel (X86add_flag GR16:$src1, i16immSExt8:$src2),
(implicit EFLAGS)),
(ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(parallel (X86add_ovf GR32:$src1, i32immSExt8:$src2),
+def : Pat<(parallel (X86add_flag GR32:$src1, i32immSExt8:$src2),
(implicit EFLAGS)),
(ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
-// Memory-Register Addition with Overflow
-def : Pat<(parallel (store (X86add_ovf (load addr:$dst), GR8:$src2),
+// Memory-Register Addition with EFLAGS result
+def : Pat<(parallel (store (X86add_flag (loadi8 addr:$dst), GR8:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD8mr addr:$dst, GR8:$src2)>;
-def : Pat<(parallel (store (X86add_ovf (load addr:$dst), GR16:$src2),
+def : Pat<(parallel (store (X86add_flag (loadi16 addr:$dst), GR16:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD16mr addr:$dst, GR16:$src2)>;
-def : Pat<(parallel (store (X86add_ovf (load addr:$dst), GR32:$src2),
+def : Pat<(parallel (store (X86add_flag (loadi32 addr:$dst), GR32:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD32mr addr:$dst, GR32:$src2)>;
-def : Pat<(parallel (store (X86add_ovf (loadi8 addr:$dst), imm:$src2),
+def : Pat<(parallel (store (X86add_flag (loadi8 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD8mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86add_ovf (loadi16 addr:$dst), imm:$src2),
+def : Pat<(parallel (store (X86add_flag (loadi16 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD16mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86add_ovf (loadi32 addr:$dst), imm:$src2),
+def : Pat<(parallel (store (X86add_flag (loadi32 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD32mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86add_ovf (load addr:$dst), i16immSExt8:$src2),
+def : Pat<(parallel (store (X86add_flag (loadi16 addr:$dst), i16immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD16mi8 addr:$dst, i16immSExt8:$src2)>;
-def : Pat<(parallel (store (X86add_ovf (load addr:$dst), i32immSExt8:$src2),
+def : Pat<(parallel (store (X86add_flag (loadi32 addr:$dst), i32immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)),
(ADD32mi8 addr:$dst, i32immSExt8:$src2)>;
-// Register-Register Subtraction with Overflow
-def : Pat<(parallel (X86sub_ovf GR8:$src1, GR8:$src2),
+// Register-Register Subtraction with EFLAGS result
+def : Pat<(parallel (X86sub_flag GR8:$src1, GR8:$src2),
(implicit EFLAGS)),
(SUB8rr GR8:$src1, GR8:$src2)>;
-def : Pat<(parallel (X86sub_ovf GR16:$src1, GR16:$src2),
+def : Pat<(parallel (X86sub_flag GR16:$src1, GR16:$src2),
(implicit EFLAGS)),
(SUB16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(parallel (X86sub_ovf GR32:$src1, GR32:$src2),
+def : Pat<(parallel (X86sub_flag GR32:$src1, GR32:$src2),
(implicit EFLAGS)),
(SUB32rr GR32:$src1, GR32:$src2)>;
-// Register-Memory Subtraction with Overflow
-def : Pat<(parallel (X86sub_ovf GR8:$src1, (load addr:$src2)),
+// Register-Memory Subtraction with EFLAGS result
+def : Pat<(parallel (X86sub_flag GR8:$src1, (loadi8 addr:$src2)),
(implicit EFLAGS)),
(SUB8rm GR8:$src1, addr:$src2)>;
-def : Pat<(parallel (X86sub_ovf GR16:$src1, (load addr:$src2)),
+def : Pat<(parallel (X86sub_flag GR16:$src1, (loadi16 addr:$src2)),
(implicit EFLAGS)),
(SUB16rm GR16:$src1, addr:$src2)>;
-def : Pat<(parallel (X86sub_ovf GR32:$src1, (load addr:$src2)),
+def : Pat<(parallel (X86sub_flag GR32:$src1, (loadi32 addr:$src2)),
(implicit EFLAGS)),
(SUB32rm GR32:$src1, addr:$src2)>;
-// Register-Integer Subtraction with Overflow
-def : Pat<(parallel (X86sub_ovf GR8:$src1, imm:$src2),
+// Register-Integer Subtraction with EFLAGS result
+def : Pat<(parallel (X86sub_flag GR8:$src1, imm:$src2),
(implicit EFLAGS)),
(SUB8ri GR8:$src1, imm:$src2)>;
-def : Pat<(parallel (X86sub_ovf GR16:$src1, imm:$src2),
+def : Pat<(parallel (X86sub_flag GR16:$src1, imm:$src2),
(implicit EFLAGS)),
(SUB16ri GR16:$src1, imm:$src2)>;
-def : Pat<(parallel (X86sub_ovf GR32:$src1, imm:$src2),
+def : Pat<(parallel (X86sub_flag GR32:$src1, imm:$src2),
(implicit EFLAGS)),
(SUB32ri GR32:$src1, imm:$src2)>;
-def : Pat<(parallel (X86sub_ovf GR16:$src1, i16immSExt8:$src2),
+def : Pat<(parallel (X86sub_flag GR16:$src1, i16immSExt8:$src2),
(implicit EFLAGS)),
(SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(parallel (X86sub_ovf GR32:$src1, i32immSExt8:$src2),
+def : Pat<(parallel (X86sub_flag GR32:$src1, i32immSExt8:$src2),
(implicit EFLAGS)),
(SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
-// Memory-Register Subtraction with Overflow
-def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), GR8:$src2),
+// Memory-Register Subtraction with EFLAGS result
+def : Pat<(parallel (store (X86sub_flag (loadi8 addr:$dst), GR8:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB8mr addr:$dst, GR8:$src2)>;
-def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), GR16:$src2),
+def : Pat<(parallel (store (X86sub_flag (loadi16 addr:$dst), GR16:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB16mr addr:$dst, GR16:$src2)>;
-def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), GR32:$src2),
+def : Pat<(parallel (store (X86sub_flag (loadi32 addr:$dst), GR32:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB32mr addr:$dst, GR32:$src2)>;
-// Memory-Integer Subtraction with Overflow
-def : Pat<(parallel (store (X86sub_ovf (loadi8 addr:$dst), imm:$src2),
+// Memory-Integer Subtraction with EFLAGS result
+def : Pat<(parallel (store (X86sub_flag (loadi8 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB8mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86sub_ovf (loadi16 addr:$dst), imm:$src2),
+def : Pat<(parallel (store (X86sub_flag (loadi16 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB16mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86sub_ovf (loadi32 addr:$dst), imm:$src2),
+def : Pat<(parallel (store (X86sub_flag (loadi32 addr:$dst), imm:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB32mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), i16immSExt8:$src2),
+def : Pat<(parallel (store (X86sub_flag (loadi16 addr:$dst), i16immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB16mi8 addr:$dst, i16immSExt8:$src2)>;
-def : Pat<(parallel (store (X86sub_ovf (load addr:$dst), i32immSExt8:$src2),
+def : Pat<(parallel (store (X86sub_flag (loadi32 addr:$dst), i32immSExt8:$src2),
addr:$dst),
(implicit EFLAGS)),
(SUB32mi8 addr:$dst, i32immSExt8:$src2)>;
-// Register-Register Signed Integer Multiply with Overflow
-def : Pat<(parallel (X86smul_ovf GR16:$src1, GR16:$src2),
+// Register-Register Signed Integer Multiply with EFLAGS result
+def : Pat<(parallel (X86smul_flag GR16:$src1, GR16:$src2),
(implicit EFLAGS)),
(IMUL16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(parallel (X86smul_ovf GR32:$src1, GR32:$src2),
+def : Pat<(parallel (X86smul_flag GR32:$src1, GR32:$src2),
(implicit EFLAGS)),
(IMUL32rr GR32:$src1, GR32:$src2)>;
-// Register-Memory Signed Integer Multiply with Overflow
-def : Pat<(parallel (X86smul_ovf GR16:$src1, (load addr:$src2)),
+// Register-Memory Signed Integer Multiply with EFLAGS result
+def : Pat<(parallel (X86smul_flag GR16:$src1, (loadi16 addr:$src2)),
(implicit EFLAGS)),
(IMUL16rm GR16:$src1, addr:$src2)>;
-def : Pat<(parallel (X86smul_ovf GR32:$src1, (load addr:$src2)),
+def : Pat<(parallel (X86smul_flag GR32:$src1, (loadi32 addr:$src2)),
(implicit EFLAGS)),
(IMUL32rm GR32:$src1, addr:$src2)>;
-// Register-Integer Signed Integer Multiply with Overflow
-def : Pat<(parallel (X86smul_ovf GR16:$src1, imm:$src2),
+// Register-Integer Signed Integer Multiply with EFLAGS result
+def : Pat<(parallel (X86smul_flag GR16:$src1, imm:$src2),
(implicit EFLAGS)),
(IMUL16rri GR16:$src1, imm:$src2)>;
-def : Pat<(parallel (X86smul_ovf GR32:$src1, imm:$src2),
+def : Pat<(parallel (X86smul_flag GR32:$src1, imm:$src2),
(implicit EFLAGS)),
(IMUL32rri GR32:$src1, imm:$src2)>;
-def : Pat<(parallel (X86smul_ovf GR16:$src1, i16immSExt8:$src2),
+def : Pat<(parallel (X86smul_flag GR16:$src1, i16immSExt8:$src2),
(implicit EFLAGS)),
(IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(parallel (X86smul_ovf GR32:$src1, i32immSExt8:$src2),
+def : Pat<(parallel (X86smul_flag GR32:$src1, i32immSExt8:$src2),
(implicit EFLAGS)),
(IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;
-// Memory-Integer Signed Integer Multiply with Overflow
-def : Pat<(parallel (X86smul_ovf (load addr:$src1), imm:$src2),
+// Memory-Integer Signed Integer Multiply with EFLAGS result
+def : Pat<(parallel (X86smul_flag (loadi16 addr:$src1), imm:$src2),
(implicit EFLAGS)),
(IMUL16rmi addr:$src1, imm:$src2)>;
-def : Pat<(parallel (X86smul_ovf (load addr:$src1), imm:$src2),
+def : Pat<(parallel (X86smul_flag (loadi32 addr:$src1), imm:$src2),
(implicit EFLAGS)),
(IMUL32rmi addr:$src1, imm:$src2)>;
-def : Pat<(parallel (X86smul_ovf (load addr:$src1), i16immSExt8:$src2),
+def : Pat<(parallel (X86smul_flag (loadi16 addr:$src1), i16immSExt8:$src2),
(implicit EFLAGS)),
(IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
-def : Pat<(parallel (X86smul_ovf (load addr:$src1), i32immSExt8:$src2),
+def : Pat<(parallel (X86smul_flag (loadi32 addr:$src1), i32immSExt8:$src2),
(implicit EFLAGS)),
(IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
-// Optimize multiple with overflow by 2.
+// Optimize multiply by 2 with EFLAGS result.
let AddedComplexity = 2 in {
-def : Pat<(parallel (X86smul_ovf GR16:$src1, 2),
+def : Pat<(parallel (X86smul_flag GR16:$src1, 2),
(implicit EFLAGS)),
(ADD16rr GR16:$src1, GR16:$src1)>;
-def : Pat<(parallel (X86smul_ovf GR32:$src1, 2),
+def : Pat<(parallel (X86smul_flag GR32:$src1, 2),
(implicit EFLAGS)),
(ADD32rr GR32:$src1, GR32:$src1)>;
}
+// INC and DEC with EFLAGS result. Note that these do not set CF.
+def : Pat<(parallel (X86inc_flag GR8:$src), (implicit EFLAGS)),
+ (INC8r GR8:$src)>;
+def : Pat<(parallel (store (i8 (X86inc_flag (loadi8 addr:$dst))), addr:$dst),
+ (implicit EFLAGS)),
+ (INC8m addr:$dst)>;
+def : Pat<(parallel (X86dec_flag GR8:$src), (implicit EFLAGS)),
+ (DEC8r GR8:$src)>;
+def : Pat<(parallel (store (i8 (X86dec_flag (loadi8 addr:$dst))), addr:$dst),
+ (implicit EFLAGS)),
+ (DEC8m addr:$dst)>;
+
+def : Pat<(parallel (X86inc_flag GR16:$src), (implicit EFLAGS)),
+ (INC16r GR16:$src)>, Requires<[In32BitMode]>;
+def : Pat<(parallel (store (i16 (X86inc_flag (loadi16 addr:$dst))), addr:$dst),
+ (implicit EFLAGS)),
+ (INC16m addr:$dst)>, Requires<[In32BitMode]>;
+def : Pat<(parallel (X86dec_flag GR16:$src), (implicit EFLAGS)),
+ (DEC16r GR16:$src)>, Requires<[In32BitMode]>;
+def : Pat<(parallel (store (i16 (X86dec_flag (loadi16 addr:$dst))), addr:$dst),
+ (implicit EFLAGS)),
+ (DEC16m addr:$dst)>, Requires<[In32BitMode]>;
+
+def : Pat<(parallel (X86inc_flag GR32:$src), (implicit EFLAGS)),
+ (INC32r GR32:$src)>, Requires<[In32BitMode]>;
+def : Pat<(parallel (store (i32 (X86inc_flag (loadi32 addr:$dst))), addr:$dst),
+ (implicit EFLAGS)),
+ (INC32m addr:$dst)>, Requires<[In32BitMode]>;
+def : Pat<(parallel (X86dec_flag GR32:$src), (implicit EFLAGS)),
+ (DEC32r GR32:$src)>, Requires<[In32BitMode]>;
+def : Pat<(parallel (store (i32 (X86dec_flag (loadi32 addr:$dst))), addr:$dst),
+ (implicit EFLAGS)),
+ (DEC32m addr:$dst)>, Requires<[In32BitMode]>;
+
//===----------------------------------------------------------------------===//
// Floating Point Stack Support
//===----------------------------------------------------------------------===//
Modified: llvm/branches/Apple/Dib/test/CodeGen/ARM/uxtb.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/CodeGen/ARM/uxtb.ll?rev=66456&r1=66455&r2=66456&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/test/CodeGen/ARM/uxtb.ll (original)
+++ llvm/branches/Apple/Dib/test/CodeGen/ARM/uxtb.ll Mon Mar 9 15:52:42 2009
@@ -1,4 +1,4 @@
-; RUN: llvm-as < %s | llc -march=arm -mattr=+v6 | \
+; RUN: llvm-as < %s | llc -mtriple=armv6-apple-darwin | \
; RUN: grep uxt | count 10
define i32 @test1(i32 %x) {
Added: llvm/branches/Apple/Dib/test/CodeGen/X86/2009-03-05-burr-list-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/CodeGen/X86/2009-03-05-burr-list-crash.ll?rev=66456&view=auto
==============================================================================
--- llvm/branches/Apple/Dib/test/CodeGen/X86/2009-03-05-burr-list-crash.ll (added)
+++ llvm/branches/Apple/Dib/test/CodeGen/X86/2009-03-05-burr-list-crash.ll Mon Mar 9 15:52:42 2009
@@ -0,0 +1,35 @@
+; RUN: llvm-as < %s | llc
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+target triple = "x86_64-unknown-linux-gnu"
+external global i32 ; <i32*>:0 [#uses=1]
+
+declare i64 @strlen(i8* nocapture) nounwind readonly
+
+define fastcc i8* @1(i8*) nounwind {
+ br i1 false, label %3, label %2
+
+; <label>:2 ; preds = %1
+ ret i8* %0
+
+; <label>:3 ; preds = %1
+ %4 = call i64 @strlen(i8* %0) nounwind readonly ; <i64> [#uses=1]
+ %5 = trunc i64 %4 to i32 ; <i32> [#uses=2]
+ %6 = load i32* @0, align 4 ; <i32> [#uses=1]
+ %7 = sub i32 %5, %6 ; <i32> [#uses=2]
+ %8 = sext i32 %5 to i64 ; <i64> [#uses=1]
+ %9 = sext i32 %7 to i64 ; <i64> [#uses=1]
+ %10 = sub i64 %8, %9 ; <i64> [#uses=1]
+ %11 = getelementptr i8* %0, i64 %10 ; <i8*> [#uses=1]
+ %12 = icmp sgt i32 %7, 0 ; <i1> [#uses=1]
+ br i1 %12, label %13, label %14
+
+; <label>:13 ; preds = %13, %3
+ br label %13
+
+; <label>:14 ; preds = %3
+ %15 = call noalias i8* @make_temp_file(i8* %11) nounwind ; <i8*> [#uses=0]
+ unreachable
+}
+
+declare noalias i8* @make_temp_file(i8*)
Added: llvm/branches/Apple/Dib/test/CodeGen/X86/peep-test-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/CodeGen/X86/peep-test-0.ll?rev=66456&view=auto
==============================================================================
--- llvm/branches/Apple/Dib/test/CodeGen/X86/peep-test-0.ll (added)
+++ llvm/branches/Apple/Dib/test/CodeGen/X86/peep-test-0.ll Mon Mar 9 15:52:42 2009
@@ -0,0 +1,22 @@
+; RUN: llvm-as < %s | llc -march=x86-64 > %t
+; RUN: not grep cmp %t
+; RUN: not grep test %t
+
+define void @loop(i64 %n, double* nocapture %d) nounwind {
+entry:
+ br label %bb
+
+bb:
+ %indvar = phi i64 [ %n, %entry ], [ %indvar.next, %bb ]
+ %i.03 = add i64 %indvar, %n
+ %0 = getelementptr double* %d, i64 %i.03
+ %1 = load double* %0, align 8
+ %2 = mul double %1, 3.000000e+00
+ store double %2, double* %0, align 8
+ %indvar.next = add i64 %indvar, 1
+ %exitcond = icmp eq i64 %indvar.next, 0
+ br i1 %exitcond, label %return, label %bb
+
+return:
+ ret void
+}
Added: llvm/branches/Apple/Dib/test/CodeGen/X86/peep-test-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/CodeGen/X86/peep-test-1.ll?rev=66456&view=auto
==============================================================================
--- llvm/branches/Apple/Dib/test/CodeGen/X86/peep-test-1.ll (added)
+++ llvm/branches/Apple/Dib/test/CodeGen/X86/peep-test-1.ll Mon Mar 9 15:52:42 2009
@@ -0,0 +1,23 @@
+; RUN: llvm-as < %s | llc -march=x86 > %t
+; RUN: grep dec %t | count 1
+; RUN: not grep test %t
+; RUN: not grep cmp %t
+
+define void @foo(i32 %n, double* nocapture %p) nounwind {
+ br label %bb
+
+bb:
+ %indvar = phi i32 [ 0, %0 ], [ %indvar.next, %bb ]
+ %i.03 = sub i32 %n, %indvar
+ %1 = getelementptr double* %p, i32 %i.03
+ %2 = load double* %1, align 4
+ %3 = mul double %2, 2.930000e+00
+ store double %3, double* %1, align 4
+ %4 = add i32 %i.03, -1
+ %phitmp = icmp slt i32 %4, 0
+ %indvar.next = add i32 %indvar, 1
+ br i1 %phitmp, label %bb, label %return
+
+return:
+ ret void
+}
Added: llvm/branches/Apple/Dib/test/CodeGen/X86/peep-test.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/CodeGen/X86/peep-test.ll?rev=66456&view=auto
==============================================================================
--- llvm/branches/Apple/Dib/test/CodeGen/X86/peep-test.ll (added)
+++ llvm/branches/Apple/Dib/test/CodeGen/X86/peep-test.ll Mon Mar 9 15:52:42 2009
@@ -0,0 +1,22 @@
+; RUN: llvm-as < %s | llc -march=x86-64 > %t
+; RUN: not grep cmp %t
+; RUN: not grep test %t
+
+define void @loop(i64 %n, double* nocapture %d) nounwind {
+entry:
+ br label %bb
+
+bb:
+ %indvar = phi i64 [ %n, %entry ], [ %indvar.next, %bb ]
+ %i.03 = add i64 %indvar, %n
+ %0 = getelementptr double* %d, i64 %i.03
+ %1 = load double* %0, align 8
+ %2 = mul double %1, 3.000000e+00
+ store double %2, double* %0, align 8
+ %indvar.next = add i64 %indvar, 1
+ %exitcond = icmp eq i64 %indvar.next, 0
+ br i1 %exitcond, label %return, label %bb
+
+return:
+ ret void
+}
More information about the llvm-commits
mailing list