[llvm-commits] CVS: llvm/lib/Target/X86/X86ISelLowering.cpp X86ISelLowering.h X86InstrInfo.td X86RegisterInfo.cpp
Evan Cheng
evan.cheng at apple.com
Thu Jan 5 16:43:15 PST 2006
Changes in directory llvm/lib/Target/X86:
X86ISelLowering.cpp updated: 1.21 -> 1.22
X86ISelLowering.h updated: 1.10 -> 1.11
X86InstrInfo.td updated: 1.188 -> 1.189
X86RegisterInfo.cpp updated: 1.115 -> 1.116
---
Log message:
* Fast call support.
* FP cmp, setcc, etc.
---
Diffs of the changes: (+572 -177)
X86ISelLowering.cpp | 341 +++++++++++++++++++++++++++++++++++++++++------
X86ISelLowering.h | 28 +++
X86InstrInfo.td | 377 +++++++++++++++++++++++++++++++++-------------------
X86RegisterInfo.cpp | 3
4 files changed, 572 insertions(+), 177 deletions(-)
Index: llvm/lib/Target/X86/X86ISelLowering.cpp
diff -u llvm/lib/Target/X86/X86ISelLowering.cpp:1.21 llvm/lib/Target/X86/X86ISelLowering.cpp:1.22
--- llvm/lib/Target/X86/X86ISelLowering.cpp:1.21 Wed Jan 4 19:47:43 2006
+++ llvm/lib/Target/X86/X86ISelLowering.cpp Thu Jan 5 18:43:03 2006
@@ -118,13 +118,20 @@
// These should be promoted to a larger select which is supported.
setOperationAction(ISD::SELECT , MVT::i1 , Promote);
setOperationAction(ISD::SELECT , MVT::i8 , Promote);
- // X86 wants to expand cmov itself.
if (X86DAGIsel) {
+ // X86 wants to expand cmov itself.
setOperationAction(ISD::SELECT , MVT::i16 , Custom);
setOperationAction(ISD::SELECT , MVT::i32 , Custom);
+ setOperationAction(ISD::SELECT , MVT::f32 , Custom);
+ setOperationAction(ISD::SELECT , MVT::f64 , Custom);
setOperationAction(ISD::SETCC , MVT::i8 , Custom);
setOperationAction(ISD::SETCC , MVT::i16 , Custom);
setOperationAction(ISD::SETCC , MVT::i32 , Custom);
+ setOperationAction(ISD::SETCC , MVT::f32 , Custom);
+ setOperationAction(ISD::SETCC , MVT::f64 , Custom);
+ // X86 ret instruction may pop stack.
+ setOperationAction(ISD::RET , MVT::Other, Custom);
+ // Darwin ABI issue.
setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
}
@@ -201,6 +208,12 @@
SelectionDAG &DAG) {
assert((!isVarArg || CallingConv == CallingConv::C) &&
"Only C takes varargs!");
+
+ // If the callee is a GlobalAddress node (quite common, every direct call is)
+ // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
+
if (CallingConv == CallingConv::Fast && EnableFastCC)
return LowerFastCCCallTo(Chain, RetTy, isTailCall, Callee, Args, DAG);
return LowerCCCCallTo(Chain, RetTy, isVarArg, isTailCall, Callee, Args, DAG);
@@ -223,8 +236,8 @@
DAG.getConstant(1, MVT::i32));
SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op,
DAG.getConstant(0, MVT::i32));
- Copy = DAG.getCopyToReg(Chain, X86::EAX, Hi, SDOperand());
- Copy = DAG.getCopyToReg(Copy, X86::EDX, Lo, Copy.getValue(1));
+ Copy = DAG.getCopyToReg(Chain, X86::EDX, Hi, SDOperand());
+ Copy = DAG.getCopyToReg(Copy, X86::EAX, Lo, Copy.getValue(1));
break;
}
case MVT::f32:
@@ -468,8 +481,8 @@
Ops.push_back(Chain);
Ops.push_back(Callee);
- Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
- NodeTys, Ops);
+ // FIXME: Do not generate X86ISD::TAILCALL for now.
+ Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops);
SDOperand InFlag = Chain.getValue(1);
SDOperand RetVal;
@@ -951,43 +964,145 @@
break;
}
- std::vector<SDOperand> Ops;
- Ops.push_back(Chain);
- Ops.push_back(Callee);
- Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
- // Callee pops all arg values on the stack.
- Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
-
- // Pass register arguments as needed.
- Ops.insert(Ops.end(), RegValuesToPass.begin(), RegValuesToPass.end());
-
- SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
- RetVals, Ops);
- Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);
+ if (X86DAGIsel) {
+ // Build a sequence of copy-to-reg nodes chained together with token chain
+ // and flag operands which copy the outgoing args into registers.
+ SDOperand InFlag;
+ for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
+ unsigned CCReg;
+ SDOperand RegToPass = RegValuesToPass[i];
+ switch (RegToPass.getValueType()) {
+ default: assert(0 && "Bad thing to pass in regs");
+ case MVT::i8:
+ CCReg = (i == 0) ? X86::AL : X86::DL;
+ break;
+ case MVT::i16:
+ CCReg = (i == 0) ? X86::AX : X86::DX;
+ break;
+ case MVT::i32:
+ CCReg = (i == 0) ? X86::EAX : X86::EDX;
+ break;
+ }
- SDOperand ResultVal;
- switch (RetTyVT) {
- case MVT::isVoid: break;
- default:
- ResultVal = TheCall.getValue(1);
- break;
- case MVT::i1:
- case MVT::i8:
- case MVT::i16:
- ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
- break;
- case MVT::f32:
- // FIXME: we would really like to remember that this FP_ROUND operation is
- // okay to eliminate if we allow excess FP precision.
- ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
- break;
- case MVT::i64:
- ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
- TheCall.getValue(2));
- break;
- }
+ Chain = DAG.getCopyToReg(Chain, CCReg, RegToPass, InFlag);
+ InFlag = Chain.getValue(1);
+ }
+
+ std::vector<MVT::ValueType> NodeTys;
+ NodeTys.push_back(MVT::Other); // Returns a chain
+ NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
+
+ std::vector<SDOperand> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+ if (InFlag.Val)
+ Ops.push_back(InFlag);
+
+ // FIXME: Do not generate X86ISD::TAILCALL for now.
+ Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops);
+ InFlag = Chain.getValue(1);
+
+ SDOperand RetVal;
+ if (RetTyVT != MVT::isVoid) {
+ switch (RetTyVT) {
+ default: assert(0 && "Unknown value type to return!");
+ case MVT::i1:
+ case MVT::i8:
+ RetVal = DAG.getCopyFromReg(Chain, X86::AL, MVT::i8, InFlag);
+ Chain = RetVal.getValue(1);
+ break;
+ case MVT::i16:
+ RetVal = DAG.getCopyFromReg(Chain, X86::AX, MVT::i16, InFlag);
+ Chain = RetVal.getValue(1);
+ break;
+ case MVT::i32:
+ RetVal = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
+ Chain = RetVal.getValue(1);
+ break;
+ case MVT::i64: {
+ SDOperand Lo = DAG.getCopyFromReg(Chain, X86::EAX, MVT::i32, InFlag);
+ SDOperand Hi = DAG.getCopyFromReg(Lo.getValue(1), X86::EDX, MVT::i32,
+ Lo.getValue(2));
+ RetVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
+ Chain = Hi.getValue(1);
+ break;
+ }
+ case MVT::f32:
+ case MVT::f64: {
+ std::vector<MVT::ValueType> Tys;
+ Tys.push_back(MVT::f64);
+ Tys.push_back(MVT::Other);
+ std::vector<SDOperand> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(InFlag);
+ RetVal = DAG.getNode(X86ISD::FP_GET_RESULT, Tys, Ops);
+ Chain = RetVal.getValue(1);
+ if (X86ScalarSSE) {
+ unsigned Size = MVT::getSizeInBits(MVT::f64)/8;
+ MachineFunction &MF = DAG.getMachineFunction();
+ int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
+ SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
+ Tys.clear();
+ Tys.push_back(MVT::Other);
+ Ops.clear();
+ Ops.push_back(Chain);
+ Ops.push_back(RetVal);
+ Ops.push_back(StackSlot);
+ Ops.push_back(DAG.getValueType(RetTyVT));
+ Chain = DAG.getNode(X86ISD::FST, Tys, Ops);
+ RetVal = DAG.getLoad(RetTyVT, Chain, StackSlot,
+ DAG.getSrcValue(NULL));
+ Chain = RetVal.getValue(1);
+ } else if (RetTyVT == MVT::f32)
+ RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal);
+ break;
+ }
+ }
+ }
+
+ Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
+ DAG.getConstant(ArgOffset, getPointerTy()),
+ DAG.getConstant(ArgOffset, getPointerTy()));
+ return std::make_pair(RetVal, Chain);
+ } else {
+ std::vector<SDOperand> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+ Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
+ // Callee pops all arg values on the stack.
+ Ops.push_back(DAG.getConstant(ArgOffset, getPointerTy()));
+
+ // Pass register arguments as needed.
+ Ops.insert(Ops.end(), RegValuesToPass.begin(), RegValuesToPass.end());
- return std::make_pair(ResultVal, Chain);
+ SDOperand TheCall = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
+ RetVals, Ops);
+ Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, TheCall);
+
+ SDOperand ResultVal;
+ switch (RetTyVT) {
+ case MVT::isVoid: break;
+ default:
+ ResultVal = TheCall.getValue(1);
+ break;
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ ResultVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, TheCall.getValue(1));
+ break;
+ case MVT::f32:
+ // FIXME: we would really like to remember that this FP_ROUND operation is
+ // okay to eliminate if we allow excess FP precision.
+ ResultVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, TheCall.getValue(1));
+ break;
+ case MVT::i64:
+ ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, TheCall.getValue(1),
+ TheCall.getValue(2));
+ break;
+ }
+
+ return std::make_pair(ResultVal, Chain);
+ }
}
SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
@@ -1025,6 +1140,54 @@
// X86 Custom Lowering Hooks
//===----------------------------------------------------------------------===//
+/// SetCCToX86CondCode - do a one to one translation of a ISD::CondCode to
+/// X86 specific CondCode. It returns a X86ISD::COND_INVALID if it cannot
+/// do a direct translation.
+static unsigned CCToX86CondCode(SDOperand CC, bool isFP) {
+ ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
+ unsigned X86CC = X86ISD::COND_INVALID;
+ if (!isFP) {
+ switch (SetCCOpcode) {
+ default: break;
+ case ISD::SETEQ: X86CC = X86ISD::COND_E; break;
+ case ISD::SETGT: X86CC = X86ISD::COND_G; break;
+ case ISD::SETGE: X86CC = X86ISD::COND_GE; break;
+ case ISD::SETLT: X86CC = X86ISD::COND_L; break;
+ case ISD::SETLE: X86CC = X86ISD::COND_LE; break;
+ case ISD::SETNE: X86CC = X86ISD::COND_NE; break;
+ case ISD::SETULT: X86CC = X86ISD::COND_B; break;
+ case ISD::SETUGT: X86CC = X86ISD::COND_A; break;
+ case ISD::SETULE: X86CC = X86ISD::COND_BE; break;
+ case ISD::SETUGE: X86CC = X86ISD::COND_AE; break;
+ }
+ } else {
+ // On a floating point condition, the flags are set as follows:
+ // ZF PF CF op
+ // 0 | 0 | 0 | X > Y
+ // 0 | 0 | 1 | X < Y
+ // 1 | 0 | 0 | X == Y
+ // 1 | 1 | 1 | unordered
+ switch (SetCCOpcode) {
+ default: break;
+ case ISD::SETUEQ:
+ case ISD::SETEQ: X86CC = X86ISD::COND_E; break;
+ case ISD::SETOGT:
+ case ISD::SETGT: X86CC = X86ISD::COND_A; break;
+ case ISD::SETOGE:
+ case ISD::SETGE: X86CC = X86ISD::COND_AE; break;
+ case ISD::SETULT:
+ case ISD::SETLT: X86CC = X86ISD::COND_B; break;
+ case ISD::SETULE:
+ case ISD::SETLE: X86CC = X86ISD::COND_BE; break;
+ case ISD::SETONE:
+ case ISD::SETNE: X86CC = X86ISD::COND_NE; break;
+ case ISD::SETUO: X86CC = X86ISD::COND_P; break;
+ case ISD::SETO: X86CC = X86ISD::COND_NP; break;
+ }
+ }
+ return X86CC;
+}
+
/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
@@ -1100,7 +1263,87 @@
SDOperand CC = Op.getOperand(2);
SDOperand Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
Op.getOperand(0), Op.getOperand(1));
- return DAG.getNode(X86ISD::SETCC, MVT::i8, CC, Cond);
+ ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
+ bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
+ unsigned X86CC = CCToX86CondCode(CC, isFP);
+ if (X86CC != X86ISD::COND_INVALID) {
+ return DAG.getNode(X86ISD::SETCC, MVT::i8,
+ DAG.getConstant(X86CC, MVT::i8), Cond);
+ } else {
+ assert(isFP && "Illegal integer SetCC!");
+
+ std::vector<MVT::ValueType> Tys;
+ std::vector<SDOperand> Ops;
+ switch (SetCCOpcode) {
+ default: assert(false && "Illegal floating point SetCC!");
+ case ISD::SETOEQ: { // !PF & ZF
+ Tys.push_back(MVT::i8);
+ Tys.push_back(MVT::Flag);
+ Ops.push_back(DAG.getConstant(X86ISD::COND_NP, MVT::i8));
+ Ops.push_back(Cond);
+ SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
+ SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
+ DAG.getConstant(X86ISD::COND_E, MVT::i8),
+ Tmp1.getValue(1));
+ return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
+ }
+ case ISD::SETOLT: { // !PF & CF
+ Tys.push_back(MVT::i8);
+ Tys.push_back(MVT::Flag);
+ Ops.push_back(DAG.getConstant(X86ISD::COND_NP, MVT::i8));
+ Ops.push_back(Cond);
+ SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
+ SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
+ DAG.getConstant(X86ISD::COND_B, MVT::i8),
+ Tmp1.getValue(1));
+ return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
+ }
+ case ISD::SETOLE: { // !PF & (CF || ZF)
+ Tys.push_back(MVT::i8);
+ Tys.push_back(MVT::Flag);
+ Ops.push_back(DAG.getConstant(X86ISD::COND_NP, MVT::i8));
+ Ops.push_back(Cond);
+ SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
+ SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
+ DAG.getConstant(X86ISD::COND_BE, MVT::i8),
+ Tmp1.getValue(1));
+ return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
+ }
+ case ISD::SETUGT: { // PF | (!ZF & !CF)
+ Tys.push_back(MVT::i8);
+ Tys.push_back(MVT::Flag);
+ Ops.push_back(DAG.getConstant(X86ISD::COND_P, MVT::i8));
+ Ops.push_back(Cond);
+ SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
+ SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
+ DAG.getConstant(X86ISD::COND_A, MVT::i8),
+ Tmp1.getValue(1));
+ return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
+ }
+ case ISD::SETUGE: { // PF | !CF
+ Tys.push_back(MVT::i8);
+ Tys.push_back(MVT::Flag);
+ Ops.push_back(DAG.getConstant(X86ISD::COND_P, MVT::i8));
+ Ops.push_back(Cond);
+ SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
+ SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
+ DAG.getConstant(X86ISD::COND_AE, MVT::i8),
+ Tmp1.getValue(1));
+ return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
+ }
+ case ISD::SETUNE: { // PF | !ZF
+ Tys.push_back(MVT::i8);
+ Tys.push_back(MVT::Flag);
+ Ops.push_back(DAG.getConstant(X86ISD::COND_P, MVT::i8));
+ Ops.push_back(Cond);
+ SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, Tys, Ops);
+ SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8,
+ DAG.getConstant(X86ISD::COND_NE, MVT::i8),
+ Tmp1.getValue(1));
+ return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
+ }
+ }
+ }
}
case ISD::SELECT: {
SDOperand Cond = Op.getOperand(0);
@@ -1110,10 +1353,13 @@
Cond = Cond.getOperand(1);
} else if (Cond.getOpcode() == ISD::SETCC) {
CC = Cond.getOperand(2);
+ bool isFP = MVT::isFloatingPoint(Cond.getOperand(1).getValueType());
+ unsigned X86CC = CCToX86CondCode(CC, isFP);
+ CC = DAG.getConstant(X86CC, MVT::i8);
Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
Cond.getOperand(0), Cond.getOperand(1));
} else {
- CC = DAG.getCondCode(ISD::SETEQ);
+ CC = DAG.getConstant(X86ISD::COND_E, MVT::i8);
Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Cond, Cond);
}
return DAG.getNode(X86ISD::CMOV, Op.getValueType(),
@@ -1129,15 +1375,23 @@
Cond = Cond.getOperand(1);
} else if (Cond.getOpcode() == ISD::SETCC) {
CC = Cond.getOperand(2);
+ bool isFP = MVT::isFloatingPoint(Cond.getOperand(1).getValueType());
+ unsigned X86CC = CCToX86CondCode(CC, isFP);
+ CC = DAG.getConstant(X86CC, MVT::i8);
Cond = DAG.getNode(X86ISD::CMP, MVT::Flag,
Cond.getOperand(0), Cond.getOperand(1));
} else {
- CC = DAG.getCondCode(ISD::SETNE);
+ CC = DAG.getConstant(X86ISD::COND_NE, MVT::i8);
Cond = DAG.getNode(X86ISD::TEST, MVT::Flag, Cond, Cond);
}
return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
Op.getOperand(0), Op.getOperand(2), CC, Cond);
}
+ case ISD::RET: {
+ // Can only be return void.
+ return DAG.getNode(X86ISD::RET, MVT::Other, Op.getOperand(0),
+ DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
+ }
case ISD::GlobalAddress: {
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
SDOperand GVOp = DAG.getTargetGlobalAddress(GV, getPointerTy());
@@ -1176,6 +1430,7 @@
case X86ISD::SETCC: return "X86ISD::SETCC";
case X86ISD::CMOV: return "X86ISD::CMOV";
case X86ISD::BRCOND: return "X86ISD::BRCOND";
+ case X86ISD::RET: return "X86ISD::RET";
case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
}
}
Index: llvm/lib/Target/X86/X86ISelLowering.h
diff -u llvm/lib/Target/X86/X86ISelLowering.h:1.10 llvm/lib/Target/X86/X86ISelLowering.h:1.11
--- llvm/lib/Target/X86/X86ISelLowering.h:1.10 Wed Jan 4 18:27:02 2006
+++ llvm/lib/Target/X86/X86ISelLowering.h Thu Jan 5 18:43:03 2006
@@ -19,8 +19,8 @@
#include "llvm/CodeGen/SelectionDAG.h"
namespace llvm {
- // X86 Specific DAG Nodes
namespace X86ISD {
+ // X86 Specific DAG Nodes
enum NodeType {
// Start the numbering where the builtin ops leave off.
FIRST_NUMBER = ISD::BUILTIN_OP_END+X86::INSTRUCTION_LIST_END,
@@ -108,10 +108,36 @@
/// or TEST instruction.
BRCOND,
+ /// Return without a flag operand. Operand 1 is the number of bytes of
+ /// stack to pop, and operand 2 is the chain.
+ RET,
+
/// Return with a flag operand. Operand 1 is the number of bytes of stack
/// to pop, operand 2 is the chain and operand 3 is a flag operand.
RET_FLAG,
};
+
+ // X86 specific condition code. These correspond to X86_*_COND in
+ // X86InstrInfo.td. They must be kept in synch.
+ enum CondCode {
+ COND_A = 0,
+ COND_AE = 1,
+ COND_B = 2,
+ COND_BE = 3,
+ COND_E = 4,
+ COND_G = 5,
+ COND_GE = 6,
+ COND_L = 7,
+ COND_LE = 8,
+ COND_NE = 9,
+ COND_NO = 10,
+ COND_NP = 11,
+ COND_NS = 12,
+ COND_O = 13,
+ COND_P = 14,
+ COND_S = 15,
+ COND_INVALID
+ };
}
//===----------------------------------------------------------------------===//
Index: llvm/lib/Target/X86/X86InstrInfo.td
diff -u llvm/lib/Target/X86/X86InstrInfo.td:1.188 llvm/lib/Target/X86/X86InstrInfo.td:1.189
--- llvm/lib/Target/X86/X86InstrInfo.td:1.188 Wed Jan 4 20:08:37 2006
+++ llvm/lib/Target/X86/X86InstrInfo.td Thu Jan 5 18:43:03 2006
@@ -17,22 +17,21 @@
// X86 specific DAG Nodes.
//
-def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, FlagVT>, SDTCisInt<1>,
- SDTCisSameAs<1, 2>]>;
+def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, FlagVT>, SDTCisSameAs<1, 2>]>;
def SDTX86Cmov : SDTypeProfile<1, 4,
[SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
- SDTCisVT<3, OtherVT>, SDTCisVT<4, FlagVT>]>;
+ SDTCisVT<3, i8>, SDTCisVT<4, FlagVT>]>;
def SDTX86BrCond : SDTypeProfile<0, 3,
[SDTCisVT<0, OtherVT>,
- SDTCisVT<1, OtherVT>, SDTCisVT<2, FlagVT>]>;
+ SDTCisVT<1, i8>, SDTCisVT<2, FlagVT>]>;
def SDTX86SetCC : SDTypeProfile<1, 2,
- [SDTCisVT<0, i8>, SDTCisVT<1, OtherVT>,
+ [SDTCisVT<0, i8>, SDTCisVT<1, i8>,
SDTCisVT<2, FlagVT>]>;
-def SDTX86RetFlag : SDTypeProfile<0, 1, [SDTCisVT<0, i16>]>;
+def SDTX86Ret : SDTypeProfile<0, 1, [SDTCisVT<0, i16>]>;
def SDTX86Fld : SDTypeProfile<1, 2, [SDTCisVT<0, f64>,
SDTCisPtrTy<1>, SDTCisVT<2, OtherVT>]>;
@@ -47,10 +46,11 @@
def X86test : SDNode<"X86ISD::TEST", SDTX86CmpTest, []>;
def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov, []>;
-def X86Brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond, [SDNPHasChain]>;
-def X86SetCC : SDNode<"X86ISD::SETCC", SDTX86SetCC, []>;
+def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond, [SDNPHasChain]>;
+def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC, []>;
-def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86RetFlag, [SDNPHasChain]>;
+def X86ret : SDNode<"X86ISD::RET", SDTX86Ret, [SDNPHasChain]>;
+def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret, [SDNPHasChain]>;
def X86fld : SDNode<"X86ISD::FLD", SDTX86Fld, [SDNPHasChain]>;
def X86fst : SDNode<"X86ISD::FST", SDTX86Fst, [SDNPHasChain]>;
@@ -226,6 +226,26 @@
//===----------------------------------------------------------------------===//
// Pattern fragments...
//
+
+// X86 specific condition code. These correspond to CondCode in
+// X86ISelLowering.h. They must be kept in synch.
+def X86_COND_A : PatLeaf<(i8 0)>;
+def X86_COND_AE : PatLeaf<(i8 1)>;
+def X86_COND_B : PatLeaf<(i8 2)>;
+def X86_COND_BE : PatLeaf<(i8 3)>;
+def X86_COND_E : PatLeaf<(i8 4)>;
+def X86_COND_G : PatLeaf<(i8 5)>;
+def X86_COND_GE : PatLeaf<(i8 6)>;
+def X86_COND_L : PatLeaf<(i8 7)>;
+def X86_COND_LE : PatLeaf<(i8 8)>;
+def X86_COND_NE : PatLeaf<(i8 9)>;
+def X86_COND_NO : PatLeaf<(i8 10)>;
+def X86_COND_NP : PatLeaf<(i8 11)>;
+def X86_COND_NS : PatLeaf<(i8 12)>;
+def X86_COND_O : PatLeaf<(i8 13)>;
+def X86_COND_P : PatLeaf<(i8 14)>;
+def X86_COND_S : PatLeaf<(i8 15)>;
+
def i16immSExt8 : PatLeaf<(i16 imm), [{
// i16immSExt8 predicate - True if the 16-bit immediate fits in a 8-bit
// sign extended field.
@@ -332,12 +352,13 @@
let isTerminator = 1, isReturn = 1, isBarrier = 1,
hasCtrlDep = 1, noResults = 1 in {
// FIXME: temporary workaround for return without an incoming flag.
- def RETVOID : I<0xC3, RawFrm, (ops), "ret", [(ret)]>;
+ def RETVOID : I<0xC3, RawFrm, (ops), "ret", [(X86ret 0)]>;
+ def RETIVOID : Ii16<0xC2, RawFrm, (ops i16imm:$amt), "ret $amt",
+ [(X86ret imm:$amt)]>;
let hasInFlag = 1 in {
- def RET : I<0xC3, RawFrm, (ops), "ret",
- [(X86retflag 0)]>;
- def RETI : Ii16<0xC2, RawFrm, (ops i16imm:$amt), "ret $amt",
- [(X86retflag imm:$amt)]>;
+ def RET : I<0xC3, RawFrm, (ops), "ret", [(X86retflag 0)]>;
+ def RETI : Ii16<0xC2, RawFrm, (ops i16imm:$amt), "ret $amt",
+ [(X86retflag imm:$amt)]>;
}
}
@@ -350,31 +371,35 @@
def JMP : IBr<0xE9, (ops brtarget:$dst), "jmp $dst", [(br bb:$dst)]>;
def JE : IBr<0x84, (ops brtarget:$dst), "je $dst",
- [(X86Brcond bb:$dst, SETEQ, STATUS)]>, Imp<[STATUS],[]>, TB;
+ [(X86brcond bb:$dst, X86_COND_E, STATUS)]>, Imp<[STATUS],[]>, TB;
def JNE : IBr<0x85, (ops brtarget:$dst), "jne $dst",
- [(X86Brcond bb:$dst, SETNE, STATUS)]>, Imp<[STATUS],[]>, TB;
+ [(X86brcond bb:$dst, X86_COND_NE, STATUS)]>, Imp<[STATUS],[]>, TB;
def JL : IBr<0x8C, (ops brtarget:$dst), "jl $dst",
- [(X86Brcond bb:$dst, SETLT, STATUS)]>, Imp<[STATUS],[]>, TB;
+ [(X86brcond bb:$dst, X86_COND_L, STATUS)]>, Imp<[STATUS],[]>, TB;
def JLE : IBr<0x8E, (ops brtarget:$dst), "jle $dst",
- [(X86Brcond bb:$dst, SETLE, STATUS)]>, Imp<[STATUS],[]>, TB;
+ [(X86brcond bb:$dst, X86_COND_LE, STATUS)]>, Imp<[STATUS],[]>, TB;
def JG : IBr<0x8F, (ops brtarget:$dst), "jg $dst",
- [(X86Brcond bb:$dst, SETGT, STATUS)]>, Imp<[STATUS],[]>, TB;
+ [(X86brcond bb:$dst, X86_COND_G, STATUS)]>, Imp<[STATUS],[]>, TB;
def JGE : IBr<0x8D, (ops brtarget:$dst), "jge $dst",
- [(X86Brcond bb:$dst, SETGE, STATUS)]>, Imp<[STATUS],[]>, TB;
+ [(X86brcond bb:$dst, X86_COND_GE, STATUS)]>, Imp<[STATUS],[]>, TB;
def JB : IBr<0x82, (ops brtarget:$dst), "jb $dst",
- [(X86Brcond bb:$dst, SETULT, STATUS)]>, Imp<[STATUS],[]>, TB;
+ [(X86brcond bb:$dst, X86_COND_B, STATUS)]>, Imp<[STATUS],[]>, TB;
def JBE : IBr<0x86, (ops brtarget:$dst), "jbe $dst",
- [(X86Brcond bb:$dst, SETULE, STATUS)]>, Imp<[STATUS],[]>, TB;
+ [(X86brcond bb:$dst, X86_COND_BE, STATUS)]>, Imp<[STATUS],[]>, TB;
def JA : IBr<0x87, (ops brtarget:$dst), "ja $dst",
- [(X86Brcond bb:$dst, SETUGT, STATUS)]>, Imp<[STATUS],[]>, TB;
+ [(X86brcond bb:$dst, X86_COND_A, STATUS)]>, Imp<[STATUS],[]>, TB;
def JAE : IBr<0x83, (ops brtarget:$dst), "jae $dst",
- [(X86Brcond bb:$dst, SETUGE, STATUS)]>, Imp<[STATUS],[]>, TB;
+ [(X86brcond bb:$dst, X86_COND_AE, STATUS)]>, Imp<[STATUS],[]>, TB;
-def JS : IBr<0x88, (ops brtarget:$dst), "js $dst", []>, TB;
-def JNS : IBr<0x89, (ops brtarget:$dst), "jns $dst", []>, TB;
-def JP : IBr<0x8A, (ops brtarget:$dst), "jp $dst", []>, TB;
-def JNP : IBr<0x8B, (ops brtarget:$dst), "jnp $dst", []>, TB;
+def JS : IBr<0x88, (ops brtarget:$dst), "js $dst",
+ [(X86brcond bb:$dst, X86_COND_S, STATUS)]>, Imp<[STATUS],[]>, TB;
+def JNS : IBr<0x89, (ops brtarget:$dst), "jns $dst",
+ [(X86brcond bb:$dst, X86_COND_NS, STATUS)]>, Imp<[STATUS],[]>, TB;
+def JP : IBr<0x8A, (ops brtarget:$dst), "jp $dst",
+ [(X86brcond bb:$dst, X86_COND_P, STATUS)]>, Imp<[STATUS],[]>, TB;
+def JNP : IBr<0x8B, (ops brtarget:$dst), "jnp $dst",
+ [(X86brcond bb:$dst, X86_COND_NP, STATUS)]>, Imp<[STATUS],[]>, TB;
//===----------------------------------------------------------------------===//
// Call Instructions...
@@ -388,9 +413,9 @@
def CALLpcrel32 : I<0xE8, RawFrm, (ops calltarget:$dst), "call $dst",
[]>;
def CALL32r : I<0xFF, MRM2r, (ops R32:$dst), "call {*}$dst",
- []>;
+ [(call R32:$dst)]>;
def CALL32m : I<0xFF, MRM2m, (ops i32mem:$dst), "call {*}$dst",
- []>;
+ [(call (loadi32 addr:$dst))]>;
}
def : Pat<(call tglobaladdr:$dst),
@@ -658,303 +683,351 @@
(ops R16:$dst, R16:$src1, R16:$src2),
"cmovb {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
- SETULT, STATUS))]>,
+ X86_COND_B, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVB16rm : I<0x42, MRMSrcMem, // if <u, R16 = [mem16]
(ops R16:$dst, R16:$src1, i16mem:$src2),
"cmovb {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
- SETULT, STATUS))]>,
+ X86_COND_B, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVB32rr : I<0x42, MRMSrcReg, // if <u, R32 = R32
(ops R32:$dst, R32:$src1, R32:$src2),
"cmovb {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
- SETULT, STATUS))]>,
+ X86_COND_B, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVB32rm : I<0x42, MRMSrcMem, // if <u, R32 = [mem32]
(ops R32:$dst, R32:$src1, i32mem:$src2),
"cmovb {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
- SETULT, STATUS))]>,
+ X86_COND_B, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVAE16rr: I<0x43, MRMSrcReg, // if >=u, R16 = R16
(ops R16:$dst, R16:$src1, R16:$src2),
"cmovae {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
- SETUGE, STATUS))]>,
+ X86_COND_AE, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, R16 = [mem16]
(ops R16:$dst, R16:$src1, i16mem:$src2),
"cmovae {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
- SETUGE, STATUS))]>,
+ X86_COND_AE, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVAE32rr: I<0x43, MRMSrcReg, // if >=u, R32 = R32
(ops R32:$dst, R32:$src1, R32:$src2),
"cmovae {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
- SETUGE, STATUS))]>,
+ X86_COND_AE, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, R32 = [mem32]
(ops R32:$dst, R32:$src1, i32mem:$src2),
"cmovae {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
- SETUGE, STATUS))]>,
+ X86_COND_AE, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVE16rr : I<0x44, MRMSrcReg, // if ==, R16 = R16
(ops R16:$dst, R16:$src1, R16:$src2),
"cmove {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
- SETEQ, STATUS))]>,
+ X86_COND_E, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVE16rm : I<0x44, MRMSrcMem, // if ==, R16 = [mem16]
(ops R16:$dst, R16:$src1, i16mem:$src2),
"cmove {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
- SETEQ, STATUS))]>,
+ X86_COND_E, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVE32rr : I<0x44, MRMSrcReg, // if ==, R32 = R32
(ops R32:$dst, R32:$src1, R32:$src2),
"cmove {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
- SETEQ, STATUS))]>,
+ X86_COND_E, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVE32rm : I<0x44, MRMSrcMem, // if ==, R32 = [mem32]
(ops R32:$dst, R32:$src1, i32mem:$src2),
"cmove {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
- SETEQ, STATUS))]>,
+ X86_COND_E, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVNE16rr: I<0x45, MRMSrcReg, // if !=, R16 = R16
(ops R16:$dst, R16:$src1, R16:$src2),
"cmovne {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
- SETNE, STATUS))]>,
+ X86_COND_NE, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, R16 = [mem16]
(ops R16:$dst, R16:$src1, i16mem:$src2),
"cmovne {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
- SETNE, STATUS))]>,
+ X86_COND_NE, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVNE32rr: I<0x45, MRMSrcReg, // if !=, R32 = R32
(ops R32:$dst, R32:$src1, R32:$src2),
"cmovne {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
- SETNE, STATUS))]>,
+ X86_COND_NE, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, R32 = [mem32]
(ops R32:$dst, R32:$src1, i32mem:$src2),
"cmovne {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
- SETNE, STATUS))]>,
+ X86_COND_NE, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVBE16rr: I<0x46, MRMSrcReg, // if <=u, R16 = R16
(ops R16:$dst, R16:$src1, R16:$src2),
"cmovbe {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
- SETULE, STATUS))]>,
+ X86_COND_BE, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, R16 = [mem16]
(ops R16:$dst, R16:$src1, i16mem:$src2),
"cmovbe {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
- SETULE, STATUS))]>,
+ X86_COND_BE, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVBE32rr: I<0x46, MRMSrcReg, // if <=u, R32 = R32
(ops R32:$dst, R32:$src1, R32:$src2),
"cmovbe {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
- SETULE, STATUS))]>,
+ X86_COND_BE, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, R32 = [mem32]
(ops R32:$dst, R32:$src1, i32mem:$src2),
"cmovbe {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
- SETULE, STATUS))]>,
+ X86_COND_BE, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVA16rr : I<0x47, MRMSrcReg, // if >u, R16 = R16
(ops R16:$dst, R16:$src1, R16:$src2),
"cmova {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
- SETUGT, STATUS))]>,
+ X86_COND_A, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVA16rm : I<0x47, MRMSrcMem, // if >u, R16 = [mem16]
(ops R16:$dst, R16:$src1, i16mem:$src2),
"cmova {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
- SETUGT, STATUS))]>,
+ X86_COND_A, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVA32rr : I<0x47, MRMSrcReg, // if >u, R32 = R32
(ops R32:$dst, R32:$src1, R32:$src2),
"cmova {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
- SETUGT, STATUS))]>,
+ X86_COND_A, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVA32rm : I<0x47, MRMSrcMem, // if >u, R32 = [mem32]
(ops R32:$dst, R32:$src1, i32mem:$src2),
"cmova {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
- SETUGT, STATUS))]>,
+ X86_COND_A, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVL16rr : I<0x4C, MRMSrcReg, // if <s, R16 = R16
(ops R16:$dst, R16:$src1, R16:$src2),
"cmovl {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
- SETLT, STATUS))]>,
+ X86_COND_L, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVL16rm : I<0x4C, MRMSrcMem, // if <s, R16 = [mem16]
(ops R16:$dst, R16:$src1, i16mem:$src2),
"cmovl {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
- SETLT, STATUS))]>,
+ X86_COND_L, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVL32rr : I<0x4C, MRMSrcReg, // if <s, R32 = R32
(ops R32:$dst, R32:$src1, R32:$src2),
"cmovl {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
- SETLT, STATUS))]>,
+ X86_COND_L, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVL32rm : I<0x4C, MRMSrcMem, // if <s, R32 = [mem32]
(ops R32:$dst, R32:$src1, i32mem:$src2),
"cmovl {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
- SETLT, STATUS))]>,
+ X86_COND_L, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVGE16rr: I<0x4D, MRMSrcReg, // if >=s, R16 = R16
(ops R16:$dst, R16:$src1, R16:$src2),
"cmovge {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
- SETGE, STATUS))]>,
+ X86_COND_GE, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVGE16rm: I<0x4D, MRMSrcMem, // if >=s, R16 = [mem16]
(ops R16:$dst, R16:$src1, i16mem:$src2),
"cmovge {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
- SETGE, STATUS))]>,
+ X86_COND_GE, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVGE32rr: I<0x4D, MRMSrcReg, // if >=s, R32 = R32
(ops R32:$dst, R32:$src1, R32:$src2),
"cmovge {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
- SETGE, STATUS))]>,
+ X86_COND_GE, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVGE32rm: I<0x4D, MRMSrcMem, // if >=s, R32 = [mem32]
(ops R32:$dst, R32:$src1, i32mem:$src2),
"cmovge {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
- SETGE, STATUS))]>,
+ X86_COND_GE, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVLE16rr: I<0x4E, MRMSrcReg, // if <=s, R16 = R16
(ops R16:$dst, R16:$src1, R16:$src2),
"cmovle {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
- SETLE, STATUS))]>,
+ X86_COND_LE, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVLE16rm: I<0x4E, MRMSrcMem, // if <=s, R16 = [mem16]
(ops R16:$dst, R16:$src1, i16mem:$src2),
"cmovle {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
- SETLE, STATUS))]>,
+ X86_COND_LE, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVLE32rr: I<0x4E, MRMSrcReg, // if <=s, R32 = R32
(ops R32:$dst, R32:$src1, R32:$src2),
"cmovle {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
- SETLE, STATUS))]>,
+ X86_COND_LE, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVLE32rm: I<0x4E, MRMSrcMem, // if <=s, R32 = [mem32]
(ops R32:$dst, R32:$src1, i32mem:$src2),
"cmovle {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
- SETLE, STATUS))]>,
+ X86_COND_LE, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVG16rr : I<0x4F, MRMSrcReg, // if >s, R16 = R16
(ops R16:$dst, R16:$src1, R16:$src2),
"cmovg {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
- SETGT, STATUS))]>,
+ X86_COND_G, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVG16rm : I<0x4F, MRMSrcMem, // if >s, R16 = [mem16]
(ops R16:$dst, R16:$src1, i16mem:$src2),
"cmovg {$src2, $dst|$dst, $src2}",
[(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
- SETGT, STATUS))]>,
+ X86_COND_G, STATUS))]>,
Imp<[STATUS],[]>, TB, OpSize;
def CMOVG32rr : I<0x4F, MRMSrcReg, // if >s, R32 = R32
(ops R32:$dst, R32:$src1, R32:$src2),
"cmovg {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
- SETGT, STATUS))]>,
+ X86_COND_G, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVG32rm : I<0x4F, MRMSrcMem, // if >s, R32 = [mem32]
(ops R32:$dst, R32:$src1, i32mem:$src2),
"cmovg {$src2, $dst|$dst, $src2}",
[(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
- SETGT, STATUS))]>,
+ X86_COND_G, STATUS))]>,
Imp<[STATUS],[]>, TB;
def CMOVS16rr : I<0x48, MRMSrcReg, // if signed, R16 = R16
(ops R16:$dst, R16:$src1, R16:$src2),
- "cmovs {$src2, $dst|$dst, $src2}", []>, TB, OpSize;
+ "cmovs {$src2, $dst|$dst, $src2}",
+ [(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
+ X86_COND_S, STATUS))]>,
+ Imp<[STATUS],[]>, TB, OpSize;
def CMOVS16rm : I<0x48, MRMSrcMem, // if signed, R16 = [mem16]
(ops R16:$dst, R16:$src1, i16mem:$src2),
- "cmovs {$src2, $dst|$dst, $src2}", []>, TB, OpSize;
+ "cmovs {$src2, $dst|$dst, $src2}",
+ [(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
+ X86_COND_S, STATUS))]>,
+ Imp<[STATUS],[]>, TB, OpSize;
def CMOVS32rr : I<0x48, MRMSrcReg, // if signed, R32 = R32
(ops R32:$dst, R32:$src1, R32:$src2),
- "cmovs {$src2, $dst|$dst, $src2}", []>, TB;
+ "cmovs {$src2, $dst|$dst, $src2}",
+ [(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
+ X86_COND_S, STATUS))]>,
+ Imp<[STATUS],[]>, TB;
def CMOVS32rm : I<0x48, MRMSrcMem, // if signed, R32 = [mem32]
(ops R32:$dst, R32:$src1, i32mem:$src2),
- "cmovs {$src2, $dst|$dst, $src2}", []>, TB;
+ "cmovs {$src2, $dst|$dst, $src2}",
+ [(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
+ X86_COND_S, STATUS))]>,
+ Imp<[STATUS],[]>, TB;
def CMOVNS16rr: I<0x49, MRMSrcReg, // if !signed, R16 = R16
(ops R16:$dst, R16:$src1, R16:$src2),
- "cmovns {$src2, $dst|$dst, $src2}", []>, TB, OpSize;
+ "cmovns {$src2, $dst|$dst, $src2}",
+ [(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
+ X86_COND_NS, STATUS))]>,
+ Imp<[STATUS],[]>, TB, OpSize;
def CMOVNS16rm: I<0x49, MRMSrcMem, // if !signed, R16 = [mem16]
(ops R16:$dst, R16:$src1, i16mem:$src2),
- "cmovns {$src2, $dst|$dst, $src2}", []>, TB, OpSize;
+ "cmovns {$src2, $dst|$dst, $src2}",
+ [(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
+ X86_COND_NS, STATUS))]>,
+ Imp<[STATUS],[]>, TB, OpSize;
def CMOVNS32rr: I<0x49, MRMSrcReg, // if !signed, R32 = R32
(ops R32:$dst, R32:$src1, R32:$src2),
- "cmovns {$src2, $dst|$dst, $src2}", []>, TB;
+ "cmovns {$src2, $dst|$dst, $src2}",
+ [(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
+ X86_COND_NS, STATUS))]>,
+ Imp<[STATUS],[]>, TB;
def CMOVNS32rm: I<0x49, MRMSrcMem, // if !signed, R32 = [mem32]
(ops R32:$dst, R32:$src1, i32mem:$src2),
- "cmovns {$src2, $dst|$dst, $src2}", []>, TB;
+ "cmovns {$src2, $dst|$dst, $src2}",
+ [(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
+ X86_COND_NS, STATUS))]>,
+ Imp<[STATUS],[]>, TB;
def CMOVP16rr : I<0x4A, MRMSrcReg, // if parity, R16 = R16
(ops R16:$dst, R16:$src1, R16:$src2),
- "cmovp {$src2, $dst|$dst, $src2}", []>, TB, OpSize;
+ "cmovp {$src2, $dst|$dst, $src2}",
+ [(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
+ X86_COND_P, STATUS))]>,
+ Imp<[STATUS],[]>, TB, OpSize;
def CMOVP16rm : I<0x4A, MRMSrcMem, // if parity, R16 = [mem16]
(ops R16:$dst, R16:$src1, i16mem:$src2),
- "cmovp {$src2, $dst|$dst, $src2}", []>, TB, OpSize;
+ "cmovp {$src2, $dst|$dst, $src2}",
+ [(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
+ X86_COND_P, STATUS))]>,
+ Imp<[STATUS],[]>, TB, OpSize;
def CMOVP32rr : I<0x4A, MRMSrcReg, // if parity, R32 = R32
(ops R32:$dst, R32:$src1, R32:$src2),
- "cmovp {$src2, $dst|$dst, $src2}", []>, TB;
+ "cmovp {$src2, $dst|$dst, $src2}",
+ [(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
+ X86_COND_P, STATUS))]>,
+ Imp<[STATUS],[]>, TB;
def CMOVP32rm : I<0x4A, MRMSrcMem, // if parity, R32 = [mem32]
(ops R32:$dst, R32:$src1, i32mem:$src2),
- "cmovp {$src2, $dst|$dst, $src2}", []>, TB;
+ "cmovp {$src2, $dst|$dst, $src2}",
+ [(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
+ X86_COND_P, STATUS))]>,
+ Imp<[STATUS],[]>, TB;
def CMOVNP16rr : I<0x4B, MRMSrcReg, // if !parity, R16 = R16
(ops R16:$dst, R16:$src1, R16:$src2),
- "cmovnp {$src2, $dst|$dst, $src2}", []>, TB, OpSize;
+ "cmovnp {$src2, $dst|$dst, $src2}",
+ [(set R16:$dst, (X86cmov R16:$src1, R16:$src2,
+ X86_COND_NP, STATUS))]>,
+ Imp<[STATUS],[]>, TB, OpSize;
def CMOVNP16rm : I<0x4B, MRMSrcMem, // if !parity, R16 = [mem16]
(ops R16:$dst, R16:$src1, i16mem:$src2),
- "cmovnp {$src2, $dst|$dst, $src2}", []>, TB, OpSize;
+ "cmovnp {$src2, $dst|$dst, $src2}",
+ [(set R16:$dst, (X86cmov R16:$src1, (loadi16 addr:$src2),
+ X86_COND_NP, STATUS))]>,
+ Imp<[STATUS],[]>, TB, OpSize;
def CMOVNP32rr : I<0x4B, MRMSrcReg, // if !parity, R32 = R32
(ops R32:$dst, R32:$src1, R32:$src2),
- "cmovnp {$src2, $dst|$dst, $src2}", []>, TB;
+ "cmovnp {$src2, $dst|$dst, $src2}",
+ [(set R32:$dst, (X86cmov R32:$src1, R32:$src2,
+ X86_COND_NP, STATUS))]>,
+ Imp<[STATUS],[]>, TB;
def CMOVNP32rm : I<0x4B, MRMSrcMem, // if !parity, R32 = [mem32]
(ops R32:$dst, R32:$src1, i32mem:$src2),
- "cmovnp {$src2, $dst|$dst, $src2}", []>, TB;
+ "cmovnp {$src2, $dst|$dst, $src2}",
+ [(set R32:$dst, (X86cmov R32:$src1, (loadi32 addr:$src2),
+ X86_COND_NP, STATUS))]>,
+ Imp<[STATUS],[]>, TB;
// unary instructions
@@ -1863,109 +1936,146 @@
def SETEr : I<0x94, MRM0r,
(ops R8 :$dst),
- "sete $dst", [(set R8:$dst, (X86SetCC SETEQ, STATUS))]>,
+ "sete $dst",
+ [(set R8:$dst, (X86setcc X86_COND_E, STATUS))]>,
TB; // R8 = ==
def SETEm : I<0x94, MRM0m,
(ops i8mem:$dst),
- "sete $dst", [(store (X86SetCC SETEQ, STATUS), addr:$dst)]>,
+ "sete $dst",
+ [(store (X86setcc X86_COND_E, STATUS), addr:$dst)]>,
TB; // [mem8] = ==
def SETNEr : I<0x95, MRM0r,
(ops R8 :$dst),
- "setne $dst", [(set R8:$dst, (X86SetCC SETNE, STATUS))]>,
+ "setne $dst",
+ [(set R8:$dst, (X86setcc X86_COND_NE, STATUS))]>,
TB; // R8 = !=
def SETNEm : I<0x95, MRM0m,
(ops i8mem:$dst),
- "setne $dst", [(store (X86SetCC SETNE, STATUS), addr:$dst)]>,
+ "setne $dst",
+ [(store (X86setcc X86_COND_NE, STATUS), addr:$dst)]>,
TB; // [mem8] = !=
def SETLr : I<0x9C, MRM0r,
(ops R8 :$dst),
- "setl $dst", [(set R8:$dst, (X86SetCC SETLT, STATUS))]>,
+ "setl $dst",
+ [(set R8:$dst, (X86setcc X86_COND_L, STATUS))]>,
TB; // R8 = < signed
def SETLm : I<0x9C, MRM0m,
(ops i8mem:$dst),
- "setl $dst", [(store (X86SetCC SETLT, STATUS), addr:$dst)]>,
+ "setl $dst",
+ [(store (X86setcc X86_COND_L, STATUS), addr:$dst)]>,
TB; // [mem8] = < signed
def SETGEr : I<0x9D, MRM0r,
(ops R8 :$dst),
- "setge $dst", [(set R8:$dst, (X86SetCC SETGE, STATUS))]>,
+ "setge $dst",
+ [(set R8:$dst, (X86setcc X86_COND_GE, STATUS))]>,
TB; // R8 = >= signed
def SETGEm : I<0x9D, MRM0m,
(ops i8mem:$dst),
- "setge $dst", [(store (X86SetCC SETGE, STATUS), addr:$dst)]>,
+ "setge $dst",
+ [(store (X86setcc X86_COND_GE, STATUS), addr:$dst)]>,
TB; // [mem8] = >= signed
def SETLEr : I<0x9E, MRM0r,
(ops R8 :$dst),
- "setle $dst", [(set R8:$dst, (X86SetCC SETLE, STATUS))]>,
+ "setle $dst",
+ [(set R8:$dst, (X86setcc X86_COND_LE, STATUS))]>,
TB; // R8 = <= signed
def SETLEm : I<0x9E, MRM0m,
(ops i8mem:$dst),
- "setle $dst", [(store (X86SetCC SETLE, STATUS), addr:$dst)]>,
+ "setle $dst",
+ [(store (X86setcc X86_COND_LE, STATUS), addr:$dst)]>,
TB; // [mem8] = <= signed
def SETGr : I<0x9F, MRM0r,
(ops R8 :$dst),
- "setg $dst", [(set R8:$dst, (X86SetCC SETGT, STATUS))]>,
+ "setg $dst",
+ [(set R8:$dst, (X86setcc X86_COND_G, STATUS))]>,
TB; // R8 = > signed
def SETGm : I<0x9F, MRM0m,
(ops i8mem:$dst),
- "setg $dst", [(store (X86SetCC SETGT, STATUS), addr:$dst)]>,
+ "setg $dst",
+ [(store (X86setcc X86_COND_G, STATUS), addr:$dst)]>,
TB; // [mem8] = > signed
def SETBr : I<0x92, MRM0r,
(ops R8 :$dst),
- "setb $dst", [(set R8:$dst, (X86SetCC SETULT, STATUS))]>,
+ "setb $dst",
+ [(set R8:$dst, (X86setcc X86_COND_B, STATUS))]>,
TB; // R8 = < unsign
def SETBm : I<0x92, MRM0m,
(ops i8mem:$dst),
- "setb $dst", [(store (X86SetCC SETULT, STATUS), addr:$dst)]>,
+ "setb $dst",
+ [(store (X86setcc X86_COND_B, STATUS), addr:$dst)]>,
TB; // [mem8] = < unsign
def SETAEr : I<0x93, MRM0r,
(ops R8 :$dst),
- "setae $dst", [(set R8:$dst, (X86SetCC SETUGE, STATUS))]>,
+ "setae $dst",
+ [(set R8:$dst, (X86setcc X86_COND_AE, STATUS))]>,
TB; // R8 = >= unsign
def SETAEm : I<0x93, MRM0m,
(ops i8mem:$dst),
- "setae $dst", [(store (X86SetCC SETUGE, STATUS), addr:$dst)]>,
+ "setae $dst",
+ [(store (X86setcc X86_COND_AE, STATUS), addr:$dst)]>,
TB; // [mem8] = >= unsign
def SETBEr : I<0x96, MRM0r,
(ops R8 :$dst),
- "setbe $dst", [(set R8:$dst, (X86SetCC SETULE, STATUS))]>,
+ "setbe $dst",
+ [(set R8:$dst, (X86setcc X86_COND_BE, STATUS))]>,
TB; // R8 = <= unsign
def SETBEm : I<0x96, MRM0m,
(ops i8mem:$dst),
- "setbe $dst", [(store (X86SetCC SETULE, STATUS), addr:$dst)]>,
+ "setbe $dst",
+ [(store (X86setcc X86_COND_BE, STATUS), addr:$dst)]>,
TB; // [mem8] = <= unsign
def SETAr : I<0x97, MRM0r,
(ops R8 :$dst),
- "seta $dst", [(set R8:$dst, (X86SetCC SETUGT, STATUS))]>,
+ "seta $dst",
+ [(set R8:$dst, (X86setcc X86_COND_A, STATUS))]>,
 TB; // R8 = > unsigned
def SETAm : I<0x97, MRM0m,
(ops i8mem:$dst),
- "seta $dst", [(store (X86SetCC SETUGT, STATUS), addr:$dst)]>,
+ "seta $dst",
+ [(store (X86setcc X86_COND_A, STATUS), addr:$dst)]>,
 TB; // [mem8] = > unsigned
+
def SETSr : I<0x98, MRM0r,
(ops R8 :$dst),
- "sets $dst", []>, TB; // R8 = <sign bit>
+ "sets $dst",
+ [(set R8:$dst, (X86setcc X86_COND_S, STATUS))]>,
+ TB; // R8 = <sign bit>
def SETSm : I<0x98, MRM0m,
(ops i8mem:$dst),
- "sets $dst", []>, TB; // [mem8] = <sign bit>
+ "sets $dst",
+ [(store (X86setcc X86_COND_S, STATUS), addr:$dst)]>,
+ TB; // [mem8] = <sign bit>
def SETNSr : I<0x99, MRM0r,
(ops R8 :$dst),
- "setns $dst", []>, TB; // R8 = !<sign bit>
+ "setns $dst",
+ [(set R8:$dst, (X86setcc X86_COND_NS, STATUS))]>,
+ TB; // R8 = !<sign bit>
def SETNSm : I<0x99, MRM0m,
(ops i8mem:$dst),
- "setns $dst", []>, TB; // [mem8] = !<sign bit>
+ "setns $dst",
+ [(store (X86setcc X86_COND_NS, STATUS), addr:$dst)]>,
+ TB; // [mem8] = !<sign bit>
def SETPr : I<0x9A, MRM0r,
(ops R8 :$dst),
- "setp $dst", []>, TB; // R8 = parity
+ "setp $dst",
+ [(set R8:$dst, (X86setcc X86_COND_P, STATUS))]>,
+ TB; // R8 = parity
def SETPm : I<0x9A, MRM0m,
(ops i8mem:$dst),
- "setp $dst", []>, TB; // [mem8] = parity
+ "setp $dst",
+ [(store (X86setcc X86_COND_P, STATUS), addr:$dst)]>,
+ TB; // [mem8] = parity
def SETNPr : I<0x9B, MRM0r,
(ops R8 :$dst),
- "setnp $dst", []>, TB; // R8 = not parity
+ "setnp $dst",
+ [(set R8:$dst, (X86setcc X86_COND_NP, STATUS))]>,
+ TB; // R8 = not parity
def SETNPm : I<0x9B, MRM0m,
(ops i8mem:$dst),
- "setnp $dst", []>, TB; // [mem8] = not parity
+ "setnp $dst",
+ [(store (X86setcc X86_COND_NP, STATUS), addr:$dst)]>,
+ TB; // [mem8] = not parity
// Integer comparisons
def CMP8rr : I<0x38, MRMDestReg,
@@ -2191,28 +2301,30 @@
[(set FR64:$dst, (fsqrt FR64:$src))]>,
Requires<[HasSSE2]>, XD;
-def UCOMISDrr: I<0x2E, MRMSrcReg, (ops FR64:$dst, FR64:$src),
- "ucomisd {$src, $dst|$dst, $src}", []>,
+def UCOMISDrr: I<0x2E, MRMSrcReg, (ops FR64:$src1, FR64:$src2),
+ "ucomisd {$src2, $src1|$src1, $src2}",
+ [(set STATUS, (X86cmp FR64:$src1, FR64:$src2))]>,
Requires<[HasSSE2]>, TB, OpSize;
-def UCOMISDrm: I<0x2E, MRMSrcMem, (ops FR64:$dst, f64mem:$src),
- "ucomisd {$src, $dst|$dst, $src}", []>,
- Requires<[HasSSE2]>, TB, OpSize;
-def UCOMISSrr: I<0x2E, MRMSrcReg, (ops FR32:$dst, FR32:$src),
- "ucomiss {$src, $dst|$dst, $src}", []>,
- Requires<[HasSSE1]>, TB;
-def UCOMISSrm: I<0x2E, MRMSrcMem, (ops FR32:$dst, f32mem:$src),
- "ucomiss {$src, $dst|$dst, $src}", []>,
- Requires<[HasSSE1]>, TB;
+def UCOMISDrm: I<0x2E, MRMSrcMem, (ops FR64:$src1, f64mem:$src2),
+ "ucomisd {$src2, $src1|$src1, $src2}",
+ [(set STATUS, (X86cmp FR64:$src1, (loadf64 addr:$src2)))]>,
+ Imp<[],[STATUS]>, Requires<[HasSSE2]>, TB, OpSize;
+def UCOMISSrr: I<0x2E, MRMSrcReg, (ops FR32:$src1, FR32:$src2),
+ "ucomiss {$src2, $src1|$src1, $src2}",
+ [(set STATUS, (X86cmp FR32:$src1, FR32:$src2))]>,
+ Imp<[],[STATUS]>, Requires<[HasSSE1]>, TB;
+def UCOMISSrm: I<0x2E, MRMSrcMem, (ops FR32:$src1, f32mem:$src2),
+ "ucomiss {$src2, $src1|$src1, $src2}",
+ [(set STATUS, (X86cmp FR32:$src1, (loadf32 addr:$src2)))]>,
+ Imp<[],[STATUS]>, Requires<[HasSSE1]>, TB;
// Pseudo-instructions that map fld0 to xorps/xorpd for sse.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
def FLD0SS : I<0x57, MRMSrcReg, (ops FR32:$dst),
- "xorps $dst, $dst",
- [(set FR32:$dst, fp32imm0)]>,
+ "xorps $dst, $dst", [(set FR32:$dst, fp32imm0)]>,
Requires<[HasSSE1]>, TB;
def FLD0SD : I<0x57, MRMSrcReg, (ops FR64:$dst),
- "xorpd $dst, $dst",
- [(set FR64:$dst, fp64imm0)]>,
+ "xorpd $dst, $dst", [(set FR64:$dst, fp64imm0)]>,
Requires<[HasSSE2]>, TB, OpSize;
let isTwoAddress = 1 in {
@@ -2605,10 +2717,11 @@
// Floating point compares.
-def FpUCOMr : FpI<(ops RST:$lhs, RST:$rhs), CompareFP,
+def FpUCOMr : FpI<(ops RFP:$lhs, RFP:$rhs), CompareFP,
[]>; // FPSW = cmp ST(0) with ST(i)
-def FpUCOMIr : FpI<(ops RST:$lhs, RST:$rhs), CompareFP,
- []>; // CC = cmp ST(0) with ST(i)
+def FpUCOMIr : FpI<(ops RFP:$lhs, RFP:$rhs), CompareFP,
+ [(set STATUS, (X86cmp RFP:$lhs, RFP:$rhs))]>,
+ Imp<[],[STATUS]>; // CC = cmp ST(0) with ST(i)
def FUCOMr : FPI<0xE0, AddRegFrm, // FPSW = cmp ST(0) with ST(i)
(ops RST:$reg),
Index: llvm/lib/Target/X86/X86RegisterInfo.cpp
diff -u llvm/lib/Target/X86/X86RegisterInfo.cpp:1.115 llvm/lib/Target/X86/X86RegisterInfo.cpp:1.116
--- llvm/lib/Target/X86/X86RegisterInfo.cpp:1.115 Sat Dec 24 03:48:35 2005
+++ llvm/lib/Target/X86/X86RegisterInfo.cpp Thu Jan 5 18:43:03 2006
@@ -568,7 +568,8 @@
switch (MBBI->getOpcode()) {
case X86::RET:
case X86::RETI:
- case X86::RETVOID: // FIXME: See X86InstrInfo.td
+ case X86::RETVOID: // FIXME: See X86InstrInfo.td
+ case X86::RETIVOID: // FIXME: See X86InstrInfo.td
case X86::TAILJMPd:
case X86::TAILJMPr:
case X86::TAILJMPm: break; // These are ok
More information about the llvm-commits
mailing list