[llvm-commits] CVS: llvm/lib/Target/X86/X86RegisterInfo.cpp X86InstrInfo.td X86InstrInfo.cpp Printer.cpp PeepholeOptimizer.cpp InstSelectSimple.cpp FloatingPoint.cpp
Alkis Evlogimenos
alkis at cs.uiuc.edu
Sun Feb 29 02:51:01 PST 2004
Changes in directory llvm/lib/Target/X86:
X86RegisterInfo.cpp updated: 1.74 -> 1.75
X86InstrInfo.td updated: 1.56 -> 1.57
X86InstrInfo.cpp updated: 1.25 -> 1.26
Printer.cpp updated: 1.89 -> 1.90
PeepholeOptimizer.cpp updated: 1.30 -> 1.31
InstSelectSimple.cpp updated: 1.186 -> 1.187
FloatingPoint.cpp updated: 1.24 -> 1.25
---
Log message:
A big X86 instruction rename. The instructions are renamed to make
their names more descriptive. A name consists of the base name and the
default operand size, followed by one character per operand, each with
an optional special size when that operand differs from the default.
For example:
ADD8rr -> add, 8-bit register, 8-bit register
IMUL16rmi -> imul, 16-bit register, 16-bit memory, 16-bit immediate
IMUL16rmi8 -> imul, 16-bit register, 16-bit memory, 8-bit immediate
MOVSX32rm16 -> movsx, 32-bit register, 16-bit memory
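
To make the scheme concrete, here is a minimal, hypothetical C++ sketch
(not part of the LLVM tree) that decodes a name of this form. It covers
the common r/m/i operand letters and, for brevity, ignores special
operand tags such as CL:

// Illustrative sketch only: decode "BASE<size><ops>" opcode names.
#include <cctype>
#include <iostream>
#include <string>

int main() {
  std::string Name = "IMUL16rmi8";  // try also "ADD8rr", "MOVSX32rm16"
  std::string::size_type i = 0;

  std::string Base;                 // leading uppercase run: the base name
  while (i < Name.size() && std::isupper((unsigned char)Name[i]))
    Base += Name[i++];

  std::string DefSize;              // default operand size: 8, 16 or 32
  while (i < Name.size() && std::isdigit((unsigned char)Name[i]))
    DefSize += Name[i++];

  std::cout << Base << ", default " << DefSize << "-bit\n";

  // One character per operand: r = register, m = memory, i = immediate,
  // each optionally followed by a special size overriding the default.
  while (i < Name.size()) {
    char Op = Name[i++];
    std::string Size;
    while (i < Name.size() && std::isdigit((unsigned char)Name[i]))
      Size += Name[i++];
    std::cout << "  " << Op << ": " << (Size.empty() ? DefSize : Size)
              << "-bit\n";
  }
  return 0;
}

For "IMUL16rmi8" this prints a 16-bit register, a 16-bit memory operand
and an 8-bit immediate, matching the reading given above.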
---
Diffs of the changes: (+753 -753)
Index: llvm/lib/Target/X86/X86RegisterInfo.cpp
diff -u llvm/lib/Target/X86/X86RegisterInfo.cpp:1.74 llvm/lib/Target/X86/X86RegisterInfo.cpp:1.75
--- llvm/lib/Target/X86/X86RegisterInfo.cpp:1.74 Sat Feb 28 17:46:44 2004
+++ llvm/lib/Target/X86/X86RegisterInfo.cpp Sun Feb 29 02:50:03 2004
@@ -59,7 +59,7 @@
unsigned SrcReg, int FrameIdx,
const TargetRegisterClass *RC) const {
static const unsigned Opcode[] =
- { X86::MOVmr8, X86::MOVmr16, X86::MOVmr32, X86::FSTPm80 };
+ { X86::MOV8mr, X86::MOV16mr, X86::MOV32mr, X86::FSTP80m };
MachineInstr *I = addFrameReference(BuildMI(Opcode[getIdx(RC)], 5),
FrameIdx).addReg(SrcReg);
MBB.insert(MI, I);
@@ -71,7 +71,7 @@
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC) const{
static const unsigned Opcode[] =
- { X86::MOVrm8, X86::MOVrm16, X86::MOVrm32, X86::FLDm80 };
+ { X86::MOV8rm, X86::MOV16rm, X86::MOV32rm, X86::FLD80m };
unsigned OC = Opcode[getIdx(RC)];
MBB.insert(MI, addFrameReference(BuildMI(OC, 4, DestReg), FrameIdx));
return 1;
@@ -82,7 +82,7 @@
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *RC) const {
static const unsigned Opcode[] =
- { X86::MOVrr8, X86::MOVrr16, X86::MOVrr32, X86::FpMOV };
+ { X86::MOV8rr, X86::MOV16rr, X86::MOV32rr, X86::FpMOV };
MBB.insert(MI, BuildMI(Opcode[getIdx(RC)],1,DestReg).addReg(SrcReg));
return 1;
}
@@ -142,90 +142,90 @@
MachineInstr* NI = 0;
if (i == 0) {
switch(MI->getOpcode()) {
- case X86::XCHGrr8: NI = MakeMRInst(X86::XCHGmr8 ,FrameIndex, MI); break;
- case X86::XCHGrr16:NI = MakeMRInst(X86::XCHGmr16,FrameIndex, MI); break;
- case X86::XCHGrr32:NI = MakeMRInst(X86::XCHGmr32,FrameIndex, MI); break;
- case X86::MOVrr8: NI = MakeMRInst(X86::MOVmr8 , FrameIndex, MI); break;
- case X86::MOVrr16: NI = MakeMRInst(X86::MOVmr16, FrameIndex, MI); break;
- case X86::MOVrr32: NI = MakeMRInst(X86::MOVmr32, FrameIndex, MI); break;
- case X86::MOVri8: NI = MakeMIInst(X86::MOVmi8 , FrameIndex, MI); break;
- case X86::MOVri16: NI = MakeMIInst(X86::MOVmi16, FrameIndex, MI); break;
- case X86::MOVri32: NI = MakeMIInst(X86::MOVmi32, FrameIndex, MI); break;
- case X86::MULr8: NI = MakeMInst( X86::MULm8 , FrameIndex, MI); break;
- case X86::MULr16: NI = MakeMInst( X86::MULm16, FrameIndex, MI); break;
- case X86::MULr32: NI = MakeMInst( X86::MULm32, FrameIndex, MI); break;
- case X86::DIVr8: NI = MakeMInst( X86::DIVm8 , FrameIndex, MI); break;
- case X86::DIVr16: NI = MakeMInst( X86::DIVm16, FrameIndex, MI); break;
- case X86::DIVr32: NI = MakeMInst( X86::DIVm32, FrameIndex, MI); break;
- case X86::IDIVr8: NI = MakeMInst( X86::IDIVm8 , FrameIndex, MI); break;
- case X86::IDIVr16: NI = MakeMInst( X86::IDIVm16, FrameIndex, MI); break;
- case X86::IDIVr32: NI = MakeMInst( X86::IDIVm32, FrameIndex, MI); break;
- case X86::NEGr8: NI = MakeMInst( X86::NEGm8 , FrameIndex, MI); break;
- case X86::NEGr16: NI = MakeMInst( X86::NEGm16, FrameIndex, MI); break;
- case X86::NEGr32: NI = MakeMInst( X86::NEGm32, FrameIndex, MI); break;
- case X86::NOTr8: NI = MakeMInst( X86::NOTm8 , FrameIndex, MI); break;
- case X86::NOTr16: NI = MakeMInst( X86::NOTm16, FrameIndex, MI); break;
- case X86::NOTr32: NI = MakeMInst( X86::NOTm32, FrameIndex, MI); break;
- case X86::INCr8: NI = MakeMInst( X86::INCm8 , FrameIndex, MI); break;
- case X86::INCr16: NI = MakeMInst( X86::INCm16, FrameIndex, MI); break;
- case X86::INCr32: NI = MakeMInst( X86::INCm32, FrameIndex, MI); break;
- case X86::DECr8: NI = MakeMInst( X86::DECm8 , FrameIndex, MI); break;
- case X86::DECr16: NI = MakeMInst( X86::DECm16, FrameIndex, MI); break;
- case X86::DECr32: NI = MakeMInst( X86::DECm32, FrameIndex, MI); break;
- case X86::ADDrr8: NI = MakeMRInst(X86::ADDmr8 , FrameIndex, MI); break;
- case X86::ADDrr16: NI = MakeMRInst(X86::ADDmr16, FrameIndex, MI); break;
- case X86::ADDrr32: NI = MakeMRInst(X86::ADDmr32, FrameIndex, MI); break;
- case X86::ADCrr32: NI = MakeMRInst(X86::ADCmr32, FrameIndex, MI); break;
- case X86::ADDri8: NI = MakeMIInst(X86::ADDmi8 , FrameIndex, MI); break;
- case X86::ADDri16: NI = MakeMIInst(X86::ADDmi16, FrameIndex, MI); break;
- case X86::ADDri32: NI = MakeMIInst(X86::ADDmi32, FrameIndex, MI); break;
- case X86::SUBrr8: NI = MakeMRInst(X86::SUBmr8 , FrameIndex, MI); break;
- case X86::SUBrr16: NI = MakeMRInst(X86::SUBmr16, FrameIndex, MI); break;
- case X86::SUBrr32: NI = MakeMRInst(X86::SUBmr32, FrameIndex, MI); break;
- case X86::SBBrr32: NI = MakeMRInst(X86::SBBmr32, FrameIndex, MI); break;
- case X86::SUBri8: NI = MakeMIInst(X86::SUBmi8 , FrameIndex, MI); break;
- case X86::SUBri16: NI = MakeMIInst(X86::SUBmi16, FrameIndex, MI); break;
- case X86::SUBri32: NI = MakeMIInst(X86::SUBmi32, FrameIndex, MI); break;
- case X86::ANDrr8: NI = MakeMRInst(X86::ANDmr8 , FrameIndex, MI); break;
- case X86::ANDrr16: NI = MakeMRInst(X86::ANDmr16, FrameIndex, MI); break;
- case X86::ANDrr32: NI = MakeMRInst(X86::ANDmr32, FrameIndex, MI); break;
- case X86::ANDri8: NI = MakeMIInst(X86::ANDmi8 , FrameIndex, MI); break;
- case X86::ANDri16: NI = MakeMIInst(X86::ANDmi16, FrameIndex, MI); break;
- case X86::ANDri32: NI = MakeMIInst(X86::ANDmi32, FrameIndex, MI); break;
- case X86::ORrr8: NI = MakeMRInst(X86::ORmr8 , FrameIndex, MI); break;
- case X86::ORrr16: NI = MakeMRInst(X86::ORmr16, FrameIndex, MI); break;
- case X86::ORrr32: NI = MakeMRInst(X86::ORmr32, FrameIndex, MI); break;
- case X86::ORri8: NI = MakeMIInst(X86::ORmi8 , FrameIndex, MI); break;
- case X86::ORri16: NI = MakeMIInst(X86::ORmi16, FrameIndex, MI); break;
- case X86::ORri32: NI = MakeMIInst(X86::ORmi32, FrameIndex, MI); break;
- case X86::XORrr8: NI = MakeMRInst(X86::XORmr8 , FrameIndex, MI); break;
- case X86::XORrr16: NI = MakeMRInst(X86::XORmr16, FrameIndex, MI); break;
- case X86::XORrr32: NI = MakeMRInst(X86::XORmr32, FrameIndex, MI); break;
- case X86::XORri8: NI = MakeMIInst(X86::XORmi8 , FrameIndex, MI); break;
- case X86::XORri16: NI = MakeMIInst(X86::XORmi16, FrameIndex, MI); break;
- case X86::XORri32: NI = MakeMIInst(X86::XORmi32, FrameIndex, MI); break;
- case X86::SHLrCL8: NI = MakeMInst( X86::SHLmCL8 ,FrameIndex, MI); break;
- case X86::SHLrCL16:NI = MakeMInst( X86::SHLmCL16,FrameIndex, MI); break;
- case X86::SHLrCL32:NI = MakeMInst( X86::SHLmCL32,FrameIndex, MI); break;
- case X86::SHLri8: NI = MakeMIInst(X86::SHLmi8 , FrameIndex, MI); break;
- case X86::SHLri16: NI = MakeMIInst(X86::SHLmi16, FrameIndex, MI); break;
- case X86::SHLri32: NI = MakeMIInst(X86::SHLmi32, FrameIndex, MI); break;
- case X86::SHRrCL8: NI = MakeMInst( X86::SHRmCL8 ,FrameIndex, MI); break;
- case X86::SHRrCL16:NI = MakeMInst( X86::SHRmCL16,FrameIndex, MI); break;
- case X86::SHRrCL32:NI = MakeMInst( X86::SHRmCL32,FrameIndex, MI); break;
- case X86::SHRri8: NI = MakeMIInst(X86::SHRmi8 , FrameIndex, MI); break;
- case X86::SHRri16: NI = MakeMIInst(X86::SHRmi16, FrameIndex, MI); break;
- case X86::SHRri32: NI = MakeMIInst(X86::SHRmi32, FrameIndex, MI); break;
- case X86::SARrCL8: NI = MakeMInst( X86::SARmCL8 ,FrameIndex, MI); break;
- case X86::SARrCL16:NI = MakeMInst( X86::SARmCL16,FrameIndex, MI); break;
- case X86::SARrCL32:NI = MakeMInst( X86::SARmCL32,FrameIndex, MI); break;
- case X86::SARri8: NI = MakeMIInst(X86::SARmi8 , FrameIndex, MI); break;
- case X86::SARri16: NI = MakeMIInst(X86::SARmi16, FrameIndex, MI); break;
- case X86::SARri32: NI = MakeMIInst(X86::SARmi32, FrameIndex, MI); break;
- case X86::SHLDrrCL32:NI = MakeMRInst( X86::SHLDmrCL32,FrameIndex, MI);break;
- case X86::SHLDrr32i8:NI = MakeMRIInst(X86::SHLDmr32i8,FrameIndex, MI);break;
- case X86::SHRDrrCL32:NI = MakeMRInst( X86::SHRDmrCL32,FrameIndex, MI);break;
- case X86::SHRDrr32i8:NI = MakeMRIInst(X86::SHRDmr32i8,FrameIndex, MI);break;
+ case X86::XCHG8rr: NI = MakeMRInst(X86::XCHG8mr ,FrameIndex, MI); break;
+ case X86::XCHG16rr:NI = MakeMRInst(X86::XCHG16mr,FrameIndex, MI); break;
+ case X86::XCHG32rr:NI = MakeMRInst(X86::XCHG32mr,FrameIndex, MI); break;
+ case X86::MOV8rr: NI = MakeMRInst(X86::MOV8mr , FrameIndex, MI); break;
+ case X86::MOV16rr: NI = MakeMRInst(X86::MOV16mr, FrameIndex, MI); break;
+ case X86::MOV32rr: NI = MakeMRInst(X86::MOV32mr, FrameIndex, MI); break;
+ case X86::MOV8ri: NI = MakeMIInst(X86::MOV8mi , FrameIndex, MI); break;
+ case X86::MOV16ri: NI = MakeMIInst(X86::MOV16mi, FrameIndex, MI); break;
+ case X86::MOV32ri: NI = MakeMIInst(X86::MOV32mi, FrameIndex, MI); break;
+ case X86::MUL8r: NI = MakeMInst( X86::MUL8m , FrameIndex, MI); break;
+ case X86::MUL16r: NI = MakeMInst( X86::MUL16m, FrameIndex, MI); break;
+ case X86::MUL32r: NI = MakeMInst( X86::MUL32m, FrameIndex, MI); break;
+ case X86::DIV8r: NI = MakeMInst( X86::DIV8m , FrameIndex, MI); break;
+ case X86::DIV16r: NI = MakeMInst( X86::DIV16m, FrameIndex, MI); break;
+ case X86::DIV32r: NI = MakeMInst( X86::DIV32m, FrameIndex, MI); break;
+ case X86::IDIV8r: NI = MakeMInst( X86::IDIV8m , FrameIndex, MI); break;
+ case X86::IDIV16r: NI = MakeMInst( X86::IDIV16m, FrameIndex, MI); break;
+ case X86::IDIV32r: NI = MakeMInst( X86::IDIV32m, FrameIndex, MI); break;
+ case X86::NEG8r: NI = MakeMInst( X86::NEG8m , FrameIndex, MI); break;
+ case X86::NEG16r: NI = MakeMInst( X86::NEG16m, FrameIndex, MI); break;
+ case X86::NEG32r: NI = MakeMInst( X86::NEG32m, FrameIndex, MI); break;
+ case X86::NOT8r: NI = MakeMInst( X86::NOT8m , FrameIndex, MI); break;
+ case X86::NOT16r: NI = MakeMInst( X86::NOT16m, FrameIndex, MI); break;
+ case X86::NOT32r: NI = MakeMInst( X86::NOT32m, FrameIndex, MI); break;
+ case X86::INC8r: NI = MakeMInst( X86::INC8m , FrameIndex, MI); break;
+ case X86::INC16r: NI = MakeMInst( X86::INC16m, FrameIndex, MI); break;
+ case X86::INC32r: NI = MakeMInst( X86::INC32m, FrameIndex, MI); break;
+ case X86::DEC8r: NI = MakeMInst( X86::DEC8m , FrameIndex, MI); break;
+ case X86::DEC16r: NI = MakeMInst( X86::DEC16m, FrameIndex, MI); break;
+ case X86::DEC32r: NI = MakeMInst( X86::DEC32m, FrameIndex, MI); break;
+ case X86::ADD8rr: NI = MakeMRInst(X86::ADD8mr , FrameIndex, MI); break;
+ case X86::ADD16rr: NI = MakeMRInst(X86::ADD16mr, FrameIndex, MI); break;
+ case X86::ADD32rr: NI = MakeMRInst(X86::ADD32mr, FrameIndex, MI); break;
+ case X86::ADC32rr: NI = MakeMRInst(X86::ADC32mr, FrameIndex, MI); break;
+ case X86::ADD8ri: NI = MakeMIInst(X86::ADD8mi , FrameIndex, MI); break;
+ case X86::ADD16ri: NI = MakeMIInst(X86::ADD16mi, FrameIndex, MI); break;
+ case X86::ADD32ri: NI = MakeMIInst(X86::ADD32mi, FrameIndex, MI); break;
+ case X86::SUB8rr: NI = MakeMRInst(X86::SUB8mr , FrameIndex, MI); break;
+ case X86::SUB16rr: NI = MakeMRInst(X86::SUB16mr, FrameIndex, MI); break;
+ case X86::SUB32rr: NI = MakeMRInst(X86::SUB32mr, FrameIndex, MI); break;
+ case X86::SBB32rr: NI = MakeMRInst(X86::SBB32mr, FrameIndex, MI); break;
+ case X86::SUB8ri: NI = MakeMIInst(X86::SUB8mi , FrameIndex, MI); break;
+ case X86::SUB16ri: NI = MakeMIInst(X86::SUB16mi, FrameIndex, MI); break;
+ case X86::SUB32ri: NI = MakeMIInst(X86::SUB32mi, FrameIndex, MI); break;
+ case X86::AND8rr: NI = MakeMRInst(X86::AND8mr , FrameIndex, MI); break;
+ case X86::AND16rr: NI = MakeMRInst(X86::AND16mr, FrameIndex, MI); break;
+ case X86::AND32rr: NI = MakeMRInst(X86::AND32mr, FrameIndex, MI); break;
+ case X86::AND8ri: NI = MakeMIInst(X86::AND8mi , FrameIndex, MI); break;
+ case X86::AND16ri: NI = MakeMIInst(X86::AND16mi, FrameIndex, MI); break;
+ case X86::AND32ri: NI = MakeMIInst(X86::AND32mi, FrameIndex, MI); break;
+ case X86::OR8rr: NI = MakeMRInst(X86::OR8mr , FrameIndex, MI); break;
+ case X86::OR16rr: NI = MakeMRInst(X86::OR16mr, FrameIndex, MI); break;
+ case X86::OR32rr: NI = MakeMRInst(X86::OR32mr, FrameIndex, MI); break;
+ case X86::OR8ri: NI = MakeMIInst(X86::OR8mi , FrameIndex, MI); break;
+ case X86::OR16ri: NI = MakeMIInst(X86::OR16mi, FrameIndex, MI); break;
+ case X86::OR32ri: NI = MakeMIInst(X86::OR32mi, FrameIndex, MI); break;
+ case X86::XOR8rr: NI = MakeMRInst(X86::XOR8mr , FrameIndex, MI); break;
+ case X86::XOR16rr: NI = MakeMRInst(X86::XOR16mr, FrameIndex, MI); break;
+ case X86::XOR32rr: NI = MakeMRInst(X86::XOR32mr, FrameIndex, MI); break;
+ case X86::XOR8ri: NI = MakeMIInst(X86::XOR8mi , FrameIndex, MI); break;
+ case X86::XOR16ri: NI = MakeMIInst(X86::XOR16mi, FrameIndex, MI); break;
+ case X86::XOR32ri: NI = MakeMIInst(X86::XOR32mi, FrameIndex, MI); break;
+ case X86::SHL8rCL: NI = MakeMInst( X86::SHL8mCL ,FrameIndex, MI); break;
+ case X86::SHL16rCL:NI = MakeMInst( X86::SHL16mCL,FrameIndex, MI); break;
+ case X86::SHL32rCL:NI = MakeMInst( X86::SHL32mCL,FrameIndex, MI); break;
+ case X86::SHL8ri: NI = MakeMIInst(X86::SHL8mi , FrameIndex, MI); break;
+ case X86::SHL16ri: NI = MakeMIInst(X86::SHL16mi, FrameIndex, MI); break;
+ case X86::SHL32ri: NI = MakeMIInst(X86::SHL32mi, FrameIndex, MI); break;
+ case X86::SHR8rCL: NI = MakeMInst( X86::SHR8mCL ,FrameIndex, MI); break;
+ case X86::SHR16rCL:NI = MakeMInst( X86::SHR16mCL,FrameIndex, MI); break;
+ case X86::SHR32rCL:NI = MakeMInst( X86::SHR32mCL,FrameIndex, MI); break;
+ case X86::SHR8ri: NI = MakeMIInst(X86::SHR8mi , FrameIndex, MI); break;
+ case X86::SHR16ri: NI = MakeMIInst(X86::SHR16mi, FrameIndex, MI); break;
+ case X86::SHR32ri: NI = MakeMIInst(X86::SHR32mi, FrameIndex, MI); break;
+ case X86::SAR8rCL: NI = MakeMInst( X86::SAR8mCL ,FrameIndex, MI); break;
+ case X86::SAR16rCL:NI = MakeMInst( X86::SAR16mCL,FrameIndex, MI); break;
+ case X86::SAR32rCL:NI = MakeMInst( X86::SAR32mCL,FrameIndex, MI); break;
+ case X86::SAR8ri: NI = MakeMIInst(X86::SAR8mi , FrameIndex, MI); break;
+ case X86::SAR16ri: NI = MakeMIInst(X86::SAR16mi, FrameIndex, MI); break;
+ case X86::SAR32ri: NI = MakeMIInst(X86::SAR32mi, FrameIndex, MI); break;
+ case X86::SHLD32rrCL:NI = MakeMRInst( X86::SHLD32mrCL,FrameIndex, MI);break;
+ case X86::SHLD32rri8:NI = MakeMRIInst(X86::SHLD32mri8,FrameIndex, MI);break;
+ case X86::SHRD32rrCL:NI = MakeMRInst( X86::SHRD32mrCL,FrameIndex, MI);break;
+ case X86::SHRD32rri8:NI = MakeMRIInst(X86::SHRD32mri8,FrameIndex, MI);break;
case X86::SETBr: NI = MakeMInst( X86::SETBm, FrameIndex, MI); break;
case X86::SETAEr: NI = MakeMInst( X86::SETAEm, FrameIndex, MI); break;
case X86::SETEr: NI = MakeMInst( X86::SETEm, FrameIndex, MI); break;
@@ -238,61 +238,61 @@
case X86::SETGEr: NI = MakeMInst( X86::SETGEm, FrameIndex, MI); break;
case X86::SETLEr: NI = MakeMInst( X86::SETLEm, FrameIndex, MI); break;
case X86::SETGr: NI = MakeMInst( X86::SETGm, FrameIndex, MI); break;
- case X86::TESTrr8: NI = MakeMRInst(X86::TESTmr8 ,FrameIndex, MI); break;
- case X86::TESTrr16:NI = MakeMRInst(X86::TESTmr16,FrameIndex, MI); break;
- case X86::TESTrr32:NI = MakeMRInst(X86::TESTmr32,FrameIndex, MI); break;
- case X86::TESTri8: NI = MakeMIInst(X86::TESTmi8 ,FrameIndex, MI); break;
- case X86::TESTri16:NI = MakeMIInst(X86::TESTmi16,FrameIndex, MI); break;
- case X86::TESTri32:NI = MakeMIInst(X86::TESTmi32,FrameIndex, MI); break;
- case X86::CMPrr8: NI = MakeMRInst(X86::CMPmr8 , FrameIndex, MI); break;
- case X86::CMPrr16: NI = MakeMRInst(X86::CMPmr16, FrameIndex, MI); break;
- case X86::CMPrr32: NI = MakeMRInst(X86::CMPmr32, FrameIndex, MI); break;
- case X86::CMPri8: NI = MakeMIInst(X86::CMPmi8 , FrameIndex, MI); break;
- case X86::CMPri16: NI = MakeMIInst(X86::CMPmi16, FrameIndex, MI); break;
- case X86::CMPri32: NI = MakeMIInst(X86::CMPmi32, FrameIndex, MI); break;
+ case X86::TEST8rr: NI = MakeMRInst(X86::TEST8mr ,FrameIndex, MI); break;
+ case X86::TEST16rr:NI = MakeMRInst(X86::TEST16mr,FrameIndex, MI); break;
+ case X86::TEST32rr:NI = MakeMRInst(X86::TEST32mr,FrameIndex, MI); break;
+ case X86::TEST8ri: NI = MakeMIInst(X86::TEST8mi ,FrameIndex, MI); break;
+ case X86::TEST16ri:NI = MakeMIInst(X86::TEST16mi,FrameIndex, MI); break;
+ case X86::TEST32ri:NI = MakeMIInst(X86::TEST32mi,FrameIndex, MI); break;
+ case X86::CMP8rr: NI = MakeMRInst(X86::CMP8mr , FrameIndex, MI); break;
+ case X86::CMP16rr: NI = MakeMRInst(X86::CMP16mr, FrameIndex, MI); break;
+ case X86::CMP32rr: NI = MakeMRInst(X86::CMP32mr, FrameIndex, MI); break;
+ case X86::CMP8ri: NI = MakeMIInst(X86::CMP8mi , FrameIndex, MI); break;
+ case X86::CMP16ri: NI = MakeMIInst(X86::CMP16mi, FrameIndex, MI); break;
+ case X86::CMP32ri: NI = MakeMIInst(X86::CMP32mi, FrameIndex, MI); break;
default: break; // Cannot fold
}
} else if (i == 1) {
switch(MI->getOpcode()) {
- case X86::XCHGrr8: NI = MakeRMInst(X86::XCHGrm8 ,FrameIndex, MI); break;
- case X86::XCHGrr16:NI = MakeRMInst(X86::XCHGrm16,FrameIndex, MI); break;
- case X86::XCHGrr32:NI = MakeRMInst(X86::XCHGrm32,FrameIndex, MI); break;
- case X86::MOVrr8: NI = MakeRMInst(X86::MOVrm8 , FrameIndex, MI); break;
- case X86::MOVrr16: NI = MakeRMInst(X86::MOVrm16, FrameIndex, MI); break;
- case X86::MOVrr32: NI = MakeRMInst(X86::MOVrm32, FrameIndex, MI); break;
- case X86::ADDrr8: NI = MakeRMInst(X86::ADDrm8 , FrameIndex, MI); break;
- case X86::ADDrr16: NI = MakeRMInst(X86::ADDrm16, FrameIndex, MI); break;
- case X86::ADDrr32: NI = MakeRMInst(X86::ADDrm32, FrameIndex, MI); break;
- case X86::ADCrr32: NI = MakeRMInst(X86::ADCrm32, FrameIndex, MI); break;
- case X86::SUBrr8: NI = MakeRMInst(X86::SUBrm8 , FrameIndex, MI); break;
- case X86::SUBrr16: NI = MakeRMInst(X86::SUBrm16, FrameIndex, MI); break;
- case X86::SUBrr32: NI = MakeRMInst(X86::SUBrm32, FrameIndex, MI); break;
- case X86::SBBrr32: NI = MakeRMInst(X86::SBBrm32, FrameIndex, MI); break;
- case X86::ANDrr8: NI = MakeRMInst(X86::ANDrm8 , FrameIndex, MI); break;
- case X86::ANDrr16: NI = MakeRMInst(X86::ANDrm16, FrameIndex, MI); break;
- case X86::ANDrr32: NI = MakeRMInst(X86::ANDrm32, FrameIndex, MI); break;
- case X86::ORrr8: NI = MakeRMInst(X86::ORrm8 , FrameIndex, MI); break;
- case X86::ORrr16: NI = MakeRMInst(X86::ORrm16, FrameIndex, MI); break;
- case X86::ORrr32: NI = MakeRMInst(X86::ORrm32, FrameIndex, MI); break;
- case X86::XORrr8: NI = MakeRMInst(X86::XORrm8 , FrameIndex, MI); break;
- case X86::XORrr16: NI = MakeRMInst(X86::XORrm16, FrameIndex, MI); break;
- case X86::XORrr32: NI = MakeRMInst(X86::XORrm32, FrameIndex, MI); break;
- case X86::TESTrr8: NI = MakeRMInst(X86::TESTrm8 ,FrameIndex, MI); break;
- case X86::TESTrr16:NI = MakeRMInst(X86::TESTrm16,FrameIndex, MI); break;
- case X86::TESTrr32:NI = MakeRMInst(X86::TESTrm32,FrameIndex, MI); break;
- case X86::IMULrr16:NI = MakeRMInst(X86::IMULrm16,FrameIndex, MI); break;
- case X86::IMULrr32:NI = MakeRMInst(X86::IMULrm32,FrameIndex, MI); break;
- case X86::IMULrri16: NI = MakeRMIInst(X86::IMULrmi16, FrameIndex, MI);break;
- case X86::IMULrri32: NI = MakeRMIInst(X86::IMULrmi32, FrameIndex, MI);break;
- case X86::CMPrr8: NI = MakeRMInst(X86::CMPrm8 , FrameIndex, MI); break;
- case X86::CMPrr16: NI = MakeRMInst(X86::CMPrm16, FrameIndex, MI); break;
- case X86::CMPrr32: NI = MakeRMInst(X86::CMPrm32, FrameIndex, MI); break;
- case X86::MOVSXr16r8: NI = MakeRMInst(X86::MOVSXr16m8 , FrameIndex, MI); break;
- case X86::MOVSXr32r8: NI = MakeRMInst(X86::MOVSXr32m8, FrameIndex, MI); break;
- case X86::MOVSXr32r16:NI = MakeRMInst(X86::MOVSXr32m16, FrameIndex, MI); break;
- case X86::MOVZXr16r8: NI = MakeRMInst(X86::MOVZXr16m8 , FrameIndex, MI); break;
- case X86::MOVZXr32r8: NI = MakeRMInst(X86::MOVZXr32m8, FrameIndex, MI); break;
- case X86::MOVZXr32r16:NI = MakeRMInst(X86::MOVZXr32m16, FrameIndex, MI); break;
+ case X86::XCHG8rr: NI = MakeRMInst(X86::XCHG8rm ,FrameIndex, MI); break;
+ case X86::XCHG16rr:NI = MakeRMInst(X86::XCHG16rm,FrameIndex, MI); break;
+ case X86::XCHG32rr:NI = MakeRMInst(X86::XCHG32rm,FrameIndex, MI); break;
+ case X86::MOV8rr: NI = MakeRMInst(X86::MOV8rm , FrameIndex, MI); break;
+ case X86::MOV16rr: NI = MakeRMInst(X86::MOV16rm, FrameIndex, MI); break;
+ case X86::MOV32rr: NI = MakeRMInst(X86::MOV32rm, FrameIndex, MI); break;
+ case X86::ADD8rr: NI = MakeRMInst(X86::ADD8rm , FrameIndex, MI); break;
+ case X86::ADD16rr: NI = MakeRMInst(X86::ADD16rm, FrameIndex, MI); break;
+ case X86::ADD32rr: NI = MakeRMInst(X86::ADD32rm, FrameIndex, MI); break;
+ case X86::ADC32rr: NI = MakeRMInst(X86::ADC32rm, FrameIndex, MI); break;
+ case X86::SUB8rr: NI = MakeRMInst(X86::SUB8rm , FrameIndex, MI); break;
+ case X86::SUB16rr: NI = MakeRMInst(X86::SUB16rm, FrameIndex, MI); break;
+ case X86::SUB32rr: NI = MakeRMInst(X86::SUB32rm, FrameIndex, MI); break;
+ case X86::SBB32rr: NI = MakeRMInst(X86::SBB32rm, FrameIndex, MI); break;
+ case X86::AND8rr: NI = MakeRMInst(X86::AND8rm , FrameIndex, MI); break;
+ case X86::AND16rr: NI = MakeRMInst(X86::AND16rm, FrameIndex, MI); break;
+ case X86::AND32rr: NI = MakeRMInst(X86::AND32rm, FrameIndex, MI); break;
+ case X86::OR8rr: NI = MakeRMInst(X86::OR8rm , FrameIndex, MI); break;
+ case X86::OR16rr: NI = MakeRMInst(X86::OR16rm, FrameIndex, MI); break;
+ case X86::OR32rr: NI = MakeRMInst(X86::OR32rm, FrameIndex, MI); break;
+ case X86::XOR8rr: NI = MakeRMInst(X86::XOR8rm , FrameIndex, MI); break;
+ case X86::XOR16rr: NI = MakeRMInst(X86::XOR16rm, FrameIndex, MI); break;
+ case X86::XOR32rr: NI = MakeRMInst(X86::XOR32rm, FrameIndex, MI); break;
+ case X86::TEST8rr: NI = MakeRMInst(X86::TEST8rm ,FrameIndex, MI); break;
+ case X86::TEST16rr:NI = MakeRMInst(X86::TEST16rm,FrameIndex, MI); break;
+ case X86::TEST32rr:NI = MakeRMInst(X86::TEST32rm,FrameIndex, MI); break;
+ case X86::IMUL16rr:NI = MakeRMInst(X86::IMUL16rm,FrameIndex, MI); break;
+ case X86::IMUL32rr:NI = MakeRMInst(X86::IMUL32rm,FrameIndex, MI); break;
+ case X86::IMUL16rri: NI = MakeRMIInst(X86::IMUL16rmi, FrameIndex, MI);break;
+ case X86::IMUL32rri: NI = MakeRMIInst(X86::IMUL32rmi, FrameIndex, MI);break;
+ case X86::CMP8rr: NI = MakeRMInst(X86::CMP8rm , FrameIndex, MI); break;
+ case X86::CMP16rr: NI = MakeRMInst(X86::CMP16rm, FrameIndex, MI); break;
+ case X86::CMP32rr: NI = MakeRMInst(X86::CMP32rm, FrameIndex, MI); break;
+ case X86::MOVSX16rr8: NI = MakeRMInst(X86::MOVSX16rm8 , FrameIndex, MI); break;
+ case X86::MOVSX32rr8: NI = MakeRMInst(X86::MOVSX32rm8, FrameIndex, MI); break;
+ case X86::MOVSX32rr16:NI = MakeRMInst(X86::MOVSX32rm16, FrameIndex, MI); break;
+ case X86::MOVZX16rr8: NI = MakeRMInst(X86::MOVZX16rm8 , FrameIndex, MI); break;
+ case X86::MOVZX32rr8: NI = MakeRMInst(X86::MOVZX32rm8, FrameIndex, MI); break;
+ case X86::MOVZX32rr16:NI = MakeRMInst(X86::MOVZX32rm16, FrameIndex, MI); break;
default: break;
}
}
@@ -336,11 +336,11 @@
MachineInstr *New;
if (Old->getOpcode() == X86::ADJCALLSTACKDOWN) {
- New=BuildMI(X86::SUBri32, 1, X86::ESP, MachineOperand::UseAndDef)
+ New=BuildMI(X86::SUB32ri, 1, X86::ESP, MachineOperand::UseAndDef)
.addZImm(Amount);
} else {
assert(Old->getOpcode() == X86::ADJCALLSTACKUP);
- New=BuildMI(X86::ADDri32, 1, X86::ESP, MachineOperand::UseAndDef)
+ New=BuildMI(X86::ADD32ri, 1, X86::ESP, MachineOperand::UseAndDef)
.addZImm(Amount);
}
@@ -403,21 +403,21 @@
int EBPOffset = MFI->getObjectOffset(MFI->getObjectIndexBegin())+4;
if (NumBytes) { // adjust stack pointer: ESP -= numbytes
- MI= BuildMI(X86::SUBri32, 1, X86::ESP, MachineOperand::UseAndDef)
+ MI= BuildMI(X86::SUB32ri, 1, X86::ESP, MachineOperand::UseAndDef)
.addZImm(NumBytes);
MBB.insert(MBBI, MI);
}
// Save EBP into the appropriate stack slot...
- MI = addRegOffset(BuildMI(X86::MOVmr32, 5), // mov [ESP-<offset>], EBP
+ MI = addRegOffset(BuildMI(X86::MOV32mr, 5), // mov [ESP-<offset>], EBP
X86::ESP, EBPOffset+NumBytes).addReg(X86::EBP);
MBB.insert(MBBI, MI);
// Update EBP with the new base value...
if (NumBytes == 4) // mov EBP, ESP
- MI = BuildMI(X86::MOVrr32, 2, X86::EBP).addReg(X86::ESP);
+ MI = BuildMI(X86::MOV32rr, 2, X86::EBP).addReg(X86::ESP);
else // lea EBP, [ESP+StackSize]
- MI = addRegOffset(BuildMI(X86::LEAr32, 5, X86::EBP), X86::ESP,NumBytes-4);
+ MI = addRegOffset(BuildMI(X86::LEA32r, 5, X86::EBP), X86::ESP,NumBytes-4);
MBB.insert(MBBI, MI);
@@ -440,7 +440,7 @@
if (NumBytes) {
// adjust stack pointer: ESP -= numbytes
- MI= BuildMI(X86::SUBri32, 1, X86::ESP, MachineOperand::UseAndDef)
+ MI= BuildMI(X86::SUB32ri, 1, X86::ESP, MachineOperand::UseAndDef)
.addZImm(NumBytes);
MBB.insert(MBBI, MI);
}
@@ -461,18 +461,18 @@
int EBPOffset = MFI->getObjectOffset(MFI->getObjectIndexEnd()-1)+4;
// mov ESP, EBP
- MI = BuildMI(X86::MOVrr32, 1,X86::ESP).addReg(X86::EBP);
+ MI = BuildMI(X86::MOV32rr, 1,X86::ESP).addReg(X86::EBP);
MBB.insert(MBBI, MI);
// pop EBP
- MI = BuildMI(X86::POPr32, 0, X86::EBP);
+ MI = BuildMI(X86::POP32r, 0, X86::EBP);
MBB.insert(MBBI, MI);
} else {
// Get the number of bytes allocated from the FrameInfo...
unsigned NumBytes = MFI->getStackSize();
if (NumBytes) { // adjust stack pointer back: ESP += numbytes
- MI =BuildMI(X86::ADDri32, 1, X86::ESP, MachineOperand::UseAndDef)
+ MI =BuildMI(X86::ADD32ri, 1, X86::ESP, MachineOperand::UseAndDef)
.addZImm(NumBytes);
MBB.insert(MBBI, MI);
}
Index: llvm/lib/Target/X86/X86InstrInfo.td
diff -u llvm/lib/Target/X86/X86InstrInfo.td:1.56 llvm/lib/Target/X86/X86InstrInfo.td:1.57
--- llvm/lib/Target/X86/X86InstrInfo.td:1.56 Sat Feb 28 20:18:17 2004
+++ llvm/lib/Target/X86/X86InstrInfo.td Sun Feb 29 02:50:03 2004
@@ -189,8 +189,8 @@
// All calls clobber the non-callee saved registers...
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6] in {
def CALLpcrel32 : I <"call", 0xE8, RawFrm>;
- def CALLr32 : I <"call", 0xFF, MRM2r>;
- def CALLm32 : Im32<"call", 0xFF, MRM2m>;
+ def CALL32r : I <"call", 0xFF, MRM2r>;
+ def CALL32m : Im32<"call", 0xFF, MRM2m>;
}
@@ -198,23 +198,23 @@
// Miscellaneous Instructions...
//
def LEAVE : I<"leave", 0xC9, RawFrm>, Imp<[EBP,ESP],[EBP,ESP]>;
-def POPr32 : I<"pop", 0x58, AddRegFrm>, Imp<[ESP],[ESP]>;
+def POP32r : I<"pop", 0x58, AddRegFrm>, Imp<[ESP],[ESP]>;
let isTwoAddress = 1 in // R32 = bswap R32
- def BSWAPr32 : I<"bswap", 0xC8, AddRegFrm>, TB;
+ def BSWAP32r : I<"bswap", 0xC8, AddRegFrm>, TB;
-def XCHGrr8 : I <"xchg", 0x86, MRMDestReg>; // xchg R8, R8
-def XCHGrr16 : I <"xchg", 0x87, MRMDestReg>, OpSize; // xchg R16, R16
-def XCHGrr32 : I <"xchg", 0x87, MRMDestReg>; // xchg R32, R32
-def XCHGmr8 : Im8 <"xchg", 0x86, MRMDestMem>; // xchg [mem8], R8
-def XCHGmr16 : Im16<"xchg", 0x87, MRMDestMem>, OpSize; // xchg [mem16], R16
-def XCHGmr32 : Im32<"xchg", 0x87, MRMDestMem>; // xchg [mem32], R32
-def XCHGrm8 : Im8 <"xchg", 0x86, MRMSrcMem >; // xchg R8, [mem8]
-def XCHGrm16 : Im16<"xchg", 0x87, MRMSrcMem >, OpSize; // xchg R16, [mem16]
-def XCHGrm32 : Im32<"xchg", 0x87, MRMSrcMem >; // xchg R32, [mem32]
+def XCHG8rr : I <"xchg", 0x86, MRMDestReg>; // xchg R8, R8
+def XCHG16rr : I <"xchg", 0x87, MRMDestReg>, OpSize; // xchg R16, R16
+def XCHG32rr : I <"xchg", 0x87, MRMDestReg>; // xchg R32, R32
+def XCHG8mr : Im8 <"xchg", 0x86, MRMDestMem>; // xchg [mem8], R8
+def XCHG16mr : Im16<"xchg", 0x87, MRMDestMem>, OpSize; // xchg [mem16], R16
+def XCHG32mr : Im32<"xchg", 0x87, MRMDestMem>; // xchg [mem32], R32
+def XCHG8rm : Im8 <"xchg", 0x86, MRMSrcMem >; // xchg R8, [mem8]
+def XCHG16rm : Im16<"xchg", 0x87, MRMSrcMem >, OpSize; // xchg R16, [mem16]
+def XCHG32rm : Im32<"xchg", 0x87, MRMSrcMem >; // xchg R32, [mem32]
-def LEAr16 : Im32<"lea", 0x8D, MRMSrcMem>, OpSize; // R16 = lea [mem]
-def LEAr32 : Im32<"lea", 0x8D, MRMSrcMem>; // R32 = lea [mem]
+def LEA16r : Im32<"lea", 0x8D, MRMSrcMem>, OpSize; // R16 = lea [mem]
+def LEA32r : Im32<"lea", 0x8D, MRMSrcMem>; // R32 = lea [mem]
def REP_MOVSB : I<"rep movsb", 0xA4, RawFrm>, REP,
@@ -234,53 +234,53 @@
//===----------------------------------------------------------------------===//
// Move Instructions...
//
-def MOVrr8 : I <"mov", 0x88, MRMDestReg>, Pattern<(set R8 , R8 )>;
-def MOVrr16 : I <"mov", 0x89, MRMDestReg>, OpSize, Pattern<(set R16, R16)>;
-def MOVrr32 : I <"mov", 0x89, MRMDestReg>, Pattern<(set R32, R32)>;
-def MOVri8 : Ii8 <"mov", 0xB0, AddRegFrm >, Pattern<(set R8 , imm )>;
-def MOVri16 : Ii16 <"mov", 0xB8, AddRegFrm >, OpSize, Pattern<(set R16, imm)>;
-def MOVri32 : Ii32 <"mov", 0xB8, AddRegFrm >, Pattern<(set R32, imm)>;
-def MOVmi8 : Im8i8 <"mov", 0xC6, MRM0m >; // [mem8] = imm8
-def MOVmi16 : Im16i16<"mov", 0xC7, MRM0m >, OpSize; // [mem16] = imm16
-def MOVmi32 : Im32i32<"mov", 0xC7, MRM0m >; // [mem32] = imm32
+def MOV8rr : I <"mov", 0x88, MRMDestReg>, Pattern<(set R8 , R8 )>;
+def MOV16rr : I <"mov", 0x89, MRMDestReg>, OpSize, Pattern<(set R16, R16)>;
+def MOV32rr : I <"mov", 0x89, MRMDestReg>, Pattern<(set R32, R32)>;
+def MOV8ri : Ii8 <"mov", 0xB0, AddRegFrm >, Pattern<(set R8 , imm )>;
+def MOV16ri : Ii16 <"mov", 0xB8, AddRegFrm >, OpSize, Pattern<(set R16, imm)>;
+def MOV32ri : Ii32 <"mov", 0xB8, AddRegFrm >, Pattern<(set R32, imm)>;
+def MOV8mi : Im8i8 <"mov", 0xC6, MRM0m >; // [mem8] = imm8
+def MOV16mi : Im16i16<"mov", 0xC7, MRM0m >, OpSize; // [mem16] = imm16
+def MOV32mi : Im32i32<"mov", 0xC7, MRM0m >; // [mem32] = imm32
-def MOVrm8 : Im8 <"mov", 0x8A, MRMSrcMem>; // R8 = [mem8]
-def MOVrm16 : Im16 <"mov", 0x8B, MRMSrcMem>, OpSize, // R16 = [mem16]
+def MOV8rm : Im8 <"mov", 0x8A, MRMSrcMem>; // R8 = [mem8]
+def MOV16rm : Im16 <"mov", 0x8B, MRMSrcMem>, OpSize, // R16 = [mem16]
Pattern<(set R16, (load (plus R32, (plus (times imm, R32), imm))))>;
-def MOVrm32 : Im32 <"mov", 0x8B, MRMSrcMem>, // R32 = [mem32]
+def MOV32rm : Im32 <"mov", 0x8B, MRMSrcMem>, // R32 = [mem32]
Pattern<(set R32, (load (plus R32, (plus (times imm, R32), imm))))>;
-def MOVmr8 : Im8 <"mov", 0x88, MRMDestMem>; // [mem8] = R8
-def MOVmr16 : Im16 <"mov", 0x89, MRMDestMem>, OpSize; // [mem16] = R16
-def MOVmr32 : Im32 <"mov", 0x89, MRMDestMem>; // [mem32] = R32
+def MOV8mr : Im8 <"mov", 0x88, MRMDestMem>; // [mem8] = R8
+def MOV16mr : Im16 <"mov", 0x89, MRMDestMem>, OpSize; // [mem16] = R16
+def MOV32mr : Im32 <"mov", 0x89, MRMDestMem>; // [mem32] = R32
//===----------------------------------------------------------------------===//
// Fixed-Register Multiplication and Division Instructions...
//
// Extra precision multiplication
-def MULr8 : I <"mul", 0xF6, MRM4r>, Imp<[AL],[AX]>; // AL,AH = AL*R8
-def MULr16 : I <"mul", 0xF7, MRM4r>, Imp<[AX],[AX,DX]>, OpSize; // AX,DX = AX*R16
-def MULr32 : I <"mul", 0xF7, MRM4r>, Imp<[EAX],[EAX,EDX]>; // EAX,EDX = EAX*R32
-def MULm8 : Im8 <"mul", 0xF6, MRM4m>, Imp<[AL],[AX]>; // AL,AH = AL*[mem8]
-def MULm16 : Im16<"mul", 0xF7, MRM4m>, Imp<[AX],[AX,DX]>, OpSize; // AX,DX = AX*[mem16]
-def MULm32 : Im32<"mul", 0xF7, MRM4m>, Imp<[EAX],[EAX,EDX]>; // EAX,EDX = EAX*[mem32]
+def MUL8r : I <"mul", 0xF6, MRM4r>, Imp<[AL],[AX]>; // AL,AH = AL*R8
+def MUL16r : I <"mul", 0xF7, MRM4r>, Imp<[AX],[AX,DX]>, OpSize; // AX,DX = AX*R16
+def MUL32r : I <"mul", 0xF7, MRM4r>, Imp<[EAX],[EAX,EDX]>; // EAX,EDX = EAX*R32
+def MUL8m : Im8 <"mul", 0xF6, MRM4m>, Imp<[AL],[AX]>; // AL,AH = AL*[mem8]
+def MUL16m : Im16<"mul", 0xF7, MRM4m>, Imp<[AX],[AX,DX]>, OpSize; // AX,DX = AX*[mem16]
+def MUL32m : Im32<"mul", 0xF7, MRM4m>, Imp<[EAX],[EAX,EDX]>; // EAX,EDX = EAX*[mem32]
// unsigned division/remainder
-def DIVr8 : I <"div", 0xF6, MRM6r>, Imp<[AX],[AX]>; // AX/r8 = AL,AH
-def DIVr16 : I <"div", 0xF7, MRM6r>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/r16 = AX,DX
-def DIVr32 : I <"div", 0xF7, MRM6r>, Imp<[EAX,EDX],[EAX,EDX]>; // EDX:EAX/r32 = EAX,EDX
-def DIVm8 : Im8 <"div", 0xF6, MRM6m>, Imp<[AX],[AX]>; // AX/[mem8] = AL,AH
-def DIVm16 : Im16<"div", 0xF7, MRM6m>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/[mem16] = AX,DX
-def DIVm32 : Im32<"div", 0xF7, MRM6m>, Imp<[EAX,EDX],[EAX,EDX]>; // EDX:EAX/[mem32] = EAX,EDX
+def DIV8r : I <"div", 0xF6, MRM6r>, Imp<[AX],[AX]>; // AX/r8 = AL,AH
+def DIV16r : I <"div", 0xF7, MRM6r>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/r16 = AX,DX
+def DIV32r : I <"div", 0xF7, MRM6r>, Imp<[EAX,EDX],[EAX,EDX]>; // EDX:EAX/r32 = EAX,EDX
+def DIV8m : Im8 <"div", 0xF6, MRM6m>, Imp<[AX],[AX]>; // AX/[mem8] = AL,AH
+def DIV16m : Im16<"div", 0xF7, MRM6m>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/[mem16] = AX,DX
+def DIV32m : Im32<"div", 0xF7, MRM6m>, Imp<[EAX,EDX],[EAX,EDX]>; // EDX:EAX/[mem32] = EAX,EDX
// signed division/remainder
-def IDIVr8 : I <"idiv",0xF6, MRM7r>, Imp<[AX],[AX]>; // AX/r8 = AL,AH
-def IDIVr16: I <"idiv",0xF7, MRM7r>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/r16 = AX,DX
-def IDIVr32: I <"idiv",0xF7, MRM7r>, Imp<[EAX,EDX],[EAX,EDX]>; // EDX:EAX/r32 = EAX,EDX
-def IDIVm8 : Im8 <"idiv",0xF6, MRM7m>, Imp<[AX],[AX]>; // AX/[mem8] = AL,AH
-def IDIVm16: Im16<"idiv",0xF7, MRM7m>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/[mem16] = AX,DX
-def IDIVm32: Im32<"idiv",0xF7, MRM7m>, Imp<[EAX,EDX],[EAX,EDX]>; // EDX:EAX/[mem32] = EAX,EDX
+def IDIV8r : I <"idiv",0xF6, MRM7r>, Imp<[AX],[AX]>; // AX/r8 = AL,AH
+def IDIV16r: I <"idiv",0xF7, MRM7r>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/r16 = AX,DX
+def IDIV32r: I <"idiv",0xF7, MRM7r>, Imp<[EAX,EDX],[EAX,EDX]>; // EDX:EAX/r32 = EAX,EDX
+def IDIV8m : Im8 <"idiv",0xF6, MRM7m>, Imp<[AX],[AX]>; // AX/[mem8] = AL,AH
+def IDIV16m: Im16<"idiv",0xF7, MRM7m>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/[mem16] = AX,DX
+def IDIV32m: Im32<"idiv",0xF7, MRM7m>, Imp<[EAX,EDX],[EAX,EDX]>; // EDX:EAX/[mem32] = EAX,EDX
// Sign-extenders for division
def CBW : I<"cbw", 0x98, RawFrm >, Imp<[AL],[AH]>; // AX = signext(AL)
@@ -294,251 +294,251 @@
// Conditional moves. These are modelled as X = cmovXX Y, Z. Eventually
// register allocated to cmovXX XY, Z
-def CMOVErr16 : I<"cmove", 0x44, MRMSrcReg>, TB, OpSize; // if ==, R16 = R16
-def CMOVNErr32: I<"cmovne",0x45, MRMSrcReg>, TB; // if !=, R32 = R32
-def CMOVSrr32 : I<"cmovs", 0x48, MRMSrcReg>, TB; // if signed, R32 = R32
+def CMOVE16rr : I<"cmove", 0x44, MRMSrcReg>, TB, OpSize; // if ==, R16 = R16
+def CMOVNE32rr: I<"cmovne",0x45, MRMSrcReg>, TB; // if !=, R32 = R32
+def CMOVS32rr : I<"cmovs", 0x48, MRMSrcReg>, TB; // if signed, R32 = R32
// unary instructions
-def NEGr8 : I <"neg", 0xF6, MRM3r>; // R8 = -R8 = 0-R8
-def NEGr16 : I <"neg", 0xF7, MRM3r>, OpSize; // R16 = -R16 = 0-R16
-def NEGr32 : I <"neg", 0xF7, MRM3r>; // R32 = -R32 = 0-R32
-def NEGm8 : Im8 <"neg", 0xF6, MRM3m>; // [mem8] = -[mem8] = 0-[mem8]
-def NEGm16 : Im16<"neg", 0xF7, MRM3m>, OpSize; // [mem16] = -[mem16] = 0-[mem16]
-def NEGm32 : Im32<"neg", 0xF7, MRM3m>; // [mem32] = -[mem32] = 0-[mem32]
-
-def NOTr8 : I <"not", 0xF6, MRM2r>; // R8 = ~R8 = R8^-1
-def NOTr16 : I <"not", 0xF7, MRM2r>, OpSize; // R16 = ~R16 = R16^-1
-def NOTr32 : I <"not", 0xF7, MRM2r>; // R32 = ~R32 = R32^-1
-def NOTm8 : Im8 <"not", 0xF6, MRM2m>; // [mem8] = ~[mem8] = [mem8^-1]
-def NOTm16 : Im16<"not", 0xF7, MRM2m>, OpSize; // [mem16] = ~[mem16] = [mem16^-1]
-def NOTm32 : Im32<"not", 0xF7, MRM2m>; // [mem32] = ~[mem32] = [mem32^-1]
-
-def INCr8 : I <"inc", 0xFE, MRM0r>; // ++R8
-def INCr16 : I <"inc", 0xFF, MRM0r>, OpSize; // ++R16
-def INCr32 : I <"inc", 0xFF, MRM0r>; // ++R32
-def INCm8 : Im8 <"inc", 0xFE, MRM0m>; // ++R8
-def INCm16 : Im16<"inc", 0xFF, MRM0m>, OpSize; // ++R16
-def INCm32 : Im32<"inc", 0xFF, MRM0m>; // ++R32
-
-def DECr8 : I <"dec", 0xFE, MRM1r>; // --R8
-def DECr16 : I <"dec", 0xFF, MRM1r>, OpSize; // --R16
-def DECr32 : I <"dec", 0xFF, MRM1r>; // --R32
-def DECm8 : Im8 <"dec", 0xFE, MRM1m>; // --[mem8]
-def DECm16 : Im16<"dec", 0xFF, MRM1m>, OpSize; // --[mem16]
-def DECm32 : Im32<"dec", 0xFF, MRM1m>; // --[mem32]
+def NEG8r : I <"neg", 0xF6, MRM3r>; // R8 = -R8 = 0-R8
+def NEG16r : I <"neg", 0xF7, MRM3r>, OpSize; // R16 = -R16 = 0-R16
+def NEG32r : I <"neg", 0xF7, MRM3r>; // R32 = -R32 = 0-R32
+def NEG8m : Im8 <"neg", 0xF6, MRM3m>; // [mem8] = -[mem8] = 0-[mem8]
+def NEG16m : Im16<"neg", 0xF7, MRM3m>, OpSize; // [mem16] = -[mem16] = 0-[mem16]
+def NEG32m : Im32<"neg", 0xF7, MRM3m>; // [mem32] = -[mem32] = 0-[mem32]
+
+def NOT8r : I <"not", 0xF6, MRM2r>; // R8 = ~R8 = R8^-1
+def NOT16r : I <"not", 0xF7, MRM2r>, OpSize; // R16 = ~R16 = R16^-1
+def NOT32r : I <"not", 0xF7, MRM2r>; // R32 = ~R32 = R32^-1
+def NOT8m : Im8 <"not", 0xF6, MRM2m>; // [mem8] = ~[mem8] = [mem8^-1]
+def NOT16m : Im16<"not", 0xF7, MRM2m>, OpSize; // [mem16] = ~[mem16] = [mem16^-1]
+def NOT32m : Im32<"not", 0xF7, MRM2m>; // [mem32] = ~[mem32] = [mem32^-1]
+
+def INC8r : I <"inc", 0xFE, MRM0r>; // ++R8
+def INC16r : I <"inc", 0xFF, MRM0r>, OpSize; // ++R16
+def INC32r : I <"inc", 0xFF, MRM0r>; // ++R32
+def INC8m : Im8 <"inc", 0xFE, MRM0m>; // ++R8
+def INC16m : Im16<"inc", 0xFF, MRM0m>, OpSize; // ++R16
+def INC32m : Im32<"inc", 0xFF, MRM0m>; // ++R32
+
+def DEC8r : I <"dec", 0xFE, MRM1r>; // --R8
+def DEC16r : I <"dec", 0xFF, MRM1r>, OpSize; // --R16
+def DEC32r : I <"dec", 0xFF, MRM1r>; // --R32
+def DEC8m : Im8 <"dec", 0xFE, MRM1m>; // --[mem8]
+def DEC16m : Im16<"dec", 0xFF, MRM1m>, OpSize; // --[mem16]
+def DEC32m : Im32<"dec", 0xFF, MRM1m>; // --[mem32]
// Logical operators...
-def ANDrr8 : I <"and", 0x20, MRMDestReg>, Pattern<(set R8 , (and R8 , R8 ))>;
-def ANDrr16 : I <"and", 0x21, MRMDestReg>, OpSize, Pattern<(set R16, (and R16, R16))>;
-def ANDrr32 : I <"and", 0x21, MRMDestReg>, Pattern<(set R32, (and R32, R32))>;
-def ANDmr8 : Im8 <"and", 0x20, MRMDestMem>; // [mem8] &= R8
-def ANDmr16 : Im16 <"and", 0x21, MRMDestMem>, OpSize; // [mem16] &= R16
-def ANDmr32 : Im32 <"and", 0x21, MRMDestMem>; // [mem32] &= R32
-def ANDrm8 : Im8 <"and", 0x22, MRMSrcMem >; // R8 &= [mem8]
-def ANDrm16 : Im16 <"and", 0x23, MRMSrcMem >, OpSize; // R16 &= [mem16]
-def ANDrm32 : Im32 <"and", 0x23, MRMSrcMem >; // R32 &= [mem32]
-
-def ANDri8 : Ii8 <"and", 0x80, MRM4r >, Pattern<(set R8 , (and R8 , imm))>;
-def ANDri16 : Ii16 <"and", 0x81, MRM4r >, OpSize, Pattern<(set R16, (and R16, imm))>;
-def ANDri32 : Ii32 <"and", 0x81, MRM4r >, Pattern<(set R32, (and R32, imm))>;
-def ANDmi8 : Im8i8 <"and", 0x80, MRM4m >; // [mem8] &= imm8
-def ANDmi16 : Im16i16 <"and", 0x81, MRM4m >, OpSize; // [mem16] &= imm16
-def ANDmi32 : Im32i32 <"and", 0x81, MRM4m >; // [mem32] &= imm32
-
-def ANDri16b : Ii8 <"and", 0x83, MRM4r >, OpSize; // R16 &= imm8
-def ANDri32b : Ii8 <"and", 0x83, MRM4r >; // R32 &= imm8
-def ANDmi16b : Im16i8<"and", 0x83, MRM4m >, OpSize; // [mem16] &= imm8
-def ANDmi32b : Im32i8<"and", 0x83, MRM4m >; // [mem32] &= imm8
-
-
-def ORrr8 : I <"or" , 0x08, MRMDestReg>, Pattern<(set R8 , (or R8 , R8 ))>;
-def ORrr16 : I <"or" , 0x09, MRMDestReg>, OpSize, Pattern<(set R16, (or R16, R16))>;
-def ORrr32 : I <"or" , 0x09, MRMDestReg>, Pattern<(set R32, (or R32, R32))>;
-def ORmr8 : Im8 <"or" , 0x08, MRMDestMem>; // [mem8] |= R8
-def ORmr16 : Im16 <"or" , 0x09, MRMDestMem>, OpSize; // [mem16] |= R16
-def ORmr32 : Im32 <"or" , 0x09, MRMDestMem>; // [mem32] |= R32
-def ORrm8 : Im8 <"or" , 0x0A, MRMSrcMem >; // R8 |= [mem8]
-def ORrm16 : Im16 <"or" , 0x0B, MRMSrcMem >, OpSize; // R16 |= [mem16]
-def ORrm32 : Im32 <"or" , 0x0B, MRMSrcMem >; // R32 |= [mem32]
-
-def ORri8 : Ii8 <"or" , 0x80, MRM1r >, Pattern<(set R8 , (or R8 , imm))>;
-def ORri16 : Ii16 <"or" , 0x81, MRM1r >, OpSize, Pattern<(set R16, (or R16, imm))>;
-def ORri32 : Ii32 <"or" , 0x81, MRM1r >, Pattern<(set R32, (or R32, imm))>;
-def ORmi8 : Im8i8 <"or" , 0x80, MRM1m >; // [mem8] |= imm8
-def ORmi16 : Im16i16 <"or" , 0x81, MRM1m >, OpSize; // [mem16] |= imm16
-def ORmi32 : Im32i32 <"or" , 0x81, MRM1m >; // [mem32] |= imm32
-
-def ORri16b : Ii8 <"or" , 0x83, MRM1r >, OpSize; // R16 |= imm8
-def ORri32b : Ii8 <"or" , 0x83, MRM1r >; // R32 |= imm8
-def ORmi16b : Im16i8<"or" , 0x83, MRM1m >, OpSize; // [mem16] |= imm8
-def ORmi32b : Im32i8<"or" , 0x83, MRM1m >; // [mem32] |= imm8
-
-
-def XORrr8 : I <"xor", 0x30, MRMDestReg>, Pattern<(set R8 , (xor R8 , R8 ))>;
-def XORrr16 : I <"xor", 0x31, MRMDestReg>, OpSize, Pattern<(set R16, (xor R16, R16))>;
-def XORrr32 : I <"xor", 0x31, MRMDestReg>, Pattern<(set R32, (xor R32, R32))>;
-def XORmr8 : Im8 <"xor", 0x30, MRMDestMem>; // [mem8] ^= R8
-def XORmr16 : Im16 <"xor", 0x31, MRMDestMem>, OpSize; // [mem16] ^= R16
-def XORmr32 : Im32 <"xor", 0x31, MRMDestMem>; // [mem32] ^= R32
-def XORrm8 : Im8 <"xor", 0x32, MRMSrcMem >; // R8 ^= [mem8]
-def XORrm16 : Im16 <"xor", 0x33, MRMSrcMem >, OpSize; // R16 ^= [mem16]
-def XORrm32 : Im32 <"xor", 0x33, MRMSrcMem >; // R32 ^= [mem32]
-
-def XORri8 : Ii8 <"xor", 0x80, MRM6r >, Pattern<(set R8 , (xor R8 , imm))>;
-def XORri16 : Ii16 <"xor", 0x81, MRM6r >, OpSize, Pattern<(set R16, (xor R16, imm))>;
-def XORri32 : Ii32 <"xor", 0x81, MRM6r >, Pattern<(set R32, (xor R32, imm))>;
-def XORmi8 : Im8i8 <"xor", 0x80, MRM6m >; // [mem8] ^= R8
-def XORmi16 : Im16i16 <"xor", 0x81, MRM6m >, OpSize; // [mem16] ^= R16
-def XORmi32 : Im32i32 <"xor", 0x81, MRM6m >; // [mem32] ^= R32
-
-def XORri16b : Ii8 <"xor", 0x83, MRM6r >, OpSize; // R16 ^= imm8
-def XORri32b : Ii8 <"xor", 0x83, MRM6r >; // R32 ^= imm8
-def XORmi16b : Im16i8<"xor", 0x83, MRM6m >, OpSize; // [mem16] ^= imm8
-def XORmi32b : Im32i8<"xor", 0x83, MRM6m >; // [mem32] ^= imm8
+def AND8rr : I <"and", 0x20, MRMDestReg>, Pattern<(set R8 , (and R8 , R8 ))>;
+def AND16rr : I <"and", 0x21, MRMDestReg>, OpSize, Pattern<(set R16, (and R16, R16))>;
+def AND32rr : I <"and", 0x21, MRMDestReg>, Pattern<(set R32, (and R32, R32))>;
+def AND8mr : Im8 <"and", 0x20, MRMDestMem>; // [mem8] &= R8
+def AND16mr : Im16 <"and", 0x21, MRMDestMem>, OpSize; // [mem16] &= R16
+def AND32mr : Im32 <"and", 0x21, MRMDestMem>; // [mem32] &= R32
+def AND8rm : Im8 <"and", 0x22, MRMSrcMem >; // R8 &= [mem8]
+def AND16rm : Im16 <"and", 0x23, MRMSrcMem >, OpSize; // R16 &= [mem16]
+def AND32rm : Im32 <"and", 0x23, MRMSrcMem >; // R32 &= [mem32]
+
+def AND8ri : Ii8 <"and", 0x80, MRM4r >, Pattern<(set R8 , (and R8 , imm))>;
+def AND16ri : Ii16 <"and", 0x81, MRM4r >, OpSize, Pattern<(set R16, (and R16, imm))>;
+def AND32ri : Ii32 <"and", 0x81, MRM4r >, Pattern<(set R32, (and R32, imm))>;
+def AND8mi : Im8i8 <"and", 0x80, MRM4m >; // [mem8] &= imm8
+def AND16mi : Im16i16 <"and", 0x81, MRM4m >, OpSize; // [mem16] &= imm16
+def AND32mi : Im32i32 <"and", 0x81, MRM4m >; // [mem32] &= imm32
+
+def AND16ri8 : Ii8 <"and", 0x83, MRM4r >, OpSize; // R16 &= imm8
+def AND32ri8 : Ii8 <"and", 0x83, MRM4r >; // R32 &= imm8
+def AND16mi8 : Im16i8<"and", 0x83, MRM4m >, OpSize; // [mem16] &= imm8
+def AND32mi8 : Im32i8<"and", 0x83, MRM4m >; // [mem32] &= imm8
+
+
+def OR8rr : I <"or" , 0x08, MRMDestReg>, Pattern<(set R8 , (or R8 , R8 ))>;
+def OR16rr : I <"or" , 0x09, MRMDestReg>, OpSize, Pattern<(set R16, (or R16, R16))>;
+def OR32rr : I <"or" , 0x09, MRMDestReg>, Pattern<(set R32, (or R32, R32))>;
+def OR8mr : Im8 <"or" , 0x08, MRMDestMem>; // [mem8] |= R8
+def OR16mr : Im16 <"or" , 0x09, MRMDestMem>, OpSize; // [mem16] |= R16
+def OR32mr : Im32 <"or" , 0x09, MRMDestMem>; // [mem32] |= R32
+def OR8rm : Im8 <"or" , 0x0A, MRMSrcMem >; // R8 |= [mem8]
+def OR16rm : Im16 <"or" , 0x0B, MRMSrcMem >, OpSize; // R16 |= [mem16]
+def OR32rm : Im32 <"or" , 0x0B, MRMSrcMem >; // R32 |= [mem32]
+
+def OR8ri : Ii8 <"or" , 0x80, MRM1r >, Pattern<(set R8 , (or R8 , imm))>;
+def OR16ri : Ii16 <"or" , 0x81, MRM1r >, OpSize, Pattern<(set R16, (or R16, imm))>;
+def OR32ri : Ii32 <"or" , 0x81, MRM1r >, Pattern<(set R32, (or R32, imm))>;
+def OR8mi : Im8i8 <"or" , 0x80, MRM1m >; // [mem8] |= imm8
+def OR16mi : Im16i16 <"or" , 0x81, MRM1m >, OpSize; // [mem16] |= imm16
+def OR32mi : Im32i32 <"or" , 0x81, MRM1m >; // [mem32] |= imm32
+
+def OR16ri8 : Ii8 <"or" , 0x83, MRM1r >, OpSize; // R16 |= imm8
+def OR32ri8 : Ii8 <"or" , 0x83, MRM1r >; // R32 |= imm8
+def OR16mi8 : Im16i8<"or" , 0x83, MRM1m >, OpSize; // [mem16] |= imm8
+def OR32mi8 : Im32i8<"or" , 0x83, MRM1m >; // [mem32] |= imm8
+
+
+def XOR8rr : I <"xor", 0x30, MRMDestReg>, Pattern<(set R8 , (xor R8 , R8 ))>;
+def XOR16rr : I <"xor", 0x31, MRMDestReg>, OpSize, Pattern<(set R16, (xor R16, R16))>;
+def XOR32rr : I <"xor", 0x31, MRMDestReg>, Pattern<(set R32, (xor R32, R32))>;
+def XOR8mr : Im8 <"xor", 0x30, MRMDestMem>; // [mem8] ^= R8
+def XOR16mr : Im16 <"xor", 0x31, MRMDestMem>, OpSize; // [mem16] ^= R16
+def XOR32mr : Im32 <"xor", 0x31, MRMDestMem>; // [mem32] ^= R32
+def XOR8rm : Im8 <"xor", 0x32, MRMSrcMem >; // R8 ^= [mem8]
+def XOR16rm : Im16 <"xor", 0x33, MRMSrcMem >, OpSize; // R16 ^= [mem16]
+def XOR32rm : Im32 <"xor", 0x33, MRMSrcMem >; // R32 ^= [mem32]
+
+def XOR8ri : Ii8 <"xor", 0x80, MRM6r >, Pattern<(set R8 , (xor R8 , imm))>;
+def XOR16ri : Ii16 <"xor", 0x81, MRM6r >, OpSize, Pattern<(set R16, (xor R16, imm))>;
+def XOR32ri : Ii32 <"xor", 0x81, MRM6r >, Pattern<(set R32, (xor R32, imm))>;
+def XOR8mi : Im8i8 <"xor", 0x80, MRM6m >; // [mem8] ^= R8
+def XOR16mi : Im16i16 <"xor", 0x81, MRM6m >, OpSize; // [mem16] ^= R16
+def XOR32mi : Im32i32 <"xor", 0x81, MRM6m >; // [mem32] ^= R32
+
+def XOR16ri8 : Ii8 <"xor", 0x83, MRM6r >, OpSize; // R16 ^= imm8
+def XOR32ri8 : Ii8 <"xor", 0x83, MRM6r >; // R32 ^= imm8
+def XOR16mi8 : Im16i8<"xor", 0x83, MRM6m >, OpSize; // [mem16] ^= imm8
+def XOR32mi8 : Im32i8<"xor", 0x83, MRM6m >; // [mem32] ^= imm8
// Shift instructions
-def SHLrCL8 : I <"shl", 0xD2, MRM4r > , UsesCL; // R8 <<= cl
-def SHLrCL16 : I <"shl", 0xD3, MRM4r >, OpSize, UsesCL; // R16 <<= cl
-def SHLrCL32 : I <"shl", 0xD3, MRM4r > , UsesCL; // R32 <<= cl
-def SHLmCL8 : Im8 <"shl", 0xD2, MRM4m > , UsesCL; // [mem8] <<= cl
-def SHLmCL16 : Im16 <"shl", 0xD3, MRM4m >, OpSize, UsesCL; // [mem16] <<= cl
-def SHLmCL32 : Im32 <"shl", 0xD3, MRM4m > , UsesCL; // [mem32] <<= cl
-
-def SHLri8 : Ii8 <"shl", 0xC0, MRM4r >; // R8 <<= imm8
-def SHLri16 : Ii8 <"shl", 0xC1, MRM4r >, OpSize; // R16 <<= imm8
-def SHLri32 : Ii8 <"shl", 0xC1, MRM4r >; // R32 <<= imm8
-def SHLmi8 : Im8i8 <"shl", 0xC0, MRM4m >; // [mem8] <<= imm8
-def SHLmi16 : Im16i8<"shl", 0xC1, MRM4m >, OpSize; // [mem16] <<= imm8
-def SHLmi32 : Im32i8<"shl", 0xC1, MRM4m >; // [mem32] <<= imm8
-
-def SHRrCL8 : I <"shr", 0xD2, MRM5r > , UsesCL; // R8 >>= cl
-def SHRrCL16 : I <"shr", 0xD3, MRM5r >, OpSize, UsesCL; // R16 >>= cl
-def SHRrCL32 : I <"shr", 0xD3, MRM5r > , UsesCL; // R32 >>= cl
-def SHRmCL8 : Im8 <"shr", 0xD2, MRM5m > , UsesCL; // [mem8] >>= cl
-def SHRmCL16 : Im16 <"shr", 0xD3, MRM5m >, OpSize, UsesCL; // [mem16] >>= cl
-def SHRmCL32 : Im32 <"shr", 0xD3, MRM5m > , UsesCL; // [mem32] >>= cl
-
-def SHRri8 : Ii8 <"shr", 0xC0, MRM5r >; // R8 >>= imm8
-def SHRri16 : Ii8 <"shr", 0xC1, MRM5r >, OpSize; // R16 >>= imm8
-def SHRri32 : Ii8 <"shr", 0xC1, MRM5r >; // R32 >>= imm8
-def SHRmi8 : Im8i8 <"shr", 0xC0, MRM5m >; // [mem8] >>= imm8
-def SHRmi16 : Im16i8<"shr", 0xC1, MRM5m >, OpSize; // [mem16] >>= imm8
-def SHRmi32 : Im32i8<"shr", 0xC1, MRM5m >; // [mem32] >>= imm8
-
-def SARrCL8 : I <"sar", 0xD2, MRM7r > , UsesCL; // R8 >>>= cl
-def SARrCL16 : I <"sar", 0xD3, MRM7r >, OpSize, UsesCL; // R16 >>>= cl
-def SARrCL32 : I <"sar", 0xD3, MRM7r > , UsesCL; // R32 >>>= cl
-def SARmCL8 : Im8 <"sar", 0xD2, MRM7m > , UsesCL; // [mem8] >>>= cl
-def SARmCL16 : Im16 <"sar", 0xD3, MRM7m >, OpSize, UsesCL; // [mem16] >>>= cl
-def SARmCL32 : Im32 <"sar", 0xD3, MRM7m > , UsesCL; // [mem32] >>>= cl
-
-def SARri8 : Ii8 <"sar", 0xC0, MRM7r >; // R8 >>>= imm8
-def SARri16 : Ii8 <"sar", 0xC1, MRM7r >, OpSize; // R16 >>>= imm8
-def SARri32 : Ii8 <"sar", 0xC1, MRM7r >; // R32 >>>= imm8
-def SARmi8 : Im8i8 <"sar", 0xC0, MRM7m >; // [mem8] >>>= imm8
-def SARmi16 : Im16i8<"sar", 0xC1, MRM7m >, OpSize; // [mem16] >>>= imm8
-def SARmi32 : Im32i8<"sar", 0xC1, MRM7m >; // [mem32] >>>= imm8
-
-def SHLDrrCL32 : I <"shld", 0xA5, MRMDestReg>, TB, UsesCL; // R32 <<= R32,R32 cl
-def SHLDmrCL32 : I <"shld", 0xA5, MRMDestMem>, TB, UsesCL; // [mem32] <<= [mem32],R32 cl
-def SHLDrr32i8 : Ii8 <"shld", 0xA4, MRMDestReg>, TB; // R32 <<= R32,R32 imm8
-def SHLDmr32i8 : Ii8 <"shld", 0xA4, MRMDestMem>, TB; // [mem32] <<= [mem32],R32 imm8
-
-def SHRDrrCL32 : I <"shrd", 0xAD, MRMDestReg>, TB, UsesCL; // R32 >>= R32,R32 cl
-def SHRDmrCL32 : I <"shrd", 0xAD, MRMDestMem>, TB, UsesCL; // [mem32] >>= [mem32],R32 cl
-def SHRDrr32i8 : Ii8 <"shrd", 0xAC, MRMDestReg>, TB; // R32 >>= R32,R32 imm8
-def SHRDmr32i8 : Ii8 <"shrd", 0xAC, MRMDestMem>, TB; // [mem32] >>= [mem32],R32 imm8
+def SHL8rCL : I <"shl", 0xD2, MRM4r > , UsesCL; // R8 <<= cl
+def SHL16rCL : I <"shl", 0xD3, MRM4r >, OpSize, UsesCL; // R16 <<= cl
+def SHL32rCL : I <"shl", 0xD3, MRM4r > , UsesCL; // R32 <<= cl
+def SHL8mCL : Im8 <"shl", 0xD2, MRM4m > , UsesCL; // [mem8] <<= cl
+def SHL16mCL : Im16 <"shl", 0xD3, MRM4m >, OpSize, UsesCL; // [mem16] <<= cl
+def SHL32mCL : Im32 <"shl", 0xD3, MRM4m > , UsesCL; // [mem32] <<= cl
+
+def SHL8ri : Ii8 <"shl", 0xC0, MRM4r >; // R8 <<= imm8
+def SHL16ri : Ii8 <"shl", 0xC1, MRM4r >, OpSize; // R16 <<= imm8
+def SHL32ri : Ii8 <"shl", 0xC1, MRM4r >; // R32 <<= imm8
+def SHL8mi : Im8i8 <"shl", 0xC0, MRM4m >; // [mem8] <<= imm8
+def SHL16mi : Im16i8<"shl", 0xC1, MRM4m >, OpSize; // [mem16] <<= imm8
+def SHL32mi : Im32i8<"shl", 0xC1, MRM4m >; // [mem32] <<= imm8
+
+def SHR8rCL : I <"shr", 0xD2, MRM5r > , UsesCL; // R8 >>= cl
+def SHR16rCL : I <"shr", 0xD3, MRM5r >, OpSize, UsesCL; // R16 >>= cl
+def SHR32rCL : I <"shr", 0xD3, MRM5r > , UsesCL; // R32 >>= cl
+def SHR8mCL : Im8 <"shr", 0xD2, MRM5m > , UsesCL; // [mem8] >>= cl
+def SHR16mCL : Im16 <"shr", 0xD3, MRM5m >, OpSize, UsesCL; // [mem16] >>= cl
+def SHR32mCL : Im32 <"shr", 0xD3, MRM5m > , UsesCL; // [mem32] >>= cl
+
+def SHR8ri : Ii8 <"shr", 0xC0, MRM5r >; // R8 >>= imm8
+def SHR16ri : Ii8 <"shr", 0xC1, MRM5r >, OpSize; // R16 >>= imm8
+def SHR32ri : Ii8 <"shr", 0xC1, MRM5r >; // R32 >>= imm8
+def SHR8mi : Im8i8 <"shr", 0xC0, MRM5m >; // [mem8] >>= imm8
+def SHR16mi : Im16i8<"shr", 0xC1, MRM5m >, OpSize; // [mem16] >>= imm8
+def SHR32mi : Im32i8<"shr", 0xC1, MRM5m >; // [mem32] >>= imm8
+
+def SAR8rCL : I <"sar", 0xD2, MRM7r > , UsesCL; // R8 >>>= cl
+def SAR16rCL : I <"sar", 0xD3, MRM7r >, OpSize, UsesCL; // R16 >>>= cl
+def SAR32rCL : I <"sar", 0xD3, MRM7r > , UsesCL; // R32 >>>= cl
+def SAR8mCL : Im8 <"sar", 0xD2, MRM7m > , UsesCL; // [mem8] >>>= cl
+def SAR16mCL : Im16 <"sar", 0xD3, MRM7m >, OpSize, UsesCL; // [mem16] >>>= cl
+def SAR32mCL : Im32 <"sar", 0xD3, MRM7m > , UsesCL; // [mem32] >>>= cl
+
+def SAR8ri : Ii8 <"sar", 0xC0, MRM7r >; // R8 >>>= imm8
+def SAR16ri : Ii8 <"sar", 0xC1, MRM7r >, OpSize; // R16 >>>= imm8
+def SAR32ri : Ii8 <"sar", 0xC1, MRM7r >; // R32 >>>= imm8
+def SAR8mi : Im8i8 <"sar", 0xC0, MRM7m >; // [mem8] >>>= imm8
+def SAR16mi : Im16i8<"sar", 0xC1, MRM7m >, OpSize; // [mem16] >>>= imm8
+def SAR32mi : Im32i8<"sar", 0xC1, MRM7m >; // [mem32] >>>= imm8
+
+def SHLD32rrCL : I <"shld", 0xA5, MRMDestReg>, TB, UsesCL; // R32 <<= R32,R32 cl
+def SHLD32mrCL : I <"shld", 0xA5, MRMDestMem>, TB, UsesCL; // [mem32] <<= [mem32],R32 cl
+def SHLD32rri8 : Ii8 <"shld", 0xA4, MRMDestReg>, TB; // R32 <<= R32,R32 imm8
+def SHLD32mri8 : Ii8 <"shld", 0xA4, MRMDestMem>, TB; // [mem32] <<= [mem32],R32 imm8
+
+def SHRD32rrCL : I <"shrd", 0xAD, MRMDestReg>, TB, UsesCL; // R32 >>= R32,R32 cl
+def SHRD32mrCL : I <"shrd", 0xAD, MRMDestMem>, TB, UsesCL; // [mem32] >>= [mem32],R32 cl
+def SHRD32rri8 : Ii8 <"shrd", 0xAC, MRMDestReg>, TB; // R32 >>= R32,R32 imm8
+def SHRD32mri8 : Ii8 <"shrd", 0xAC, MRMDestMem>, TB; // [mem32] >>= [mem32],R32 imm8
// Arithmetic...
-def ADDrr8 : I <"add", 0x00, MRMDestReg>, Pattern<(set R8 , (plus R8 , R8 ))>;
-def ADDrr16 : I <"add", 0x01, MRMDestReg>, OpSize, Pattern<(set R16, (plus R16, R16))>;
-def ADDrr32 : I <"add", 0x01, MRMDestReg>, Pattern<(set R32, (plus R32, R32))>;
-def ADDmr8 : Im8 <"add", 0x00, MRMDestMem>; // [mem8] += R8
-def ADDmr16 : Im16 <"add", 0x01, MRMDestMem>, OpSize; // [mem16] += R16
-def ADDmr32 : Im32 <"add", 0x01, MRMDestMem>; // [mem32] += R32
-def ADDrm8 : Im8 <"add", 0x02, MRMSrcMem >; // R8 += [mem8]
-def ADDrm16 : Im16 <"add", 0x03, MRMSrcMem >, OpSize; // R16 += [mem16]
-def ADDrm32 : Im32 <"add", 0x03, MRMSrcMem >; // R32 += [mem32]
-
-def ADDri8 : Ii8 <"add", 0x80, MRM0r >, Pattern<(set R8 , (plus R8 , imm))>;
-def ADDri16 : Ii16 <"add", 0x81, MRM0r >, OpSize, Pattern<(set R16, (plus R16, imm))>;
-def ADDri32 : Ii32 <"add", 0x81, MRM0r >, Pattern<(set R32, (plus R32, imm))>;
-def ADDmi8 : Im8i8 <"add", 0x80, MRM0m >; // [mem8] += I8
-def ADDmi16 : Im16i16 <"add", 0x81, MRM0m >, OpSize; // [mem16] += I16
-def ADDmi32 : Im32i32 <"add", 0x81, MRM0m >; // [mem32] += I32
-
-def ADDri16b : Ii8 <"add", 0x83, MRM0r >, OpSize; // ADDri with sign extended 8 bit imm
-def ADDri32b : Ii8 <"add", 0x83, MRM0r >;
-def ADDmi16b : Im16i8<"add", 0x83, MRM0m >, OpSize; // [mem16] += I8
-def ADDmi32b : Im32i8<"add", 0x83, MRM0m >; // [mem32] += I8
-
-def ADCrr32 : I <"adc", 0x11, MRMDestReg>; // R32 += R32+Carry
-def ADCrm32 : Im32 <"adc", 0x11, MRMSrcMem >; // R32 += [mem32]+Carry
-def ADCmr32 : Im32 <"adc", 0x13, MRMDestMem>; // [mem32] += R32+Carry
-
-
-def SUBrr8 : I <"sub", 0x28, MRMDestReg>, Pattern<(set R8 , (minus R8 , R8 ))>;
-def SUBrr16 : I <"sub", 0x29, MRMDestReg>, OpSize, Pattern<(set R16, (minus R16, R16))>;
-def SUBrr32 : I <"sub", 0x29, MRMDestReg>, Pattern<(set R32, (minus R32, R32))>;
-def SUBmr8 : Im8 <"sub", 0x28, MRMDestMem>; // [mem8] -= R8
-def SUBmr16 : Im16 <"sub", 0x29, MRMDestMem>, OpSize; // [mem16] -= R16
-def SUBmr32 : Im32 <"sub", 0x29, MRMDestMem>; // [mem32] -= R32
-def SUBrm8 : Im8 <"sub", 0x2A, MRMSrcMem >; // R8 -= [mem8]
-def SUBrm16 : Im16 <"sub", 0x2B, MRMSrcMem >, OpSize; // R16 -= [mem16]
-def SUBrm32 : Im32 <"sub", 0x2B, MRMSrcMem >; // R32 -= [mem32]
-
-def SUBri8 : Ii8 <"sub", 0x80, MRM5r >, Pattern<(set R8 , (minus R8 , imm))>;
-def SUBri16 : Ii16 <"sub", 0x81, MRM5r >, OpSize, Pattern<(set R16, (minus R16, imm))>;
-def SUBri32 : Ii32 <"sub", 0x81, MRM5r >, Pattern<(set R32, (minus R32, imm))>;
-def SUBmi8 : Im8i8 <"sub", 0x80, MRM5m >; // [mem8] -= I8
-def SUBmi16 : Im16i16 <"sub", 0x81, MRM5m >, OpSize; // [mem16] -= I16
-def SUBmi32 : Im32i32 <"sub", 0x81, MRM5m >; // [mem32] -= I32
-
-def SUBri16b : Ii8 <"sub", 0x83, MRM5r >, OpSize;
-def SUBri32b : Ii8 <"sub", 0x83, MRM5r >;
-def SUBmi16b : Im16i8<"sub", 0x83, MRM5m >, OpSize; // [mem16] -= I8
-def SUBmi32b : Im32i8<"sub", 0x83, MRM5m >; // [mem32] -= I8
-
-def SBBrr32 : I <"sbb", 0x19, MRMDestReg>; // R32 -= R32+Borrow
-def SBBrm32 : Im32 <"sbb", 0x19, MRMSrcMem >; // R32 -= [mem32]+Borrow
-def SBBmr32 : Im32 <"sbb", 0x1B, MRMDestMem>; // [mem32] -= R32+Borrow
-
-def IMULrr16 : I <"imul", 0xAF, MRMSrcReg>, TB, OpSize, Pattern<(set R16, (times R16, R16))>;
-def IMULrr32 : I <"imul", 0xAF, MRMSrcReg>, TB , Pattern<(set R32, (times R32, R32))>;
-def IMULrm16 : Im16 <"imul", 0xAF, MRMSrcMem>, TB, OpSize;
-def IMULrm32 : Im32 <"imul", 0xAF, MRMSrcMem>, TB ;
+def ADD8rr : I <"add", 0x00, MRMDestReg>, Pattern<(set R8 , (plus R8 , R8 ))>;
+def ADD16rr : I <"add", 0x01, MRMDestReg>, OpSize, Pattern<(set R16, (plus R16, R16))>;
+def ADD32rr : I <"add", 0x01, MRMDestReg>, Pattern<(set R32, (plus R32, R32))>;
+def ADD8mr : Im8 <"add", 0x00, MRMDestMem>; // [mem8] += R8
+def ADD16mr : Im16 <"add", 0x01, MRMDestMem>, OpSize; // [mem16] += R16
+def ADD32mr : Im32 <"add", 0x01, MRMDestMem>; // [mem32] += R32
+def ADD8rm : Im8 <"add", 0x02, MRMSrcMem >; // R8 += [mem8]
+def ADD16rm : Im16 <"add", 0x03, MRMSrcMem >, OpSize; // R16 += [mem16]
+def ADD32rm : Im32 <"add", 0x03, MRMSrcMem >; // R32 += [mem32]
+
+def ADD8ri : Ii8 <"add", 0x80, MRM0r >, Pattern<(set R8 , (plus R8 , imm))>;
+def ADD16ri : Ii16 <"add", 0x81, MRM0r >, OpSize, Pattern<(set R16, (plus R16, imm))>;
+def ADD32ri : Ii32 <"add", 0x81, MRM0r >, Pattern<(set R32, (plus R32, imm))>;
+def ADD8mi : Im8i8 <"add", 0x80, MRM0m >; // [mem8] += I8
+def ADD16mi : Im16i16 <"add", 0x81, MRM0m >, OpSize; // [mem16] += I16
+def ADD32mi : Im32i32 <"add", 0x81, MRM0m >; // [mem32] += I32
+
+def ADD16ri8 : Ii8 <"add", 0x83, MRM0r >, OpSize; // ADDri with sign extended 8 bit imm
+def ADD32ri8 : Ii8 <"add", 0x83, MRM0r >;
+def ADD16mi8 : Im16i8<"add", 0x83, MRM0m >, OpSize; // [mem16] += I8
+def ADD32mi8 : Im32i8<"add", 0x83, MRM0m >; // [mem32] += I8
+
+def ADC32rr : I <"adc", 0x11, MRMDestReg>; // R32 += R32+Carry
+def ADC32rm : Im32 <"adc", 0x11, MRMSrcMem >; // R32 += [mem32]+Carry
+def ADC32mr : Im32 <"adc", 0x13, MRMDestMem>; // [mem32] += R32+Carry
+
+
+def SUB8rr : I <"sub", 0x28, MRMDestReg>, Pattern<(set R8 , (minus R8 , R8 ))>;
+def SUB16rr : I <"sub", 0x29, MRMDestReg>, OpSize, Pattern<(set R16, (minus R16, R16))>;
+def SUB32rr : I <"sub", 0x29, MRMDestReg>, Pattern<(set R32, (minus R32, R32))>;
+def SUB8mr : Im8 <"sub", 0x28, MRMDestMem>; // [mem8] -= R8
+def SUB16mr : Im16 <"sub", 0x29, MRMDestMem>, OpSize; // [mem16] -= R16
+def SUB32mr : Im32 <"sub", 0x29, MRMDestMem>; // [mem32] -= R32
+def SUB8rm : Im8 <"sub", 0x2A, MRMSrcMem >; // R8 -= [mem8]
+def SUB16rm : Im16 <"sub", 0x2B, MRMSrcMem >, OpSize; // R16 -= [mem16]
+def SUB32rm : Im32 <"sub", 0x2B, MRMSrcMem >; // R32 -= [mem32]
+
+def SUB8ri : Ii8 <"sub", 0x80, MRM5r >, Pattern<(set R8 , (minus R8 , imm))>;
+def SUB16ri : Ii16 <"sub", 0x81, MRM5r >, OpSize, Pattern<(set R16, (minus R16, imm))>;
+def SUB32ri : Ii32 <"sub", 0x81, MRM5r >, Pattern<(set R32, (minus R32, imm))>;
+def SUB8mi : Im8i8 <"sub", 0x80, MRM5m >; // [mem8] -= I8
+def SUB16mi : Im16i16 <"sub", 0x81, MRM5m >, OpSize; // [mem16] -= I16
+def SUB32mi : Im32i32 <"sub", 0x81, MRM5m >; // [mem32] -= I32
+
+def SUB16ri8 : Ii8 <"sub", 0x83, MRM5r >, OpSize;
+def SUB32ri8 : Ii8 <"sub", 0x83, MRM5r >;
+def SUB16mi8 : Im16i8<"sub", 0x83, MRM5m >, OpSize; // [mem16] -= I8
+def SUB32mi8 : Im32i8<"sub", 0x83, MRM5m >; // [mem32] -= I8
+
+def SBB32rr : I <"sbb", 0x19, MRMDestReg>; // R32 -= R32+Borrow
+def SBB32rm : Im32 <"sbb", 0x19, MRMSrcMem >; // R32 -= [mem32]+Borrow
+def SBB32mr : Im32 <"sbb", 0x1B, MRMDestMem>; // [mem32] -= R32+Borrow
+
+def IMUL16rr : I <"imul", 0xAF, MRMSrcReg>, TB, OpSize, Pattern<(set R16, (times R16, R16))>;
+def IMUL32rr : I <"imul", 0xAF, MRMSrcReg>, TB , Pattern<(set R32, (times R32, R32))>;
+def IMUL16rm : Im16 <"imul", 0xAF, MRMSrcMem>, TB, OpSize;
+def IMUL32rm : Im32 <"imul", 0xAF, MRMSrcMem>, TB ;
} // end Two Address instructions
// These are surprisingly enough not two address instructions!
-def IMULrri16 : Ii16 <"imul", 0x69, MRMSrcReg>, OpSize; // R16 = R16*I16
-def IMULrri32 : Ii32 <"imul", 0x69, MRMSrcReg>; // R32 = R32*I32
-def IMULrri16b : Ii8 <"imul", 0x6B, MRMSrcReg>, OpSize; // R16 = R16*I8
-def IMULrri32b : Ii8 <"imul", 0x6B, MRMSrcReg>; // R32 = R32*I8
-def IMULrmi16 : Im16i16 <"imul", 0x69, MRMSrcMem>, OpSize; // R16 = [mem16]*I16
-def IMULrmi32 : Im32i32 <"imul", 0x69, MRMSrcMem>; // R32 = [mem32]*I32
-def IMULrmi16b : Im16i8<"imul", 0x6B, MRMSrcMem>, OpSize; // R16 = [mem16]*I8
-def IMULrmi32b : Im32i8<"imul", 0x6B, MRMSrcMem>; // R32 = [mem32]*I8
+def IMUL16rri : Ii16 <"imul", 0x69, MRMSrcReg>, OpSize; // R16 = R16*I16
+def IMUL32rri : Ii32 <"imul", 0x69, MRMSrcReg>; // R32 = R32*I32
+def IMUL16rri8 : Ii8 <"imul", 0x6B, MRMSrcReg>, OpSize; // R16 = R16*I8
+def IMUL32rri8 : Ii8 <"imul", 0x6B, MRMSrcReg>; // R32 = R32*I8
+def IMUL16rmi : Im16i16 <"imul", 0x69, MRMSrcMem>, OpSize; // R16 = [mem16]*I16
+def IMUL32rmi : Im32i32 <"imul", 0x69, MRMSrcMem>; // R32 = [mem32]*I32
+def IMUL16rmi8 : Im16i8<"imul", 0x6B, MRMSrcMem>, OpSize; // R16 = [mem16]*I8
+def IMUL32rmi8 : Im32i8<"imul", 0x6B, MRMSrcMem>; // R32 = [mem32]*I8
//===----------------------------------------------------------------------===//
// Test instructions are just like AND, except they don't generate a result.
-def TESTrr8 : I <"test", 0x84, MRMDestReg>; // flags = R8 & R8
-def TESTrr16 : I <"test", 0x85, MRMDestReg>, OpSize; // flags = R16 & R16
-def TESTrr32 : I <"test", 0x85, MRMDestReg>; // flags = R32 & R32
-def TESTmr8 : Im8 <"test", 0x84, MRMDestMem>; // flags = [mem8] & R8
-def TESTmr16 : Im16 <"test", 0x85, MRMDestMem>, OpSize; // flags = [mem16] & R16
-def TESTmr32 : Im32 <"test", 0x85, MRMDestMem>; // flags = [mem32] & R32
-def TESTrm8 : Im8 <"test", 0x84, MRMSrcMem >; // flags = R8 & [mem8]
-def TESTrm16 : Im16 <"test", 0x85, MRMSrcMem >, OpSize; // flags = R16 & [mem16]
-def TESTrm32 : Im32 <"test", 0x85, MRMSrcMem >; // flags = R32 & [mem32]
-
-def TESTri8 : Ii8 <"test", 0xF6, MRM0r >; // flags = R8 & imm8
-def TESTri16 : Ii16 <"test", 0xF7, MRM0r >, OpSize; // flags = R16 & imm16
-def TESTri32 : Ii32 <"test", 0xF7, MRM0r >; // flags = R32 & imm32
-def TESTmi8 : Im8i8 <"test", 0xF6, MRM0m >; // flags = [mem8] & imm8
-def TESTmi16 : Im16i16<"test", 0xF7, MRM0m >, OpSize; // flags = [mem16] & imm16
-def TESTmi32 : Im32i32<"test", 0xF7, MRM0m >; // flags = [mem32] & imm32
+def TEST8rr : I <"test", 0x84, MRMDestReg>; // flags = R8 & R8
+def TEST16rr : I <"test", 0x85, MRMDestReg>, OpSize; // flags = R16 & R16
+def TEST32rr : I <"test", 0x85, MRMDestReg>; // flags = R32 & R32
+def TEST8mr : Im8 <"test", 0x84, MRMDestMem>; // flags = [mem8] & R8
+def TEST16mr : Im16 <"test", 0x85, MRMDestMem>, OpSize; // flags = [mem16] & R16
+def TEST32mr : Im32 <"test", 0x85, MRMDestMem>; // flags = [mem32] & R32
+def TEST8rm : Im8 <"test", 0x84, MRMSrcMem >; // flags = R8 & [mem8]
+def TEST16rm : Im16 <"test", 0x85, MRMSrcMem >, OpSize; // flags = R16 & [mem16]
+def TEST32rm : Im32 <"test", 0x85, MRMSrcMem >; // flags = R32 & [mem32]
+
+def TEST8ri : Ii8 <"test", 0xF6, MRM0r >; // flags = R8 & imm8
+def TEST16ri : Ii16 <"test", 0xF7, MRM0r >, OpSize; // flags = R16 & imm16
+def TEST32ri : Ii32 <"test", 0xF7, MRM0r >; // flags = R32 & imm32
+def TEST8mi : Im8i8 <"test", 0xF6, MRM0m >; // flags = [mem8] & imm8
+def TEST16mi : Im16i16<"test", 0xF7, MRM0m >, OpSize; // flags = [mem16] & imm16
+def TEST32mi : Im32i32<"test", 0xF7, MRM0m >; // flags = [mem32] & imm32
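(For concreteness on the ri8/mi8 forms above: the 0x83 opcode takes a
sign-extended 8-bit immediate, so "add eax, 4" assembles to 83 C0 04 --
three bytes -- where the full-immediate 0x81 form is 81 C0 04 00 00 00,
six bytes. The peephole optimizer below rewrites instructions into these
short forms whenever the immediate fits.)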
@@ -571,37 +571,37 @@
def SETGm : Im8<"setg" , 0x9F, MRM0m>, TB; // [mem8] = < signed
// Integer comparisons
-def CMPrr8 : I <"cmp", 0x38, MRMDestReg>; // compare R8, R8
-def CMPrr16 : I <"cmp", 0x39, MRMDestReg>, OpSize; // compare R16, R16
-def CMPrr32 : I <"cmp", 0x39, MRMDestReg>, // compare R32, R32
+def CMP8rr : I <"cmp", 0x38, MRMDestReg>; // compare R8, R8
+def CMP16rr : I <"cmp", 0x39, MRMDestReg>, OpSize; // compare R16, R16
+def CMP32rr : I <"cmp", 0x39, MRMDestReg>, // compare R32, R32
Pattern<(isVoid (unspec2 R32, R32))>;
-def CMPmr8 : Im8 <"cmp", 0x38, MRMDestMem>; // compare [mem8], R8
-def CMPmr16 : Im16 <"cmp", 0x39, MRMDestMem>, OpSize; // compare [mem16], R16
-def CMPmr32 : Im32 <"cmp", 0x39, MRMDestMem>; // compare [mem32], R32
-def CMPrm8 : Im8 <"cmp", 0x3A, MRMSrcMem >; // compare R8, [mem8]
-def CMPrm16 : Im16 <"cmp", 0x3B, MRMSrcMem >, OpSize; // compare R16, [mem16]
-def CMPrm32 : Im32 <"cmp", 0x3B, MRMSrcMem >; // compare R32, [mem32]
-def CMPri8 : Ii8 <"cmp", 0x80, MRM7r >; // compare R8, imm8
-def CMPri16 : Ii16 <"cmp", 0x81, MRM7r >, OpSize; // compare R16, imm16
-def CMPri32 : Ii32 <"cmp", 0x81, MRM7r >; // compare R32, imm32
-def CMPmi8 : Im8i8 <"cmp", 0x80, MRM7m >; // compare [mem8], imm8
-def CMPmi16 : Im16i16<"cmp", 0x81, MRM7m >, OpSize; // compare [mem16], imm16
-def CMPmi32 : Im32i32<"cmp", 0x81, MRM7m >; // compare [mem32], imm32
+def CMP8mr : Im8 <"cmp", 0x38, MRMDestMem>; // compare [mem8], R8
+def CMP16mr : Im16 <"cmp", 0x39, MRMDestMem>, OpSize; // compare [mem16], R16
+def CMP32mr : Im32 <"cmp", 0x39, MRMDestMem>; // compare [mem32], R32
+def CMP8rm : Im8 <"cmp", 0x3A, MRMSrcMem >; // compare R8, [mem8]
+def CMP16rm : Im16 <"cmp", 0x3B, MRMSrcMem >, OpSize; // compare R16, [mem16]
+def CMP32rm : Im32 <"cmp", 0x3B, MRMSrcMem >; // compare R32, [mem32]
+def CMP8ri : Ii8 <"cmp", 0x80, MRM7r >; // compare R8, imm8
+def CMP16ri : Ii16 <"cmp", 0x81, MRM7r >, OpSize; // compare R16, imm16
+def CMP32ri : Ii32 <"cmp", 0x81, MRM7r >; // compare R32, imm32
+def CMP8mi : Im8i8 <"cmp", 0x80, MRM7m >; // compare [mem8], imm8
+def CMP16mi : Im16i16<"cmp", 0x81, MRM7m >, OpSize; // compare [mem16], imm16
+def CMP32mi : Im32i32<"cmp", 0x81, MRM7m >; // compare [mem32], imm32
// Sign/Zero extenders
-def MOVSXr16r8 : I <"movsx", 0xBE, MRMSrcReg>, TB, OpSize; // R16 = signext(R8)
-def MOVSXr32r8 : I <"movsx", 0xBE, MRMSrcReg>, TB; // R32 = signext(R8)
-def MOVSXr32r16: I <"movsx", 0xBF, MRMSrcReg>, TB; // R32 = signext(R16)
-def MOVSXr16m8 : Im8 <"movsx", 0xBE, MRMSrcMem>, TB, OpSize; // R16 = signext([mem8])
-def MOVSXr32m8 : Im8 <"movsx", 0xBE, MRMSrcMem>, TB; // R32 = signext([mem8])
-def MOVSXr32m16: Im16<"movsx", 0xBF, MRMSrcMem>, TB; // R32 = signext([mem16])
-
-def MOVZXr16r8 : I <"movzx", 0xB6, MRMSrcReg>, TB, OpSize; // R16 = zeroext(R8)
-def MOVZXr32r8 : I <"movzx", 0xB6, MRMSrcReg>, TB; // R32 = zeroext(R8)
-def MOVZXr32r16: I <"movzx", 0xB7, MRMSrcReg>, TB; // R32 = zeroext(R16)
-def MOVZXr16m8 : Im8 <"movzx", 0xB6, MRMSrcMem>, TB, OpSize; // R16 = zeroext([mem8])
-def MOVZXr32m8 : Im8 <"movzx", 0xB6, MRMSrcMem>, TB; // R32 = zeroext([mem8])
-def MOVZXr32m16: Im16<"movzx", 0xB7, MRMSrcMem>, TB; // R32 = zeroext([mem16])
+def MOVSX16rr8 : I <"movsx", 0xBE, MRMSrcReg>, TB, OpSize; // R16 = signext(R8)
+def MOVSX32rr8 : I <"movsx", 0xBE, MRMSrcReg>, TB; // R32 = signext(R8)
+def MOVSX32rr16: I <"movsx", 0xBF, MRMSrcReg>, TB; // R32 = signext(R16)
+def MOVSX16rm8 : Im8 <"movsx", 0xBE, MRMSrcMem>, TB, OpSize; // R16 = signext([mem8])
+def MOVSX32rm8 : Im8 <"movsx", 0xBE, MRMSrcMem>, TB; // R32 = signext([mem8])
+def MOVSX32rm16: Im16<"movsx", 0xBF, MRMSrcMem>, TB; // R32 = signext([mem16])
+
+def MOVZX16rr8 : I <"movzx", 0xB6, MRMSrcReg>, TB, OpSize; // R16 = zeroext(R8)
+def MOVZX32rr8 : I <"movzx", 0xB6, MRMSrcReg>, TB; // R32 = zeroext(R8)
+def MOVZX32rr16: I <"movzx", 0xB7, MRMSrcReg>, TB; // R32 = zeroext(R16)
+def MOVZX16rm8 : Im8 <"movzx", 0xB6, MRMSrcMem>, TB, OpSize; // R16 = zeroext([mem8])
+def MOVZX32rm8 : Im8 <"movzx", 0xB6, MRMSrcMem>, TB; // R32 = zeroext([mem8])
+def MOVZX32rm16: Im16<"movzx", 0xB7, MRMSrcMem>, TB; // R32 = zeroext([mem16])
//===----------------------------------------------------------------------===//
@@ -618,10 +618,10 @@
class FPIM<string n, bits<8> o, Format F, FPFormat fp, MemType m> : FPInst<n, o, F, fp, m, NoImm>;
-class FPIm16<string n, bits<8> o, Format F, FPFormat fp> : FPIM<n, o, F, fp, Mem16>;
-class FPIm32<string n, bits<8> o, Format F, FPFormat fp> : FPIM<n, o, F, fp, Mem32>;
-class FPIm64<string n, bits<8> o, Format F, FPFormat fp> : FPIM<n, o, F, fp, Mem64>;
-class FPIm80<string n, bits<8> o, Format F, FPFormat fp> : FPIM<n, o, F, fp, Mem80>;
+class FPI16m<string n, bits<8> o, Format F, FPFormat fp> : FPIM<n, o, F, fp, Mem16>;
+class FPI32m<string n, bits<8> o, Format F, FPFormat fp> : FPIM<n, o, F, fp, Mem32>;
+class FPI64m<string n, bits<8> o, Format F, FPFormat fp> : FPIM<n, o, F, fp, Mem64>;
+class FPI80m<string n, bits<8> o, Format F, FPFormat fp> : FPIM<n, o, F, fp, Mem80>;
// Pseudo instructions for floating point. We use these pseudo instructions
// because they can be expanded by the fp stackifier into one of many different
@@ -639,26 +639,26 @@
// Floating point loads & stores...
def FLDrr : FPI <"fld" , 0xC0, AddRegFrm, NotFP>, D9; // push(ST(i))
-def FLDm32 : FPIm32 <"fld" , 0xD9, MRM0m , ZeroArgFP>; // load float
-def FLDm64 : FPIm64 <"fld" , 0xDD, MRM0m , ZeroArgFP>; // load double
-def FLDm80 : FPIm80 <"fld" , 0xDB, MRM5m , ZeroArgFP>; // load extended
-def FILDm16 : FPIm16 <"fild" , 0xDF, MRM0m , ZeroArgFP>; // load signed short
-def FILDm32 : FPIm32 <"fild" , 0xDB, MRM0m , ZeroArgFP>; // load signed int
-def FILDm64 : FPIm64 <"fild" , 0xDF, MRM5m , ZeroArgFP>; // load signed long
+def FLD32m : FPI32m <"fld" , 0xD9, MRM0m , ZeroArgFP>; // load float
+def FLD64m : FPI64m <"fld" , 0xDD, MRM0m , ZeroArgFP>; // load double
+def FLD80m : FPI80m <"fld" , 0xDB, MRM5m , ZeroArgFP>; // load extended
+def FILD16m : FPI16m <"fild" , 0xDF, MRM0m , ZeroArgFP>; // load signed short
+def FILD32m : FPI32m <"fild" , 0xDB, MRM0m , ZeroArgFP>; // load signed int
+def FILD64m : FPI64m <"fild" , 0xDF, MRM5m , ZeroArgFP>; // load signed long
def FSTrr : FPI <"fst" , 0xD0, AddRegFrm, NotFP >, DD; // ST(i) = ST(0)
def FSTPrr : FPI <"fstp", 0xD8, AddRegFrm, NotFP >, DD; // ST(i) = ST(0), pop
-def FSTm32 : FPIm32 <"fst" , 0xD9, MRM2m , OneArgFP>; // store float
-def FSTm64 : FPIm64 <"fst" , 0xDD, MRM2m , OneArgFP>; // store double
-def FSTPm32 : FPIm32 <"fstp", 0xD9, MRM3m , OneArgFP>; // store float, pop
-def FSTPm64 : FPIm64 <"fstp", 0xDD, MRM3m , OneArgFP>; // store double, pop
-def FSTPm80 : FPIm80 <"fstp", 0xDB, MRM7m , OneArgFP>; // store extended, pop
-
-def FISTm16 : FPIm16 <"fist", 0xDF, MRM2m , OneArgFP>; // store signed short
-def FISTm32 : FPIm32 <"fist", 0xDB, MRM2m , OneArgFP>; // store signed int
-def FISTPm16 : FPIm16 <"fistp", 0xDF, MRM3m , NotFP >; // store signed short, pop
-def FISTPm32 : FPIm32 <"fistp", 0xDB, MRM3m , NotFP >; // store signed int, pop
-def FISTPm64 : FPIm64 <"fistpll", 0xDF, MRM7m , OneArgFP>; // store signed long, pop
+def FST32m : FPI32m <"fst" , 0xD9, MRM2m , OneArgFP>; // store float
+def FST64m : FPI64m <"fst" , 0xDD, MRM2m , OneArgFP>; // store double
+def FSTP32m : FPI32m <"fstp", 0xD9, MRM3m , OneArgFP>; // store float, pop
+def FSTP64m : FPI64m <"fstp", 0xDD, MRM3m , OneArgFP>; // store double, pop
+def FSTP80m : FPI80m <"fstp", 0xDB, MRM7m , OneArgFP>; // store extended, pop
+
+def FIST16m : FPI16m <"fist", 0xDF, MRM2m , OneArgFP>; // store signed short
+def FIST32m : FPI32m <"fist", 0xDB, MRM2m , OneArgFP>; // store signed int
+def FISTP16m : FPI16m <"fistp", 0xDF, MRM3m , NotFP >; // store signed short, pop
+def FISTP32m : FPI32m <"fistp", 0xDB, MRM3m , NotFP >; // store signed int, pop
+def FISTP64m : FPI64m <"fistpll", 0xDF, MRM7m , OneArgFP>; // store signed long, pop
def FXCH : FPI <"fxch", 0xC8, AddRegFrm, NotFP>, D9; // fxch ST(i), ST(0)
@@ -715,9 +715,9 @@
def FUCOMPPr : I<"fucompp", 0xE9, RawFrm >, DA, Imp<[ST0],[]>; // compare ST(0) with ST(1), pop, pop
// Floating point flag ops
-def FNSTSWr8 : I <"fnstsw" , 0xE0, RawFrm>, DF, Imp<[],[AX]>; // AX = fp flags
-def FNSTCWm16 : Im16<"fnstcw" , 0xD9, MRM7m >; // [mem16] = X87 control word
-def FLDCWm16 : Im16<"fldcw" , 0xD9, MRM5m >; // X87 control word = [mem16]
+def FNSTSW8r : I <"fnstsw" , 0xE0, RawFrm>, DF, Imp<[],[AX]>; // AX = fp flags
+def FNSTCW16m : Im16<"fnstcw" , 0xD9, MRM7m >; // [mem16] = X87 control word
+def FLDCW16m : Im16<"fldcw" , 0xD9, MRM5m >; // X87 control word = [mem16]
//===----------------------------------------------------------------------===//
@@ -725,26 +725,26 @@
//
def RET_R32 : Expander<(ret R32:$reg),
- [(MOVrr32 EAX, R32:$reg),
+ [(MOV32rr EAX, R32:$reg),
(RET)]>;
// FIXME: This should eventually just be implemented by defining a frameidx as a
// value address for a load.
def LOAD_FI16 : Expander<(set R16:$dest, (load frameidx:$fi)),
- [(MOVrm16 R16:$dest, frameidx:$fi, 1, 0/*NoReg*/, 0)]>;
+ [(MOV16rm R16:$dest, frameidx:$fi, 1, 0/*NoReg*/, 0)]>;
def LOAD_FI32 : Expander<(set R32:$dest, (load frameidx:$fi)),
- [(MOVrm32 R32:$dest, frameidx:$fi, 1, 0/*NoReg*/, 0)]>;
+ [(MOV32rm R32:$dest, frameidx:$fi, 1, 0/*NoReg*/, 0)]>;
def LOAD_R16 : Expander<(set R16:$dest, (load R32:$src)),
- [(MOVrm16 R16:$dest, R32:$src, 1, 0/*NoReg*/, 0)]>;
+ [(MOV16rm R16:$dest, R32:$src, 1, 0/*NoReg*/, 0)]>;
def LOAD_R32 : Expander<(set R32:$dest, (load R32:$src)),
- [(MOVrm32 R32:$dest, R32:$src, 1, 0/*NoReg*/, 0)]>;
+ [(MOV32rm R32:$dest, R32:$src, 1, 0/*NoReg*/, 0)]>;
def BR_EQ : Expander<(brcond (seteq R32:$a1, R32:$a2),
basicblock:$d1, basicblock:$d2),
- [(CMPrr32 R32:$a1, R32:$a2),
+ [(CMP32rr R32:$a1, R32:$a2),
(JE basicblock:$d1),
(JMP basicblock:$d2)]>;
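As a sketch of what the BR_EQ expander amounts to at the BuildMI level
(the register and block variables here are illustrative, not from the
patch):

    BuildMI(BB, X86::CMP32rr, 2).addReg(A1).addReg(A2); // set flags from a1, a2
    BuildMI(BB, X86::JE, 1).addPCDisp(D1);              // branch to d1 if equal
    BuildMI(BB, X86::JMP, 1).addPCDisp(D2);             // unconditional jump to d2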
Index: llvm/lib/Target/X86/X86InstrInfo.cpp
diff -u llvm/lib/Target/X86/X86InstrInfo.cpp:1.25 llvm/lib/Target/X86/X86InstrInfo.cpp:1.26
--- llvm/lib/Target/X86/X86InstrInfo.cpp:1.25 Sun Feb 29 00:31:44 2004
+++ llvm/lib/Target/X86/X86InstrInfo.cpp Sun Feb 29 02:50:03 2004
@@ -26,7 +26,7 @@
unsigned& sourceReg,
unsigned& destReg) const {
MachineOpCode oc = MI.getOpcode();
- if (oc == X86::MOVrr8 || oc == X86::MOVrr16 || oc == X86::MOVrr32 ||
+ if (oc == X86::MOV8rr || oc == X86::MOV16rr || oc == X86::MOV32rr ||
oc == X86::FpMOV) {
assert(MI.getNumOperands() == 2 &&
MI.getOperand(0).isRegister() &&
Index: llvm/lib/Target/X86/Printer.cpp
diff -u llvm/lib/Target/X86/Printer.cpp:1.89 llvm/lib/Target/X86/Printer.cpp:1.90
--- llvm/lib/Target/X86/Printer.cpp:1.89 Sat Feb 28 17:42:35 2004
+++ llvm/lib/Target/X86/Printer.cpp Sun Feb 29 02:50:03 2004
@@ -772,7 +772,7 @@
// is misassembled by gas in intel_syntax mode as its 32-bit
// equivalent "fstp DWORD PTR [...]". Workaround: Output the raw
// opcode bytes instead of the instruction.
- if (MI->getOpcode() == X86::FSTPm80) {
+ if (MI->getOpcode() == X86::FSTP80m) {
if ((MI->getOperand(0).getReg() == X86::ESP)
&& (MI->getOperand(1).getImmedValue() == 1)) {
if (Op3.isImmediate() &&
@@ -793,7 +793,7 @@
// misassembled by gas in intel_syntax mode as its 32-bit
// equivalent "fld DWORD PTR [...]". Workaround: Output the raw
// opcode bytes instead of the instruction.
- if (MI->getOpcode() == X86::FLDm80 &&
+ if (MI->getOpcode() == X86::FLD80m &&
MI->getOperand(0).getReg() == X86::ESP &&
MI->getOperand(1).getImmedValue() == 1) {
if (Op3.isImmediate() && Op3.getImmedValue() >= -128 &&
@@ -813,7 +813,7 @@
// 64 bit modes." libopcodes disassembles it as "fild DWORD PTR
// [...]", which is wrong. Workaround: Output the raw opcode bytes
// instead of the instruction.
- if (MI->getOpcode() == X86::FILDm64 &&
+ if (MI->getOpcode() == X86::FILD64m &&
MI->getOperand(0).getReg() == X86::ESP &&
MI->getOperand(1).getImmedValue() == 1) {
if (Op3.isImmediate() && Op3.getImmedValue() >= -128 &&
@@ -834,7 +834,7 @@
// "fistpll DWORD PTR [...]", which is wrong. Workaround: Output
// "fistpll DWORD PTR " instead, which is what libopcodes is
// expecting to see.
- if (MI->getOpcode() == X86::FISTPm64) {
+ if (MI->getOpcode() == X86::FISTP64m) {
O << "fistpll DWORD PTR ";
printMemReference(MI, 0);
if (MI->getNumOperands() == 5) {
Index: llvm/lib/Target/X86/PeepholeOptimizer.cpp
diff -u llvm/lib/Target/X86/PeepholeOptimizer.cpp:1.30 llvm/lib/Target/X86/PeepholeOptimizer.cpp:1.31
--- llvm/lib/Target/X86/PeepholeOptimizer.cpp:1.30 Sat Feb 28 16:06:59 2004
+++ llvm/lib/Target/X86/PeepholeOptimizer.cpp Sun Feb 29 02:50:03 2004
@@ -62,9 +62,9 @@
MachineInstr *Next = (NextI != MBB.end()) ? &*NextI : (MachineInstr*)0;
unsigned Size = 0;
switch (MI->getOpcode()) {
- case X86::MOVrr8:
- case X86::MOVrr16:
- case X86::MOVrr32: // Destroy X = X copies...
+ case X86::MOV8rr:
+ case X86::MOV16rr:
+ case X86::MOV32rr: // Destroy X = X copies...
if (MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
I = MBB.erase(I);
return true;
@@ -75,7 +75,7 @@
// immediate despite the fact that the operands are 16 or 32 bits. Because
// this can save three bytes of code size (and icache space), we want to
// shrink them if possible.
- case X86::IMULrri16: case X86::IMULrri32:
+ case X86::IMUL16rri: case X86::IMUL32rri:
assert(MI->getNumOperands() == 3 && "These should all have 3 operands!");
if (MI->getOperand(2).isImmediate()) {
int Val = MI->getOperand(2).getImmedValue();
@@ -84,8 +84,8 @@
unsigned Opcode;
switch (MI->getOpcode()) {
default: assert(0 && "Unknown opcode value!");
- case X86::IMULrri16: Opcode = X86::IMULrri16b; break;
- case X86::IMULrri32: Opcode = X86::IMULrri32b; break;
+ case X86::IMUL16rri: Opcode = X86::IMUL16rri8; break;
+ case X86::IMUL32rri: Opcode = X86::IMUL32rri8; break;
}
unsigned R0 = MI->getOperand(0).getReg();
unsigned R1 = MI->getOperand(1).getReg();
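The shrink test itself is just a signed-byte range check; a minimal
sketch (hypothetical helper name, the real code checks the range inline):

    // True iff Val round-trips through a sign-extended 8-bit immediate,
    // i.e. the short 0x6B/0x83 encodings are usable.
    static inline bool fitsSignExtImm8(int Val) {
      return Val >= -128 && Val <= 127;
    }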
@@ -97,7 +97,7 @@
return false;
#if 0
- case X86::IMULrmi16: case X86::IMULrmi32:
+ case X86::IMUL16rmi: case X86::IMUL32rmi:
assert(MI->getNumOperands() == 6 && "These should all have 6 operands!");
if (MI->getOperand(5).isImmediate()) {
int Val = MI->getOperand(5).getImmedValue();
@@ -106,8 +106,8 @@
unsigned Opcode;
switch (MI->getOpcode()) {
default: assert(0 && "Unknown opcode value!");
- case X86::IMULrmi16: Opcode = X86::IMULrmi16b; break;
- case X86::IMULrmi32: Opcode = X86::IMULrmi32b; break;
+ case X86::IMUL16rmi: Opcode = X86::IMUL16rmi8; break;
+ case X86::IMUL32rmi: Opcode = X86::IMUL32rmi8; break;
}
unsigned R0 = MI->getOperand(0).getReg();
unsigned R1 = MI->getOperand(1).getReg();
@@ -123,11 +123,11 @@
return false;
#endif
- case X86::ADDri16: case X86::ADDri32:
- case X86::SUBri16: case X86::SUBri32:
- case X86::ANDri16: case X86::ANDri32:
- case X86::ORri16: case X86::ORri32:
- case X86::XORri16: case X86::XORri32:
+ case X86::ADD16ri: case X86::ADD32ri:
+ case X86::SUB16ri: case X86::SUB32ri:
+ case X86::AND16ri: case X86::AND32ri:
+ case X86::OR16ri: case X86::OR32ri:
+ case X86::XOR16ri: case X86::XOR32ri:
assert(MI->getNumOperands() == 2 && "These should all have 2 operands!");
if (MI->getOperand(1).isImmediate()) {
int Val = MI->getOperand(1).getImmedValue();
@@ -136,16 +136,16 @@
unsigned Opcode;
switch (MI->getOpcode()) {
default: assert(0 && "Unknown opcode value!");
- case X86::ADDri16: Opcode = X86::ADDri16b; break;
- case X86::ADDri32: Opcode = X86::ADDri32b; break;
- case X86::SUBri16: Opcode = X86::SUBri16b; break;
- case X86::SUBri32: Opcode = X86::SUBri32b; break;
- case X86::ANDri16: Opcode = X86::ANDri16b; break;
- case X86::ANDri32: Opcode = X86::ANDri32b; break;
- case X86::ORri16: Opcode = X86::ORri16b; break;
- case X86::ORri32: Opcode = X86::ORri32b; break;
- case X86::XORri16: Opcode = X86::XORri16b; break;
- case X86::XORri32: Opcode = X86::XORri32b; break;
+ case X86::ADD16ri: Opcode = X86::ADD16ri8; break;
+ case X86::ADD32ri: Opcode = X86::ADD32ri8; break;
+ case X86::SUB16ri: Opcode = X86::SUB16ri8; break;
+ case X86::SUB32ri: Opcode = X86::SUB32ri8; break;
+ case X86::AND16ri: Opcode = X86::AND16ri8; break;
+ case X86::AND32ri: Opcode = X86::AND32ri8; break;
+ case X86::OR16ri: Opcode = X86::OR16ri8; break;
+ case X86::OR32ri: Opcode = X86::OR32ri8; break;
+ case X86::XOR16ri: Opcode = X86::XOR16ri8; break;
+ case X86::XOR32ri: Opcode = X86::XOR32ri8; break;
}
unsigned R0 = MI->getOperand(0).getReg();
I = MBB.insert(MBB.erase(I),
@@ -156,11 +156,11 @@
}
return false;
- case X86::ADDmi16: case X86::ADDmi32:
- case X86::SUBmi16: case X86::SUBmi32:
- case X86::ANDmi16: case X86::ANDmi32:
- case X86::ORmi16: case X86::ORmi32:
- case X86::XORmi16: case X86::XORmi32:
+ case X86::ADD16mi: case X86::ADD32mi:
+ case X86::SUB16mi: case X86::SUB32mi:
+ case X86::AND16mi: case X86::AND32mi:
+ case X86::OR16mi: case X86::OR32mi:
+ case X86::XOR16mi: case X86::XOR32mi:
assert(MI->getNumOperands() == 5 && "These should all have 5 operands!");
if (MI->getOperand(4).isImmediate()) {
int Val = MI->getOperand(4).getImmedValue();
@@ -169,16 +169,16 @@
unsigned Opcode;
switch (MI->getOpcode()) {
default: assert(0 && "Unknown opcode value!");
- case X86::ADDmi16: Opcode = X86::ADDmi16b; break;
- case X86::ADDmi32: Opcode = X86::ADDmi32b; break;
- case X86::SUBmi16: Opcode = X86::SUBmi16b; break;
- case X86::SUBmi32: Opcode = X86::SUBmi32b; break;
- case X86::ANDmi16: Opcode = X86::ANDmi16b; break;
- case X86::ANDmi32: Opcode = X86::ANDmi32b; break;
- case X86::ORmi16: Opcode = X86::ORmi16b; break;
- case X86::ORmi32: Opcode = X86::ORmi32b; break;
- case X86::XORmi16: Opcode = X86::XORmi16b; break;
- case X86::XORmi32: Opcode = X86::XORmi32b; break;
+ case X86::ADD16mi: Opcode = X86::ADD16mi8; break;
+ case X86::ADD32mi: Opcode = X86::ADD32mi8; break;
+ case X86::SUB16mi: Opcode = X86::SUB16mi8; break;
+ case X86::SUB32mi: Opcode = X86::SUB32mi8; break;
+ case X86::AND16mi: Opcode = X86::AND16mi8; break;
+ case X86::AND32mi: Opcode = X86::AND32mi8; break;
+ case X86::OR16mi: Opcode = X86::OR16mi8; break;
+ case X86::OR32mi: Opcode = X86::OR32mi8; break;
+ case X86::XOR16mi: Opcode = X86::XOR16mi8; break;
+ case X86::XOR32mi: Opcode = X86::XOR32mi8; break;
}
unsigned R0 = MI->getOperand(0).getReg();
unsigned Scale = MI->getOperand(1).getImmedValue();
@@ -193,15 +193,15 @@
return false;
#if 0
- case X86::MOVri32: Size++;
- case X86::MOVri16: Size++;
- case X86::MOVri8:
+ case X86::MOV32ri: Size++;
+ case X86::MOV16ri: Size++;
+ case X86::MOV8ri:
// FIXME: We can only do this transformation if we know that flags are not
// used here, because XOR clobbers the flags!
if (MI->getOperand(1).isImmediate()) { // avoid mov EAX, <value>
int Val = MI->getOperand(1).getImmedValue();
if (Val == 0) { // mov EAX, 0 -> xor EAX, EAX
- static const unsigned Opcode[] ={X86::XORrr8,X86::XORrr16,X86::XORrr32};
+ static const unsigned Opcode[] ={X86::XOR8rr,X86::XOR16rr,X86::XOR32rr};
unsigned Reg = MI->getOperand(0).getReg();
I = MBB.insert(MBB.erase(I),
BuildMI(Opcode[Size], 2, Reg).addReg(Reg).addReg(Reg));
@@ -212,8 +212,8 @@
}
return false;
#endif
- case X86::BSWAPr32: // Change bswap EAX, bswap EAX into nothing
- if (Next->getOpcode() == X86::BSWAPr32 &&
+ case X86::BSWAP32r: // Change bswap EAX, bswap EAX into nothing
+ if (Next->getOpcode() == X86::BSWAP32r &&
MI->getOperand(0).getReg() == Next->getOperand(0).getReg()) {
I = MBB.erase(MBB.erase(I));
return true;
@@ -387,7 +387,7 @@
// Attempt to fold instructions used by the base register into the instruction
if (MachineInstr *DefInst = getDefiningInst(BaseRegOp)) {
switch (DefInst->getOpcode()) {
- case X86::MOVri32:
+ case X86::MOV32ri:
// If there is no displacement set for this instruction set one now.
// FIXME: If we can fold two immediates together, we should do so!
if (DisplacementOp.isImmediate() && !DisplacementOp.getImmedValue()) {
@@ -398,7 +398,7 @@
}
break;
- case X86::ADDrr32:
+ case X86::ADD32rr:
// If the source is a register-register add, and we do not yet have an
// index register, fold the add into the memory address.
if (IndexReg == 0) {
@@ -409,7 +409,7 @@
}
break;
- case X86::SHLri32:
+ case X86::SHL32ri:
// If this shift could be folded into the index portion of the address if
// it were the index register, move it to the index register operand now,
// so it will be folded in below.
@@ -427,7 +427,7 @@
// Attempt to fold instructions used by the index into the instruction
if (MachineInstr *DefInst = getDefiningInst(IndexRegOp)) {
switch (DefInst->getOpcode()) {
- case X86::SHLri32: {
+ case X86::SHL32ri: {
// Figure out what the resulting scale would be if we folded this shift.
unsigned ResScale = Scale * (1 << DefInst->getOperand(2).getImmedValue());
if (isValidScaleAmount(ResScale)) {
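Folding a shift into the index scale is only legal when the product is
still encodable; a sketch of what isValidScaleAmount must accept
(assumed body -- SIB addressing only encodes these four scales):

    static bool isValidScaleAmount(unsigned Scale) {
      return Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8;
    }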
@@ -478,15 +478,15 @@
switch (MI->getOpcode()) {
// Register to memory stores. Format: <base,scale,indexreg,immdisp>, srcreg
- case X86::MOVmr32: case X86::MOVmr16: case X86::MOVmr8:
- case X86::MOVmi32: case X86::MOVmi16: case X86::MOVmi8:
+ case X86::MOV32mr: case X86::MOV16mr: case X86::MOV8mr:
+ case X86::MOV32mi: case X86::MOV16mi: case X86::MOV8mi:
// Check to see if we can fold the source instruction into this one...
if (MachineInstr *SrcInst = getDefiningInst(MI->getOperand(4))) {
switch (SrcInst->getOpcode()) {
// Fold the immediate value into the store, if possible.
- case X86::MOVri8: return Propagate(MI, 4, SrcInst, 1, X86::MOVmi8);
- case X86::MOVri16: return Propagate(MI, 4, SrcInst, 1, X86::MOVmi16);
- case X86::MOVri32: return Propagate(MI, 4, SrcInst, 1, X86::MOVmi32);
+ case X86::MOV8ri: return Propagate(MI, 4, SrcInst, 1, X86::MOV8mi);
+ case X86::MOV16ri: return Propagate(MI, 4, SrcInst, 1, X86::MOV16mi);
+ case X86::MOV32ri: return Propagate(MI, 4, SrcInst, 1, X86::MOV32mi);
default: break;
}
}
@@ -496,9 +496,9 @@
return true;
break;
- case X86::MOVrm32:
- case X86::MOVrm16:
- case X86::MOVrm8:
+ case X86::MOV32rm:
+ case X86::MOV16rm:
+ case X86::MOV8rm:
// If we can optimize the addressing expression, do so now.
if (OptimizeAddress(MI, 1))
return true;
Index: llvm/lib/Target/X86/InstSelectSimple.cpp
diff -u llvm/lib/Target/X86/InstSelectSimple.cpp:1.186 llvm/lib/Target/X86/InstSelectSimple.cpp:1.187
--- llvm/lib/Target/X86/InstSelectSimple.cpp:1.186 Sun Feb 29 01:22:16 2004
+++ llvm/lib/Target/X86/InstSelectSimple.cpp Sun Feb 29 02:50:03 2004
@@ -310,7 +310,7 @@
RegMap.erase(V); // Assign a new name to this constant if ref'd again
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
// Move the address of the global into the register
- BuildMI(*MBB, IPt, X86::MOVri32, 1, Reg).addGlobalAddress(GV);
+ BuildMI(*MBB, IPt, X86::MOV32ri, 1, Reg).addGlobalAddress(GV);
RegMap.erase(V); // Assign a new name to this address if ref'd again
}
@@ -427,19 +427,19 @@
if (Class == cLong) {
// Copy the value into the register pair.
uint64_t Val = cast<ConstantInt>(C)->getRawValue();
- BuildMI(*MBB, IP, X86::MOVri32, 1, R).addImm(Val & 0xFFFFFFFF);
- BuildMI(*MBB, IP, X86::MOVri32, 1, R+1).addImm(Val >> 32);
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(Val & 0xFFFFFFFF);
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, R+1).addImm(Val >> 32);
return;
}
assert(Class <= cInt && "Type not handled yet!");
static const unsigned IntegralOpcodeTab[] = {
- X86::MOVri8, X86::MOVri16, X86::MOVri32
+ X86::MOV8ri, X86::MOV16ri, X86::MOV32ri
};
if (C->getType() == Type::BoolTy) {
- BuildMI(*MBB, IP, X86::MOVri8, 1, R).addImm(C == ConstantBool::True);
+ BuildMI(*MBB, IP, X86::MOV8ri, 1, R).addImm(C == ConstantBool::True);
} else {
ConstantInt *CI = cast<ConstantInt>(C);
BuildMI(*MBB, IP, IntegralOpcodeTab[Class],1,R).addImm(CI->getRawValue());
@@ -456,15 +456,15 @@
const Type *Ty = CFP->getType();
assert(Ty == Type::FloatTy || Ty == Type::DoubleTy && "Unknown FP type!");
- unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLDm32 : X86::FLDm64;
+ unsigned LoadOpcode = Ty == Type::FloatTy ? X86::FLD32m : X86::FLD64m;
addConstantPoolReference(BuildMI(*MBB, IP, LoadOpcode, 4, R), CPI);
}
} else if (isa<ConstantPointerNull>(C)) {
// Copy zero (null pointer) to the register.
- BuildMI(*MBB, IP, X86::MOVri32, 1, R).addImm(0);
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addImm(0);
} else if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(C)) {
- BuildMI(*MBB, IP, X86::MOVri32, 1, R).addGlobalAddress(CPR->getValue());
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, R).addGlobalAddress(CPR->getValue());
} else {
std::cerr << "Offending constant: " << C << "\n";
assert(0 && "Type not handled yet!");
@@ -493,29 +493,29 @@
switch (getClassB(I->getType())) {
case cByte:
FI = MFI->CreateFixedObject(1, ArgOffset);
- addFrameReference(BuildMI(BB, X86::MOVrm8, 4, Reg), FI);
+ addFrameReference(BuildMI(BB, X86::MOV8rm, 4, Reg), FI);
break;
case cShort:
FI = MFI->CreateFixedObject(2, ArgOffset);
- addFrameReference(BuildMI(BB, X86::MOVrm16, 4, Reg), FI);
+ addFrameReference(BuildMI(BB, X86::MOV16rm, 4, Reg), FI);
break;
case cInt:
FI = MFI->CreateFixedObject(4, ArgOffset);
- addFrameReference(BuildMI(BB, X86::MOVrm32, 4, Reg), FI);
+ addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
break;
case cLong:
FI = MFI->CreateFixedObject(8, ArgOffset);
- addFrameReference(BuildMI(BB, X86::MOVrm32, 4, Reg), FI);
- addFrameReference(BuildMI(BB, X86::MOVrm32, 4, Reg+1), FI, 4);
+ addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg), FI);
+ addFrameReference(BuildMI(BB, X86::MOV32rm, 4, Reg+1), FI, 4);
ArgOffset += 4; // longs require 4 additional bytes
break;
case cFP:
unsigned Opcode;
if (I->getType() == Type::FloatTy) {
- Opcode = X86::FLDm32;
+ Opcode = X86::FLD32m;
FI = MFI->CreateFixedObject(4, ArgOffset);
} else {
- Opcode = X86::FLDm64;
+ Opcode = X86::FLD64m;
FI = MFI->CreateFixedObject(8, ArgOffset);
ArgOffset += 4; // doubles require 4 additional bytes
}
@@ -786,7 +786,7 @@
// !=. These should have been strength reduced already anyway.
if (Op1v == 0 && (CompTy->isSigned() || OpNum < 2)) {
static const unsigned TESTTab[] = {
- X86::TESTrr8, X86::TESTrr16, X86::TESTrr32
+ X86::TEST8rr, X86::TEST16rr, X86::TEST32rr
};
BuildMI(*MBB, IP, TESTTab[Class], 2).addReg(Op0r).addReg(Op0r);
@@ -796,7 +796,7 @@
}
static const unsigned CMPTab[] = {
- X86::CMPri8, X86::CMPri16, X86::CMPri32
+ X86::CMP8ri, X86::CMP16ri, X86::CMP32ri
};
BuildMI(*MBB, IP, CMPTab[Class], 2).addReg(Op0r).addImm(Op1v);
@@ -807,7 +807,7 @@
if (ConstantFP *CFP = dyn_cast<ConstantFP>(Op1))
if (CFP->isExactlyValue(+0.0) || CFP->isExactlyValue(-0.0)) {
BuildMI(*MBB, IP, X86::FTST, 1).addReg(Op0r);
- BuildMI(*MBB, IP, X86::FNSTSWr8, 0);
+ BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
BuildMI(*MBB, IP, X86::SAHF, 1);
return OpNum;
}
@@ -819,17 +819,17 @@
// compare 8-bit with 8-bit, 16-bit with 16-bit, 32-bit with
// 32-bit.
case cByte:
- BuildMI(*MBB, IP, X86::CMPrr8, 2).addReg(Op0r).addReg(Op1r);
+ BuildMI(*MBB, IP, X86::CMP8rr, 2).addReg(Op0r).addReg(Op1r);
break;
case cShort:
- BuildMI(*MBB, IP, X86::CMPrr16, 2).addReg(Op0r).addReg(Op1r);
+ BuildMI(*MBB, IP, X86::CMP16rr, 2).addReg(Op0r).addReg(Op1r);
break;
case cInt:
- BuildMI(*MBB, IP, X86::CMPrr32, 2).addReg(Op0r).addReg(Op1r);
+ BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
break;
case cFP:
BuildMI(*MBB, IP, X86::FpUCOM, 2).addReg(Op0r).addReg(Op1r);
- BuildMI(*MBB, IP, X86::FNSTSWr8, 0);
+ BuildMI(*MBB, IP, X86::FNSTSW8r, 0);
BuildMI(*MBB, IP, X86::SAHF, 1);
break;
@@ -838,9 +838,9 @@
unsigned LoTmp = makeAnotherReg(Type::IntTy);
unsigned HiTmp = makeAnotherReg(Type::IntTy);
unsigned FinalTmp = makeAnotherReg(Type::IntTy);
- BuildMI(*MBB, IP, X86::XORrr32, 2, LoTmp).addReg(Op0r).addReg(Op1r);
- BuildMI(*MBB, IP, X86::XORrr32, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1);
- BuildMI(*MBB, IP, X86::ORrr32, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
+ BuildMI(*MBB, IP, X86::XOR32rr, 2, LoTmp).addReg(Op0r).addReg(Op1r);
+ BuildMI(*MBB, IP, X86::XOR32rr, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1);
+ BuildMI(*MBB, IP, X86::OR32rr, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp);
break; // Allow the sete or setne to be generated from flags set by OR
} else {
// Emit a sequence of code which compares the high and low parts once
@@ -856,13 +856,13 @@
// classes! Until then, hardcode registers so that we can deal with their
// aliases (because we don't have conditional byte moves).
//
- BuildMI(*MBB, IP, X86::CMPrr32, 2).addReg(Op0r).addReg(Op1r);
+ BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r).addReg(Op1r);
BuildMI(*MBB, IP, SetCCOpcodeTab[0][OpNum], 0, X86::AL);
- BuildMI(*MBB, IP, X86::CMPrr32, 2).addReg(Op0r+1).addReg(Op1r+1);
+ BuildMI(*MBB, IP, X86::CMP32rr, 2).addReg(Op0r+1).addReg(Op1r+1);
BuildMI(*MBB, IP, SetCCOpcodeTab[CompTy->isSigned()][OpNum], 0, X86::BL);
BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::BH);
BuildMI(*MBB, IP, X86::IMPLICIT_DEF, 0, X86::AH);
- BuildMI(*MBB, IP, X86::CMOVErr16, 2, X86::BX).addReg(X86::BX)
+ BuildMI(*MBB, IP, X86::CMOVE16rr, 2, X86::BX).addReg(X86::BX)
.addReg(X86::AX);
// NOTE: visitSetCondInst knows that the value is dumped into the BL
// register at this point for long values...
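In C terms the two-compare/CMOVE16rr idiom above is (sketch for the
signed less-than case; other predicates are analogous):

    bool longLess(int32_t hi0, uint32_t lo0, int32_t hi1, uint32_t lo1) {
      bool low  = lo0 < lo1;            // SETcc on the low-word compare (AL)
      bool high = hi0 < hi1;            // SETcc on the high-word compare (BL)
      return hi0 == hi1 ? low : high;   // CMOVE keeps the low result on ZF
    }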
@@ -904,7 +904,7 @@
} else {
// Handle long comparisons by copying the value which is already in BL into
// the register we want...
- BuildMI(*MBB, IP, X86::MOVrr8, 1, TargetReg).addReg(X86::BL);
+ BuildMI(*MBB, IP, X86::MOV8rr, 1, TargetReg).addReg(X86::BL);
}
}
@@ -923,20 +923,20 @@
case cByte:
// Extend value into target register (8->32)
if (isUnsigned)
- BuildMI(BB, X86::MOVZXr32r8, 1, targetReg).addReg(Reg);
+ BuildMI(BB, X86::MOVZX32rr8, 1, targetReg).addReg(Reg);
else
- BuildMI(BB, X86::MOVSXr32r8, 1, targetReg).addReg(Reg);
+ BuildMI(BB, X86::MOVSX32rr8, 1, targetReg).addReg(Reg);
break;
case cShort:
// Extend value into target register (16->32)
if (isUnsigned)
- BuildMI(BB, X86::MOVZXr32r16, 1, targetReg).addReg(Reg);
+ BuildMI(BB, X86::MOVZX32rr16, 1, targetReg).addReg(Reg);
else
- BuildMI(BB, X86::MOVSXr32r16, 1, targetReg).addReg(Reg);
+ BuildMI(BB, X86::MOVSX32rr16, 1, targetReg).addReg(Reg);
break;
case cInt:
// Move value into target register (32->32)
- BuildMI(BB, X86::MOVrr32, 1, targetReg).addReg(Reg);
+ BuildMI(BB, X86::MOV32rr, 1, targetReg).addReg(Reg);
break;
default:
assert(0 && "Unpromotable operand class in promote32");
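In C terms, promote32 is just integral widening with the signedness of
the source type (sketch, byte case shown; the short case is analogous):

    #include <cstdint>
    int32_t promote(int8_t v, bool isUnsigned) {
      return isUnsigned ? (int32_t)(uint8_t)v  // MOVZX32rr8: zero-extend
                        : (int32_t)v;          // MOVSX32rr8: sign-extend
    }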
@@ -976,8 +976,8 @@
BuildMI(BB, X86::IMPLICIT_USE, 2).addReg(X86::ST0).addReg(X86::ESP);
break;
case cLong:
- BuildMI(BB, X86::MOVrr32, 1, X86::EAX).addReg(RetReg);
- BuildMI(BB, X86::MOVrr32, 1, X86::EDX).addReg(RetReg+1);
+ BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(RetReg);
+ BuildMI(BB, X86::MOV32rr, 1, X86::EDX).addReg(RetReg+1);
// Declare that EAX & EDX are live on exit
BuildMI(BB, X86::IMPLICIT_USE, 3).addReg(X86::EAX).addReg(X86::EDX)
.addReg(X86::ESP);
@@ -1016,7 +1016,7 @@
// Nope, cannot fold setcc into this branch. Emit a branch on a condition
// computed some other way...
unsigned condReg = getReg(BI.getCondition());
- BuildMI(BB, X86::CMPri8, 2).addReg(condReg).addImm(0);
+ BuildMI(BB, X86::CMP8ri, 2).addReg(condReg).addImm(0);
if (BI.getSuccessor(1) == NextBB) {
if (BI.getSuccessor(0) != NextBB)
BuildMI(BB, X86::JNE, 1).addPCDisp(BI.getSuccessor(0));
@@ -1105,29 +1105,29 @@
// Promote arg to 32 bits wide into a temporary register...
unsigned R = makeAnotherReg(Type::UIntTy);
promote32(R, Args[i]);
- addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
+ addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
X86::ESP, ArgOffset).addReg(R);
break;
}
case cInt:
- addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
+ addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
X86::ESP, ArgOffset).addReg(ArgReg);
break;
case cLong:
- addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
+ addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
X86::ESP, ArgOffset).addReg(ArgReg);
- addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
+ addRegOffset(BuildMI(BB, X86::MOV32mr, 5),
X86::ESP, ArgOffset+4).addReg(ArgReg+1);
ArgOffset += 4; // 8 byte entry, not 4.
break;
case cFP:
if (Args[i].Ty == Type::FloatTy) {
- addRegOffset(BuildMI(BB, X86::FSTm32, 5),
+ addRegOffset(BuildMI(BB, X86::FST32m, 5),
X86::ESP, ArgOffset).addReg(ArgReg);
} else {
assert(Args[i].Ty == Type::DoubleTy && "Unknown FP type!");
- addRegOffset(BuildMI(BB, X86::FSTm64, 5),
+ addRegOffset(BuildMI(BB, X86::FST64m, 5),
X86::ESP, ArgOffset).addReg(ArgReg);
ArgOffset += 4; // 8 byte entry, not 4.
}
@@ -1157,7 +1157,7 @@
// Integral results are in %eax, or the appropriate portion
// thereof.
static const unsigned regRegMove[] = {
- X86::MOVrr8, X86::MOVrr16, X86::MOVrr32
+ X86::MOV8rr, X86::MOV16rr, X86::MOV32rr
};
static const unsigned AReg[] = { X86::AL, X86::AX, X86::EAX };
BuildMI(BB, regRegMove[DestClass], 1, Ret.Reg).addReg(AReg[DestClass]);
@@ -1167,8 +1167,8 @@
BuildMI(BB, X86::FpGETRESULT, 1, Ret.Reg);
break;
case cLong: // Long values are left in EDX:EAX
- BuildMI(BB, X86::MOVrr32, 1, Ret.Reg).addReg(X86::EAX);
- BuildMI(BB, X86::MOVrr32, 1, Ret.Reg+1).addReg(X86::EDX);
+ BuildMI(BB, X86::MOV32rr, 1, Ret.Reg).addReg(X86::EAX);
+ BuildMI(BB, X86::MOV32rr, 1, Ret.Reg+1).addReg(X86::EDX);
break;
default: assert(0 && "Unknown class!");
}
@@ -1190,7 +1190,7 @@
TheCall = BuildMI(X86::CALLpcrel32, 1).addGlobalAddress(F, true);
} else { // Emit an indirect call...
unsigned Reg = getReg(CI.getCalledValue());
- TheCall = BuildMI(X86::CALLr32, 1).addReg(Reg);
+ TheCall = BuildMI(X86::CALL32r, 1).addReg(Reg);
}
std::vector<ValueRecord> Args;
@@ -1240,13 +1240,13 @@
case Intrinsic::va_start:
// Get the address of the first vararg value...
TmpReg1 = getReg(CI);
- addFrameReference(BuildMI(BB, X86::LEAr32, 5, TmpReg1), VarArgsFrameIndex);
+ addFrameReference(BuildMI(BB, X86::LEA32r, 5, TmpReg1), VarArgsFrameIndex);
return;
case Intrinsic::va_copy:
TmpReg1 = getReg(CI);
TmpReg2 = getReg(CI.getOperand(1));
- BuildMI(BB, X86::MOVrr32, 1, TmpReg1).addReg(TmpReg2);
+ BuildMI(BB, X86::MOV32rr, 1, TmpReg1).addReg(TmpReg2);
return;
case Intrinsic::va_end: return; // Noop on X86
@@ -1256,15 +1256,15 @@
if (cast<Constant>(CI.getOperand(1))->isNullValue()) {
if (ID == Intrinsic::returnaddress) {
// Just load the return address
- addFrameReference(BuildMI(BB, X86::MOVrm32, 4, TmpReg1),
+ addFrameReference(BuildMI(BB, X86::MOV32rm, 4, TmpReg1),
ReturnAddressIndex);
} else {
- addFrameReference(BuildMI(BB, X86::LEAr32, 4, TmpReg1),
+ addFrameReference(BuildMI(BB, X86::LEA32r, 4, TmpReg1),
ReturnAddressIndex, -4);
}
} else {
// Values other than zero are not implemented yet.
- BuildMI(BB, X86::MOVri32, 1, TmpReg1).addImm(0);
+ BuildMI(BB, X86::MOV32ri, 1, TmpReg1).addImm(0);
}
return;
@@ -1286,7 +1286,7 @@
} else {
CountReg = makeAnotherReg(Type::IntTy);
unsigned ByteReg = getReg(CI.getOperand(3));
- BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addImm(1);
+ BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
}
Opcode = X86::REP_MOVSW;
break;
@@ -1296,7 +1296,7 @@
} else {
CountReg = makeAnotherReg(Type::IntTy);
unsigned ByteReg = getReg(CI.getOperand(3));
- BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addImm(2);
+ BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
}
Opcode = X86::REP_MOVSD;
break;
@@ -1310,9 +1310,9 @@
// destination in EDI, and the count in ECX.
TmpReg1 = getReg(CI.getOperand(1));
TmpReg2 = getReg(CI.getOperand(2));
- BuildMI(BB, X86::MOVrr32, 1, X86::ECX).addReg(CountReg);
- BuildMI(BB, X86::MOVrr32, 1, X86::EDI).addReg(TmpReg1);
- BuildMI(BB, X86::MOVrr32, 1, X86::ESI).addReg(TmpReg2);
+ BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
+ BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
+ BuildMI(BB, X86::MOV32rr, 1, X86::ESI).addReg(TmpReg2);
BuildMI(BB, Opcode, 0);
return;
}
@@ -1338,9 +1338,9 @@
} else {
CountReg = makeAnotherReg(Type::IntTy);
unsigned ByteReg = getReg(CI.getOperand(3));
- BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addImm(1);
+ BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(1);
}
- BuildMI(BB, X86::MOVri16, 1, X86::AX).addImm((Val << 8) | Val);
+ BuildMI(BB, X86::MOV16ri, 1, X86::AX).addImm((Val << 8) | Val);
Opcode = X86::REP_STOSW;
break;
case 0: // DWORD aligned
@@ -1349,15 +1349,15 @@
} else {
CountReg = makeAnotherReg(Type::IntTy);
unsigned ByteReg = getReg(CI.getOperand(3));
- BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addImm(2);
+ BuildMI(BB, X86::SHR32ri, 2, CountReg).addReg(ByteReg).addImm(2);
}
Val = (Val << 8) | Val;
- BuildMI(BB, X86::MOVri32, 1, X86::EAX).addImm((Val << 16) | Val);
+ BuildMI(BB, X86::MOV32ri, 1, X86::EAX).addImm((Val << 16) | Val);
Opcode = X86::REP_STOSD;
break;
default: // BYTE aligned
CountReg = getReg(CI.getOperand(3));
- BuildMI(BB, X86::MOVri8, 1, X86::AL).addImm(Val);
+ BuildMI(BB, X86::MOV8ri, 1, X86::AL).addImm(Val);
Opcode = X86::REP_STOSB;
break;
}
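The constant-memset path above widens the fill byte by replication; the
pattern computation is simply:

    unsigned Val   = 0xAB;                    // the fill byte (example value)
    unsigned pat16 = (Val << 8) | Val;        // 0xABAB     -> MOV16ri AX,  REP_STOSW
    unsigned pat32 = (pat16 << 16) | pat16;   // 0xABABABAB -> MOV32ri EAX, REP_STOSD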
@@ -1365,7 +1365,7 @@
// If it's not a constant value we are storing, just fall back. We could
// try to be clever to form 16 bit and 32 bit values, but we don't yet.
unsigned ValReg = getReg(CI.getOperand(2));
- BuildMI(BB, X86::MOVrr8, 1, X86::AL).addReg(ValReg);
+ BuildMI(BB, X86::MOV8rr, 1, X86::AL).addReg(ValReg);
CountReg = getReg(CI.getOperand(3));
Opcode = X86::REP_STOSB;
}
@@ -1374,8 +1374,8 @@
// destination in EDI, and the count in ECX.
TmpReg1 = getReg(CI.getOperand(1));
//TmpReg2 = getReg(CI.getOperand(2));
- BuildMI(BB, X86::MOVrr32, 1, X86::ECX).addReg(CountReg);
- BuildMI(BB, X86::MOVrr32, 1, X86::EDI).addReg(TmpReg1);
+ BuildMI(BB, X86::MOV32rr, 1, X86::ECX).addReg(CountReg);
+ BuildMI(BB, X86::MOV32rr, 1, X86::EDI).addReg(TmpReg1);
BuildMI(BB, Opcode, 0);
return;
}
@@ -1416,13 +1416,13 @@
switch (Class) {
default: assert(0 && "Unknown class for this function!");
case cByte:
- BuildMI(*MBB, IP, X86::NEGr8, 1, DestReg).addReg(op1Reg);
+ BuildMI(*MBB, IP, X86::NEG8r, 1, DestReg).addReg(op1Reg);
return;
case cShort:
- BuildMI(*MBB, IP, X86::NEGr16, 1, DestReg).addReg(op1Reg);
+ BuildMI(*MBB, IP, X86::NEG16r, 1, DestReg).addReg(op1Reg);
return;
case cInt:
- BuildMI(*MBB, IP, X86::NEGr32, 1, DestReg).addReg(op1Reg);
+ BuildMI(*MBB, IP, X86::NEG32r, 1, DestReg).addReg(op1Reg);
return;
}
}
@@ -1437,13 +1437,13 @@
if (!isa<ConstantInt>(Op1) || Class == cLong) {
static const unsigned OpcodeTab[][4] = {
// Arithmetic operators
- { X86::ADDrr8, X86::ADDrr16, X86::ADDrr32, X86::FpADD }, // ADD
- { X86::SUBrr8, X86::SUBrr16, X86::SUBrr32, X86::FpSUB }, // SUB
+ { X86::ADD8rr, X86::ADD16rr, X86::ADD32rr, X86::FpADD }, // ADD
+ { X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB }, // SUB
// Bitwise operators
- { X86::ANDrr8, X86::ANDrr16, X86::ANDrr32, 0 }, // AND
- { X86:: ORrr8, X86:: ORrr16, X86:: ORrr32, 0 }, // OR
- { X86::XORrr8, X86::XORrr16, X86::XORrr32, 0 }, // XOR
+ { X86::AND8rr, X86::AND16rr, X86::AND32rr, 0 }, // AND
+ { X86:: OR8rr, X86:: OR16rr, X86:: OR32rr, 0 }, // OR
+ { X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0 }, // XOR
};
bool isLong = false;
@@ -1460,7 +1460,7 @@
if (isLong) { // Handle the upper 32 bits of long values...
static const unsigned TopTab[] = {
- X86::ADCrr32, X86::SBBrr32, X86::ANDrr32, X86::ORrr32, X86::XORrr32
+ X86::ADC32rr, X86::SBB32rr, X86::AND32rr, X86::OR32rr, X86::XOR32rr
};
BuildMI(*MBB, IP, TopTab[OperatorClass], 2,
DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
@@ -1474,34 +1474,34 @@
// xor X, -1 -> not X
if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
- static unsigned const NOTTab[] = { X86::NOTr8, X86::NOTr16, X86::NOTr32 };
+ static unsigned const NOTTab[] = { X86::NOT8r, X86::NOT16r, X86::NOT32r };
BuildMI(*MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
return;
}
// add X, -1 -> dec X
if (OperatorClass == 0 && Op1C->isAllOnesValue()) {
- static unsigned const DECTab[] = { X86::DECr8, X86::DECr16, X86::DECr32 };
+ static unsigned const DECTab[] = { X86::DEC8r, X86::DEC16r, X86::DEC32r };
BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
return;
}
// add X, 1 -> inc X
if (OperatorClass == 0 && Op1C->equalsInt(1)) {
- static unsigned const DECTab[] = { X86::INCr8, X86::INCr16, X86::INCr32 };
+ static unsigned const DECTab[] = { X86::INC8r, X86::INC16r, X86::INC32r };
BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
return;
}
static const unsigned OpcodeTab[][3] = {
// Arithmetic operators
- { X86::ADDri8, X86::ADDri16, X86::ADDri32 }, // ADD
- { X86::SUBri8, X86::SUBri16, X86::SUBri32 }, // SUB
+ { X86::ADD8ri, X86::ADD16ri, X86::ADD32ri }, // ADD
+ { X86::SUB8ri, X86::SUB16ri, X86::SUB32ri }, // SUB
// Bitwise operators
- { X86::ANDri8, X86::ANDri16, X86::ANDri32 }, // AND
- { X86:: ORri8, X86:: ORri16, X86:: ORri32 }, // OR
- { X86::XORri8, X86::XORri16, X86::XORri32 }, // XOR
+ { X86::AND8ri, X86::AND16ri, X86::AND32ri }, // AND
+ { X86:: OR8ri, X86:: OR16ri, X86:: OR32ri }, // OR
+ { X86::XOR8ri, X86::XOR16ri, X86::XOR32ri }, // XOR
};
assert(Class < 3 && "General code handles 64-bit integer types!");
@@ -1527,14 +1527,14 @@
return;
case cInt:
case cShort:
- BuildMI(*MBB, MBBI, Class == cInt ? X86::IMULrr32:X86::IMULrr16, 2, DestReg)
+ BuildMI(*MBB, MBBI, Class == cInt ? X86::IMUL32rr:X86::IMUL16rr, 2, DestReg)
.addReg(op0Reg).addReg(op1Reg);
return;
case cByte:
// Must use the MUL instruction, which forces use of AL...
- BuildMI(*MBB, MBBI, X86::MOVrr8, 1, X86::AL).addReg(op0Reg);
- BuildMI(*MBB, MBBI, X86::MULr8, 1).addReg(op1Reg);
- BuildMI(*MBB, MBBI, X86::MOVrr8, 1, DestReg).addReg(X86::AL);
+ BuildMI(*MBB, MBBI, X86::MOV8rr, 1, X86::AL).addReg(op0Reg);
+ BuildMI(*MBB, MBBI, X86::MUL8r, 1).addReg(op1Reg);
+ BuildMI(*MBB, MBBI, X86::MOV8rr, 1, DestReg).addReg(X86::AL);
return;
default:
case cLong: assert(0 && "doMultiply cannot operate on LONG values!");
@@ -1565,28 +1565,28 @@
switch (Class) {
default: assert(0 && "Unknown class for this function!");
case cByte:
- BuildMI(*MBB, IP, X86::SHLri32,2, DestReg).addReg(op0Reg).addImm(Shift-1);
+ BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
return;
case cShort:
- BuildMI(*MBB, IP, X86::SHLri32,2, DestReg).addReg(op0Reg).addImm(Shift-1);
+ BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
return;
case cInt:
- BuildMI(*MBB, IP, X86::SHLri32,2, DestReg).addReg(op0Reg).addImm(Shift-1);
+ BuildMI(*MBB, IP, X86::SHL32ri,2, DestReg).addReg(op0Reg).addImm(Shift-1);
return;
}
}
if (Class == cShort) {
- BuildMI(*MBB, IP, X86::IMULrri16,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
+ BuildMI(*MBB, IP, X86::IMUL16rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
return;
} else if (Class == cInt) {
- BuildMI(*MBB, IP, X86::IMULrri32,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
+ BuildMI(*MBB, IP, X86::IMUL32rri,2,DestReg).addReg(op0Reg).addImm(ConstRHS);
return;
}
// Most general case, emit a normal multiply...
static const unsigned MOVriTab[] = {
- X86::MOVri8, X86::MOVri16, X86::MOVri32
+ X86::MOV8ri, X86::MOV16ri, X86::MOV32ri
};
unsigned TmpReg = makeAnotherReg(DestTy);
@@ -1620,26 +1620,26 @@
// Long value. We have to do things the hard way...
// Multiply the two low parts... capturing carry into EDX
- BuildMI(BB, X86::MOVrr32, 1, X86::EAX).addReg(Op0Reg);
- BuildMI(BB, X86::MULr32, 1).addReg(Op1Reg); // AL*BL
+ BuildMI(BB, X86::MOV32rr, 1, X86::EAX).addReg(Op0Reg);
+ BuildMI(BB, X86::MUL32r, 1).addReg(Op1Reg); // AL*BL
unsigned OverflowReg = makeAnotherReg(Type::UIntTy);
- BuildMI(BB, X86::MOVrr32, 1, DestReg).addReg(X86::EAX); // AL*BL
- BuildMI(BB, X86::MOVrr32, 1, OverflowReg).addReg(X86::EDX); // AL*BL >> 32
+ BuildMI(BB, X86::MOV32rr, 1, DestReg).addReg(X86::EAX); // AL*BL
+ BuildMI(BB, X86::MOV32rr, 1, OverflowReg).addReg(X86::EDX); // AL*BL >> 32
MachineBasicBlock::iterator MBBI = BB->end();
unsigned AHBLReg = makeAnotherReg(Type::UIntTy); // AH*BL
- BuildMI(*BB, MBBI, X86::IMULrr32,2,AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg);
+ BuildMI(*BB, MBBI, X86::IMUL32rr,2,AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg);
unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy);
- BuildMI(*BB, MBBI, X86::ADDrr32, 2, // AH*BL+(AL*BL >> 32)
+ BuildMI(*BB, MBBI, X86::ADD32rr, 2, // AH*BL+(AL*BL >> 32)
AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg);
MBBI = BB->end();
unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH
- BuildMI(*BB, MBBI, X86::IMULrr32,2,ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1);
+ BuildMI(*BB, MBBI, X86::IMUL32rr,2,ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1);
- BuildMI(*BB, MBBI, X86::ADDrr32, 2, // AL*BH + AH*BL + (AL*BL >> 32)
+ BuildMI(*BB, MBBI, X86::ADD32rr, 2, // AL*BH + AH*BL + (AL*BL >> 32)
DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg);
}
}
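The sequence above computes only the low 64 bits of the product, so the
aH*bH cross term never matters; as plain C++ (a model of the emitted
code, not the code itself):

    #include <cstdint>
    uint64_t mul64lo(uint32_t aL, uint32_t aH, uint32_t bL, uint32_t bH) {
      uint64_t albl = (uint64_t)aL * bL;      // MUL32r: EDX:EAX = aL*bL
      uint32_t lo   = (uint32_t)albl;         // DestLo
      uint32_t hi   = (uint32_t)(albl >> 32)  // carry out of aL*bL
                    + aH * bL                 // IMUL32rr (wraps mod 2^32)
                    + aL * bH;                // IMUL32rr (wraps mod 2^32)
      return ((uint64_t)hi << 32) | lo;       // DestHi:DestLo
    }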
@@ -1698,14 +1698,14 @@
}
static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
- static const unsigned MovOpcode[]={ X86::MOVrr8, X86::MOVrr16, X86::MOVrr32 };
- static const unsigned SarOpcode[]={ X86::SARri8, X86::SARri16, X86::SARri32 };
- static const unsigned ClrOpcode[]={ X86::MOVri8, X86::MOVri16, X86::MOVri32 };
+ static const unsigned MovOpcode[]={ X86::MOV8rr, X86::MOV16rr, X86::MOV32rr };
+ static const unsigned SarOpcode[]={ X86::SAR8ri, X86::SAR16ri, X86::SAR32ri };
+ static const unsigned ClrOpcode[]={ X86::MOV8ri, X86::MOV16ri, X86::MOV32ri };
static const unsigned ExtRegs[] ={ X86::AH , X86::DX , X86::EDX };
static const unsigned DivOpcode[][4] = {
- { X86::DIVr8 , X86::DIVr16 , X86::DIVr32 , 0 }, // Unsigned division
- { X86::IDIVr8, X86::IDIVr16, X86::IDIVr32, 0 }, // Signed division
+ { X86::DIV8r , X86::DIV16r , X86::DIV32r , 0 }, // Unsigned division
+ { X86::IDIV8r, X86::IDIV16r, X86::IDIV32r, 0 }, // Signed division
};
bool isSigned = Ty->isSigned();
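Before the divide itself, the high half of the dividend is set up from
those tables; conceptually (sketch -- the shift amount is assumed to be
width-1, shown here for the 32-bit case):

    #include <cstdint>
    int32_t  hiSigned(int32_t lo) { return lo >> 31; } // SAR: replicate sign bit, for IDIV
    uint32_t hiUnsigned()         { return 0; }        // clear the extension reg, for DIV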
@@ -1759,17 +1759,17 @@
unsigned Class = getClass (ResultTy);
static const unsigned ConstantOperand[][4] = {
- { X86::SHRri8, X86::SHRri16, X86::SHRri32, X86::SHRDrr32i8 }, // SHR
- { X86::SARri8, X86::SARri16, X86::SARri32, X86::SHRDrr32i8 }, // SAR
- { X86::SHLri8, X86::SHLri16, X86::SHLri32, X86::SHLDrr32i8 }, // SHL
- { X86::SHLri8, X86::SHLri16, X86::SHLri32, X86::SHLDrr32i8 }, // SAL = SHL
+ { X86::SHR8ri, X86::SHR16ri, X86::SHR32ri, X86::SHRD32rri8 }, // SHR
+ { X86::SAR8ri, X86::SAR16ri, X86::SAR32ri, X86::SHRD32rri8 }, // SAR
+ { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 }, // SHL
+ { X86::SHL8ri, X86::SHL16ri, X86::SHL32ri, X86::SHLD32rri8 }, // SAL = SHL
};
static const unsigned NonConstantOperand[][4] = {
- { X86::SHRrCL8, X86::SHRrCL16, X86::SHRrCL32 }, // SHR
- { X86::SARrCL8, X86::SARrCL16, X86::SARrCL32 }, // SAR
- { X86::SHLrCL8, X86::SHLrCL16, X86::SHLrCL32 }, // SHL
- { X86::SHLrCL8, X86::SHLrCL16, X86::SHLrCL32 }, // SAL = SHL
+ { X86::SHR8rCL, X86::SHR16rCL, X86::SHR32rCL }, // SHR
+ { X86::SAR8rCL, X86::SAR16rCL, X86::SAR32rCL }, // SAR
+ { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL }, // SHL
+ { X86::SHL8rCL, X86::SHL16rCL, X86::SHL32rCL }, // SAL = SHL
};
// Longs, as usual, are handled specially...
@@ -1793,14 +1793,14 @@
} else { // Shifting more than 32 bits
Amount -= 32;
if (isLeftShift) {
- BuildMI(*MBB, IP, X86::SHLri32, 2,
+ BuildMI(*MBB, IP, X86::SHL32ri, 2,
DestReg + 1).addReg(SrcReg).addImm(Amount);
- BuildMI(*MBB, IP, X86::MOVri32, 1,
+ BuildMI(*MBB, IP, X86::MOV32ri, 1,
DestReg).addImm(0);
} else {
- unsigned Opcode = isSigned ? X86::SARri32 : X86::SHRri32;
+ unsigned Opcode = isSigned ? X86::SAR32ri : X86::SHR32ri;
BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(SrcReg+1).addImm(Amount);
- BuildMI(*MBB, IP, X86::MOVri32, 1, DestReg+1).addImm(0);
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
}
}
} else {
@@ -1810,52 +1810,52 @@
// If this is a SHR of a Long, then we need to do funny sign extension
// stuff. TmpReg gets the value to use as the high-part if we are
// shifting more than 32 bits.
- BuildMI(*MBB, IP, X86::SARri32, 2, TmpReg).addReg(SrcReg).addImm(31);
+ BuildMI(*MBB, IP, X86::SAR32ri, 2, TmpReg).addReg(SrcReg).addImm(31);
} else {
// Other shifts use a fixed zero value if the shift is more than 32
// bits.
- BuildMI(*MBB, IP, X86::MOVri32, 1, TmpReg).addImm(0);
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, TmpReg).addImm(0);
}
// Initialize CL with the shift amount...
unsigned ShiftAmountReg = getReg(ShiftAmount, MBB, IP);
- BuildMI(*MBB, IP, X86::MOVrr8, 1, X86::CL).addReg(ShiftAmountReg);
+ BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
unsigned TmpReg2 = makeAnotherReg(Type::IntTy);
unsigned TmpReg3 = makeAnotherReg(Type::IntTy);
if (isLeftShift) {
// TmpReg2 = shld inHi, inLo
- BuildMI(*MBB, IP, X86::SHLDrrCL32,2,TmpReg2).addReg(SrcReg+1)
+ BuildMI(*MBB, IP, X86::SHLD32rrCL,2,TmpReg2).addReg(SrcReg+1)
.addReg(SrcReg);
// TmpReg3 = shl inLo, CL
- BuildMI(*MBB, IP, X86::SHLrCL32, 1, TmpReg3).addReg(SrcReg);
+ BuildMI(*MBB, IP, X86::SHL32rCL, 1, TmpReg3).addReg(SrcReg);
// Set the flags to indicate whether the shift was by more than 32 bits.
- BuildMI(*MBB, IP, X86::TESTri8, 2).addReg(X86::CL).addImm(32);
+ BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
// DestHi = (>32) ? TmpReg3 : TmpReg2;
- BuildMI(*MBB, IP, X86::CMOVNErr32, 2,
+ BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
DestReg+1).addReg(TmpReg2).addReg(TmpReg3);
// DestLo = (>32) ? TmpReg : TmpReg3;
- BuildMI(*MBB, IP, X86::CMOVNErr32, 2,
+ BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
DestReg).addReg(TmpReg3).addReg(TmpReg);
} else {
// TmpReg2 = shrd inLo, inHi
- BuildMI(*MBB, IP, X86::SHRDrrCL32,2,TmpReg2).addReg(SrcReg)
+ BuildMI(*MBB, IP, X86::SHRD32rrCL,2,TmpReg2).addReg(SrcReg)
.addReg(SrcReg+1);
// TmpReg3 = s[ah]r inHi, CL
- BuildMI(*MBB, IP, isSigned ? X86::SARrCL32 : X86::SHRrCL32, 1, TmpReg3)
+ BuildMI(*MBB, IP, isSigned ? X86::SAR32rCL : X86::SHR32rCL, 1, TmpReg3)
.addReg(SrcReg+1);
// Set the flags to indicate whether the shift was by more than 32 bits.
- BuildMI(*MBB, IP, X86::TESTri8, 2).addReg(X86::CL).addImm(32);
+ BuildMI(*MBB, IP, X86::TEST8ri, 2).addReg(X86::CL).addImm(32);
// DestLo = (>32) ? TmpReg3 : TmpReg2;
- BuildMI(*MBB, IP, X86::CMOVNErr32, 2,
+ BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
DestReg).addReg(TmpReg2).addReg(TmpReg3);
// DestHi = (>32) ? TmpReg : TmpReg3;
- BuildMI(*MBB, IP, X86::CMOVNErr32, 2,
+ BuildMI(*MBB, IP, X86::CMOVNE32rr, 2,
DestReg+1).addReg(TmpReg3).addReg(TmpReg);
}
}
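The variable-amount case relies on the hardware masking CL to five bits,
hence the explicit TEST8ri/CMOVNE32rr select on bit 5. A C++ model of the
left shift (assuming 0 <= s < 64):

    #include <cstdint>
    uint64_t shl64(uint32_t lo, uint32_t hi, unsigned s) {
      unsigned k  = s & 31;                                 // what SHLD/SHL see in CL
      uint32_t t2 = k ? (hi << k) | (lo >> (32 - k)) : hi;  // SHLD32rrCL
      uint32_t t3 = lo << k;                                // SHL32rCL
      if (s & 32) return (uint64_t)t3 << 32;                // CMOVNE: hi = t3, lo = 0
      return ((uint64_t)t2 << 32) | t3;                     // otherwise hi = t2, lo = t3
    }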
@@ -1871,7 +1871,7 @@
DestReg).addReg(SrcReg).addImm(CUI->getValue());
} else { // The shift amount is non-constant.
unsigned ShiftAmountReg = getReg (ShiftAmount, MBB, IP);
- BuildMI(*MBB, IP, X86::MOVrr8, 1, X86::CL).addReg(ShiftAmountReg);
+ BuildMI(*MBB, IP, X86::MOV8rr, 1, X86::CL).addReg(ShiftAmountReg);
const unsigned *Opc = NonConstantOperand[isLeftShift*2+isSigned];
BuildMI(*MBB, IP, Opc[Class], 1, DestReg).addReg(SrcReg);
@@ -1906,18 +1906,18 @@
unsigned Class = getClassB(I.getType());
if (Class == cLong) {
- addFullAddress(BuildMI(BB, X86::MOVrm32, 4, DestReg),
+ addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg),
BaseReg, Scale, IndexReg, Disp);
- addFullAddress(BuildMI(BB, X86::MOVrm32, 4, DestReg+1),
+ addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg+1),
BaseReg, Scale, IndexReg, Disp+4);
return;
}
static const unsigned Opcodes[] = {
- X86::MOVrm8, X86::MOVrm16, X86::MOVrm32, X86::FLDm32
+ X86::MOV8rm, X86::MOV16rm, X86::MOV32rm, X86::FLD32m
};
unsigned Opcode = Opcodes[Class];
- if (I.getType() == Type::DoubleTy) Opcode = X86::FLDm64;
+ if (I.getType() == Type::DoubleTy) Opcode = X86::FLD64m;
addFullAddress(BuildMI(BB, Opcode, 4, DestReg),
BaseReg, Scale, IndexReg, Disp);
}
@@ -1951,35 +1951,35 @@
if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(0))) {
uint64_t Val = CI->getRawValue();
if (Class == cLong) {
- addFullAddress(BuildMI(BB, X86::MOVmi32, 5),
+ addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
BaseReg, Scale, IndexReg, Disp).addImm(Val & ~0U);
- addFullAddress(BuildMI(BB, X86::MOVmi32, 5),
+ addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
BaseReg, Scale, IndexReg, Disp+4).addImm(Val>>32);
} else {
static const unsigned Opcodes[] = {
- X86::MOVmi8, X86::MOVmi16, X86::MOVmi32
+ X86::MOV8mi, X86::MOV16mi, X86::MOV32mi
};
unsigned Opcode = Opcodes[Class];
addFullAddress(BuildMI(BB, Opcode, 5),
BaseReg, Scale, IndexReg, Disp).addImm(Val);
}
} else if (ConstantBool *CB = dyn_cast<ConstantBool>(I.getOperand(0))) {
- addFullAddress(BuildMI(BB, X86::MOVmi8, 5),
+ addFullAddress(BuildMI(BB, X86::MOV8mi, 5),
BaseReg, Scale, IndexReg, Disp).addImm(CB->getValue());
} else {
if (Class == cLong) {
unsigned ValReg = getReg(I.getOperand(0));
- addFullAddress(BuildMI(BB, X86::MOVmr32, 5),
+ addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
- addFullAddress(BuildMI(BB, X86::MOVmr32, 5),
+ addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
BaseReg, Scale, IndexReg, Disp+4).addReg(ValReg+1);
} else {
unsigned ValReg = getReg(I.getOperand(0));
static const unsigned Opcodes[] = {
- X86::MOVmr8, X86::MOVmr16, X86::MOVmr32, X86::FSTm32
+ X86::MOV8mr, X86::MOV16mr, X86::MOV32mr, X86::FST32m
};
unsigned Opcode = Opcodes[Class];
- if (ValTy == Type::DoubleTy) Opcode = X86::FSTm64;
+ if (ValTy == Type::DoubleTy) Opcode = X86::FST64m;
addFullAddress(BuildMI(BB, Opcode, 1+4),
BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
}
@@ -2028,22 +2028,22 @@
if (DestTy == Type::BoolTy) {
switch (SrcClass) {
case cByte:
- BuildMI(*BB, IP, X86::TESTrr8, 2).addReg(SrcReg).addReg(SrcReg);
+ BuildMI(*BB, IP, X86::TEST8rr, 2).addReg(SrcReg).addReg(SrcReg);
break;
case cShort:
- BuildMI(*BB, IP, X86::TESTrr16, 2).addReg(SrcReg).addReg(SrcReg);
+ BuildMI(*BB, IP, X86::TEST16rr, 2).addReg(SrcReg).addReg(SrcReg);
break;
case cInt:
- BuildMI(*BB, IP, X86::TESTrr32, 2).addReg(SrcReg).addReg(SrcReg);
+ BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg).addReg(SrcReg);
break;
case cLong: {
unsigned TmpReg = makeAnotherReg(Type::IntTy);
- BuildMI(*BB, IP, X86::ORrr32, 2, TmpReg).addReg(SrcReg).addReg(SrcReg+1);
+ BuildMI(*BB, IP, X86::OR32rr, 2, TmpReg).addReg(SrcReg).addReg(SrcReg+1);
break;
}
case cFP:
BuildMI(*BB, IP, X86::FTST, 1).addReg(SrcReg);
- BuildMI(*BB, IP, X86::FNSTSWr8, 0);
+ BuildMI(*BB, IP, X86::FNSTSW8r, 0);
BuildMI(*BB, IP, X86::SAHF, 1);
break;
}
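
Casting to bool amounts to a compare against zero: TEST reg,reg sets ZF
without a separate CMP, and for a long the two halves are ORed first so ZF
reflects the whole 64-bit value. In the x87 case, FTST compares ST(0) with
zero, FNSTSW copies the status word into AX, and SAHF transfers it to
EFLAGS. A one-function model of the cLong case (illustrative only):

    #include <stdint.h>

    /* ZF after the OR32rr above is clear iff the full long is nonzero. */
    static bool longToBool(uint32_t lo, uint32_t hi) {
      return (lo | hi) != 0;
    }
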
@@ -2055,7 +2055,7 @@
}
static const unsigned RegRegMove[] = {
- X86::MOVrr8, X86::MOVrr16, X86::MOVrr32, X86::FpMOV, X86::MOVrr32
+ X86::MOV8rr, X86::MOV16rr, X86::MOV32rr, X86::FpMOV, X86::MOV32rr
};
// Implement casts between values of the same type class (as determined by
@@ -2074,12 +2074,12 @@
// reading it back.
unsigned FltAlign = TM.getTargetData().getFloatAlignment();
int FrameIdx = F->getFrameInfo()->CreateStackObject(4, FltAlign);
- addFrameReference(BuildMI(*BB, IP, X86::FSTm32, 5), FrameIdx).addReg(SrcReg);
- addFrameReference(BuildMI(*BB, IP, X86::FLDm32, 5, DestReg), FrameIdx);
+ addFrameReference(BuildMI(*BB, IP, X86::FST32m, 5), FrameIdx).addReg(SrcReg);
+ addFrameReference(BuildMI(*BB, IP, X86::FLD32m, 5, DestReg), FrameIdx);
}
} else if (SrcClass == cLong) {
- BuildMI(*BB, IP, X86::MOVrr32, 1, DestReg).addReg(SrcReg);
- BuildMI(*BB, IP, X86::MOVrr32, 1, DestReg+1).addReg(SrcReg+1);
+ BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
+ BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg+1).addReg(SrcReg+1);
} else {
assert(0 && "Cannot handle this type of cast instruction!");
abort();
@@ -2095,8 +2095,8 @@
if (isLong) DestClass = cInt;
static const unsigned Opc[][4] = {
- { X86::MOVSXr16r8, X86::MOVSXr32r8, X86::MOVSXr32r16, X86::MOVrr32 }, // s
- { X86::MOVZXr16r8, X86::MOVZXr32r8, X86::MOVZXr32r16, X86::MOVrr32 } // u
+ { X86::MOVSX16rr8, X86::MOVSX32rr8, X86::MOVSX32rr16, X86::MOV32rr }, // s
+ { X86::MOVZX16rr8, X86::MOVZX32rr8, X86::MOVZX32rr16, X86::MOV32rr } // u
};
bool isUnsigned = SrcTy->isUnsigned();
@@ -2105,16 +2105,16 @@
if (isLong) { // Handle upper 32 bits as appropriate...
if (isUnsigned) // Zero out top bits...
- BuildMI(*BB, IP, X86::MOVri32, 1, DestReg+1).addImm(0);
+ BuildMI(*BB, IP, X86::MOV32ri, 1, DestReg+1).addImm(0);
else // Sign extend bottom half...
- BuildMI(*BB, IP, X86::SARri32, 2, DestReg+1).addReg(DestReg).addImm(31);
+ BuildMI(*BB, IP, X86::SAR32ri, 2, DestReg+1).addReg(DestReg).addImm(31);
}
return;
}
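
Extending to long costs at most one extra instruction beyond the 32-bit
extend: the high word is either zeroed (unsigned) or filled with copies of
the sign bit by an arithmetic shift of 31 (signed). A sketch, assuming the
usual arithmetic right shift on signed int (true on x86):

    #include <stdint.h>

    static int32_t highWordOfSext(int32_t lo) {
      return lo >> 31;   /* SAR32ri 31: 0 if lo >= 0, -1 if lo < 0 */
    }
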
// Special case long -> int ...
if (SrcClass == cLong && DestClass == cInt) {
- BuildMI(*BB, IP, X86::MOVrr32, 1, DestReg).addReg(SrcReg);
+ BuildMI(*BB, IP, X86::MOV32rr, 1, DestReg).addReg(SrcReg);
return;
}
@@ -2143,21 +2143,21 @@
// We don't have the facilities for directly loading byte sized data from
// memory (even signed). Promote it to 16 bits.
PromoteType = Type::ShortTy;
- PromoteOpcode = X86::MOVSXr16r8;
+ PromoteOpcode = X86::MOVSX16rr8;
break;
case Type::UByteTyID:
PromoteType = Type::ShortTy;
- PromoteOpcode = X86::MOVZXr16r8;
+ PromoteOpcode = X86::MOVZX16rr8;
break;
case Type::UShortTyID:
PromoteType = Type::IntTy;
- PromoteOpcode = X86::MOVZXr32r16;
+ PromoteOpcode = X86::MOVZX32rr16;
break;
case Type::UIntTyID: {
// Make a 64 bit temporary... and zero out the top of it...
unsigned TmpReg = makeAnotherReg(Type::LongTy);
- BuildMI(*BB, IP, X86::MOVrr32, 1, TmpReg).addReg(SrcReg);
- BuildMI(*BB, IP, X86::MOVri32, 1, TmpReg+1).addImm(0);
+ BuildMI(*BB, IP, X86::MOV32rr, 1, TmpReg).addReg(SrcReg);
+ BuildMI(*BB, IP, X86::MOV32ri, 1, TmpReg+1).addImm(0);
SrcTy = Type::LongTy;
SrcClass = cLong;
SrcReg = TmpReg;
@@ -2173,7 +2173,7 @@
if (PromoteType) {
unsigned TmpReg = makeAnotherReg(PromoteType);
- unsigned Opc = SrcTy->isSigned() ? X86::MOVSXr16r8 : X86::MOVZXr16r8;
+ unsigned Opc = SrcTy->isSigned() ? X86::MOVSX16rr8 : X86::MOVZX16rr8;
BuildMI(*BB, IP, Opc, 1, TmpReg).addReg(SrcReg);
SrcTy = PromoteType;
SrcClass = getClass(PromoteType);
@@ -2185,18 +2185,18 @@
F->getFrameInfo()->CreateStackObject(SrcTy, TM.getTargetData());
if (SrcClass == cLong) {
- addFrameReference(BuildMI(*BB, IP, X86::MOVmr32, 5),
+ addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
FrameIdx).addReg(SrcReg);
- addFrameReference(BuildMI(*BB, IP, X86::MOVmr32, 5),
+ addFrameReference(BuildMI(*BB, IP, X86::MOV32mr, 5),
FrameIdx, 4).addReg(SrcReg+1);
} else {
- static const unsigned Op1[] = { X86::MOVmr8, X86::MOVmr16, X86::MOVmr32 };
+ static const unsigned Op1[] = { X86::MOV8mr, X86::MOV16mr, X86::MOV32mr };
addFrameReference(BuildMI(*BB, IP, Op1[SrcClass], 5),
FrameIdx).addReg(SrcReg);
}
static const unsigned Op2[] =
- { 0/*byte*/, X86::FILDm16, X86::FILDm32, 0/*FP*/, X86::FILDm64 };
+ { 0/*byte*/, X86::FILD16m, X86::FILD32m, 0/*FP*/, X86::FILD64m };
addFrameReference(BuildMI(*BB, IP, Op2[SrcClass], 5, DestReg), FrameIdx);
// We need special handling for unsigned 64-bit integer sources. If the
@@ -2204,27 +2204,27 @@
// negative 64-bit number. In this case, add an offset value.
if (SrcTy == Type::ULongTy) {
// Emit a test instruction to see if the dynamic input value was signed.
- BuildMI(*BB, IP, X86::TESTrr32, 2).addReg(SrcReg+1).addReg(SrcReg+1);
+ BuildMI(*BB, IP, X86::TEST32rr, 2).addReg(SrcReg+1).addReg(SrcReg+1);
// If the sign bit is set, get a pointer to an offset, otherwise get a
// pointer to a zero.
MachineConstantPool *CP = F->getConstantPool();
unsigned Zero = makeAnotherReg(Type::IntTy);
Constant *Null = Constant::getNullValue(Type::UIntTy);
- addConstantPoolReference(BuildMI(*BB, IP, X86::LEAr32, 5, Zero),
+ addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Zero),
CP->getConstantPoolIndex(Null));
unsigned Offset = makeAnotherReg(Type::IntTy);
Constant *OffsetCst = ConstantUInt::get(Type::UIntTy, 0x5f800000);
- addConstantPoolReference(BuildMI(*BB, IP, X86::LEAr32, 5, Offset),
+ addConstantPoolReference(BuildMI(*BB, IP, X86::LEA32r, 5, Offset),
CP->getConstantPoolIndex(OffsetCst));
unsigned Addr = makeAnotherReg(Type::IntTy);
- BuildMI(*BB, IP, X86::CMOVSrr32, 2, Addr).addReg(Zero).addReg(Offset);
+ BuildMI(*BB, IP, X86::CMOVS32rr, 2, Addr).addReg(Zero).addReg(Offset);
// Load the constant for an add. FIXME: this could make an 'fadd' that
// reads directly from memory, but we don't support these yet.
unsigned ConstReg = makeAnotherReg(Type::DoubleTy);
- addDirectMem(BuildMI(*BB, IP, X86::FLDm32, 4, ConstReg), Addr);
+ addDirectMem(BuildMI(*BB, IP, X86::FLD32m, 4, ConstReg), Addr);
BuildMI(*BB, IP, X86::FpADD, 2, RealDestReg)
.addReg(ConstReg).addReg(DestReg);
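
The magic constant 0x5f800000 is the IEEE-754 single-precision bit pattern
for 2^64 (sign 0, biased exponent 191, mantissa 0). FILD always treats its
64-bit source as signed, so an unsigned value with the top bit set arrives
as value - 2^64; conditionally adding 2^64 back restores the intended
magnitude. A small check of the constant (illustrative, not patch code):

    #include <stdint.h>
    #include <string.h>

    static float twoTo64(void) {
      uint32_t bits = 0x5f800000u;  /* exponent 191 - 127 = 64 */
      float f;
      memcpy(&f, &bits, sizeof f);
      return f;                     /* 18446744073709551616.0f == 2^64 */
    }
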
@@ -2239,22 +2239,22 @@
// mode when truncating to an integer value.
//
int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
- addFrameReference(BuildMI(*BB, IP, X86::FNSTCWm16, 4), CWFrameIdx);
+ addFrameReference(BuildMI(*BB, IP, X86::FNSTCW16m, 4), CWFrameIdx);
// Load the old value of the high byte of the control word...
unsigned HighPartOfCW = makeAnotherReg(Type::UByteTy);
- addFrameReference(BuildMI(*BB, IP, X86::MOVrm8, 4, HighPartOfCW),
+ addFrameReference(BuildMI(*BB, IP, X86::MOV8rm, 4, HighPartOfCW),
CWFrameIdx, 1);
// Set the high part to be round to zero...
- addFrameReference(BuildMI(*BB, IP, X86::MOVmi8, 5),
+ addFrameReference(BuildMI(*BB, IP, X86::MOV8mi, 5),
CWFrameIdx, 1).addImm(12);
// Reload the modified control word now...
- addFrameReference(BuildMI(*BB, IP, X86::FLDCWm16, 4), CWFrameIdx);
+ addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);
// Restore the memory image of control word to original value
- addFrameReference(BuildMI(*BB, IP, X86::MOVmr8, 5),
+ addFrameReference(BuildMI(*BB, IP, X86::MOV8mr, 5),
CWFrameIdx, 1).addReg(HighPartOfCW);
// We don't have the facilities for directly storing byte sized data to
@@ -2279,21 +2279,21 @@
F->getFrameInfo()->CreateStackObject(StoreTy, TM.getTargetData());
static const unsigned Op1[] =
- { 0, X86::FISTm16, X86::FISTm32, 0, X86::FISTPm64 };
+ { 0, X86::FIST16m, X86::FIST32m, 0, X86::FISTP64m };
addFrameReference(BuildMI(*BB, IP, Op1[StoreClass], 5),
FrameIdx).addReg(SrcReg);
if (DestClass == cLong) {
- addFrameReference(BuildMI(*BB, IP, X86::MOVrm32, 4, DestReg), FrameIdx);
- addFrameReference(BuildMI(*BB, IP, X86::MOVrm32, 4, DestReg+1),
+ addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg), FrameIdx);
+ addFrameReference(BuildMI(*BB, IP, X86::MOV32rm, 4, DestReg+1),
FrameIdx, 4);
} else {
- static const unsigned Op2[] = { X86::MOVrm8, X86::MOVrm16, X86::MOVrm32 };
+ static const unsigned Op2[] = { X86::MOV8rm, X86::MOV16rm, X86::MOV32rm };
addFrameReference(BuildMI(*BB, IP, Op2[DestClass], 4, DestReg), FrameIdx);
}
// Reload the original control word now...
- addFrameReference(BuildMI(*BB, IP, X86::FLDCWm16, 4), CWFrameIdx);
+ addFrameReference(BuildMI(*BB, IP, X86::FLDCW16m, 4), CWFrameIdx);
return;
}
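
The byte value 12 written into the high half of the control word selects
the rounding mode: bits 10-11 of the x87 CW are the rounding-control field,
which land at bits 2-3 of the high byte, so 12 (binary 1100) sets RC = 11,
round toward zero, which is what C's float-to-integer truncation requires.
A hypothetical helper mirroring the CW edit:

    #include <stdint.h>

    /* FNSTCW16m saves cw; this is the value FLDCW16m reloads. */
    static uint16_t roundTowardZero(uint16_t cw) {
      return (uint16_t)((cw & 0x00FFu) | (12u << 8));  /* high byte = 12 */
    }
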
@@ -2327,7 +2327,7 @@
}
// Increment the VAList pointer...
- BuildMI(BB, X86::ADDri32, 2, DestReg).addReg(VAList).addImm(Size);
+ BuildMI(BB, X86::ADD32ri, 2, DestReg).addReg(VAList).addImm(Size);
}
void ISel::visitVAArgInst(VAArgInst &I) {
@@ -2342,15 +2342,15 @@
case Type::PointerTyID:
case Type::UIntTyID:
case Type::IntTyID:
- addDirectMem(BuildMI(BB, X86::MOVrm32, 4, DestReg), VAList);
+ addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
break;
case Type::ULongTyID:
case Type::LongTyID:
- addDirectMem(BuildMI(BB, X86::MOVrm32, 4, DestReg), VAList);
- addRegOffset(BuildMI(BB, X86::MOVrm32, 4, DestReg+1), VAList, 4);
+ addDirectMem(BuildMI(BB, X86::MOV32rm, 4, DestReg), VAList);
+ addRegOffset(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), VAList, 4);
break;
case Type::DoubleTyID:
- addDirectMem(BuildMI(BB, X86::FLDm64, 4, DestReg), VAList);
+ addDirectMem(BuildMI(BB, X86::FLD64m, 4, DestReg), VAList);
break;
}
}
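
On 32-bit x86 every variadic argument lives in the caller's stack frame, so
va_arg lowers to plain loads through the list pointer (two 32-bit loads for
long, an FLD64m for double), while the separate vanext step above bumps the
pointer by the argument's promoted size with ADD32ri. An illustrative model,
not the patch's code:

    #include <string.h>

    static int vaArgInt(char **ap) {
      int v;
      memcpy(&v, *ap, sizeof v);   /* MOV32rm from [VAList] */
      *ap += 4;                    /* ADD32ri VAList, 4 (the vanext step) */
      return v;
    }
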
@@ -2532,9 +2532,9 @@
}
if (IndexReg == 0 && Disp == 0)
- BuildMI(*MBB, IP, X86::MOVrr32, 1, TargetReg).addReg(BaseReg);
+ BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
else
- addFullAddress(BuildMI(*MBB, IP, X86::LEAr32, 5, TargetReg),
+ addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TargetReg),
BaseReg, Scale, IndexReg, Disp);
--IP;
TargetReg = NextTarget;
@@ -2543,10 +2543,10 @@
// all operands are consumed but the base pointer. If so, just load it
// into the register.
if (GlobalValue *GV = dyn_cast<GlobalValue>(GEPOps[0])) {
- BuildMI(*MBB, IP, X86::MOVri32, 1, TargetReg).addGlobalAddress(GV);
+ BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addGlobalAddress(GV);
} else {
unsigned BaseReg = getReg(GEPOps[0], MBB, IP);
- BuildMI(*MBB, IP, X86::MOVrr32, 1, TargetReg).addReg(BaseReg);
+ BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
}
break; // we are now done
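
Folding a whole GEP step into LEA32r works because the x86 addressing mode
already encodes base + scale*index + disp, so the address arithmetic costs
one instruction and no memory access; the MOV32rr special case avoids a
pointless "lea reg, [base]" when there is nothing to add. What the
addFullAddress form evaluates, spelled out as a sketch:

    #include <stdint.h>

    /* scale is restricted to 1, 2, 4, or 8 by the encoding */
    static uint32_t lea(uint32_t base, uint32_t scale,
                        uint32_t index, int32_t disp) {
      return base + scale * index + (uint32_t)disp;
    }
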
@@ -2580,7 +2580,7 @@
if (!CSI->isNullValue()) {
unsigned Offset = elementSize*CSI->getValue();
unsigned Reg = makeAnotherReg(Type::UIntTy);
- BuildMI(*MBB, IP, X86::ADDri32, 2, TargetReg)
+ BuildMI(*MBB, IP, X86::ADD32ri, 2, TargetReg)
.addReg(Reg).addImm(Offset);
--IP; // Insert the next instruction before this one.
TargetReg = Reg; // Codegen the rest of the GEP into this
@@ -2589,7 +2589,7 @@
// If the element size is 1, we don't have to multiply, just add
unsigned idxReg = getReg(idx, MBB, IP);
unsigned Reg = makeAnotherReg(Type::UIntTy);
- BuildMI(*MBB, IP, X86::ADDrr32, 2,TargetReg).addReg(Reg).addReg(idxReg);
+ BuildMI(*MBB, IP, X86::ADD32rr, 2,TargetReg).addReg(Reg).addReg(idxReg);
--IP; // Insert the next instruction before this one.
TargetReg = Reg; // Codegen the rest of the GEP into this
} else {
@@ -2607,7 +2607,7 @@
// Emit an ADD to add OffsetReg to the basePtr.
unsigned Reg = makeAnotherReg(Type::UIntTy);
- BuildMI(*MBB, IP, X86::ADDrr32, 2, TargetReg)
+ BuildMI(*MBB, IP, X86::ADD32rr, 2, TargetReg)
.addReg(Reg).addReg(OffsetReg);
// Step to the first instruction of the multiply.
@@ -2641,7 +2641,7 @@
// Create a new stack object using the frame manager...
int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment);
- addFrameReference(BuildMI(BB, X86::LEAr32, 5, getReg(I)), FrameIdx);
+ addFrameReference(BuildMI(BB, X86::LEA32r, 5, getReg(I)), FrameIdx);
return;
}
}
@@ -2657,18 +2657,18 @@
// AddedSize = add <TotalSizeReg>, 15
unsigned AddedSizeReg = makeAnotherReg(Type::UIntTy);
- BuildMI(BB, X86::ADDri32, 2, AddedSizeReg).addReg(TotalSizeReg).addImm(15);
+ BuildMI(BB, X86::ADD32ri, 2, AddedSizeReg).addReg(TotalSizeReg).addImm(15);
// AlignedSize = and <AddedSize>, ~15
unsigned AlignedSize = makeAnotherReg(Type::UIntTy);
- BuildMI(BB, X86::ANDri32, 2, AlignedSize).addReg(AddedSizeReg).addImm(~15);
+ BuildMI(BB, X86::AND32ri, 2, AlignedSize).addReg(AddedSizeReg).addImm(~15);
// Subtract size from stack pointer, thereby allocating some space.
- BuildMI(BB, X86::SUBrr32, 2, X86::ESP).addReg(X86::ESP).addReg(AlignedSize);
+ BuildMI(BB, X86::SUB32rr, 2, X86::ESP).addReg(X86::ESP).addReg(AlignedSize);
// Put a pointer to the space into the result register, by copying
// the stack pointer.
- BuildMI(BB, X86::MOVrr32, 1, getReg(I)).addReg(X86::ESP);
+ BuildMI(BB, X86::MOV32rr, 1, getReg(I)).addReg(X86::ESP);
// Inform the Frame Information that we have just allocated a variable-sized
// object.
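
The ADD32ri 15 / AND32ri ~15 pair above is the standard round-up-to-a-
multiple idiom, here keeping the dynamic allocation (and hence ESP) 16-byte
aligned:

    #include <stdint.h>

    static uint32_t alignTo16(uint32_t size) {
      return (size + 15u) & ~15u;   /* ADD32ri 15, then AND32ri ~15 */
    }
    /* then: ESP -= alignTo16(size); result = ESP  (SUB32rr + MOV32rr) */
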
Index: llvm/lib/Target/X86/FloatingPoint.cpp
diff -u llvm/lib/Target/X86/FloatingPoint.cpp:1.24 llvm/lib/Target/X86/FloatingPoint.cpp:1.25
--- llvm/lib/Target/X86/FloatingPoint.cpp:1.24 Sat Feb 28 17:42:35 2004
+++ llvm/lib/Target/X86/FloatingPoint.cpp Sun Feb 29 02:50:03 2004
@@ -321,13 +321,13 @@
{ X86::FDIVRrST0, X86::FDIVRPrST0 },
{ X86::FDIVrST0 , X86::FDIVPrST0 },
- { X86::FISTm16 , X86::FISTPm16 },
- { X86::FISTm32 , X86::FISTPm32 },
+ { X86::FIST16m , X86::FISTP16m },
+ { X86::FIST32m , X86::FISTP32m },
{ X86::FMULrST0 , X86::FMULPrST0 },
- { X86::FSTm32 , X86::FSTPm32 },
- { X86::FSTm64 , X86::FSTPm64 },
+ { X86::FST32m , X86::FSTP32m },
+ { X86::FST64m , X86::FSTP64m },
{ X86::FSTrr , X86::FSTPrr },
{ X86::FSUBRrST0, X86::FSUBRPrST0 },
@@ -398,20 +398,20 @@
E = LV->killed_end(MI); KI != E; ++KI)
KillsSrc |= KI->second == X86::FP0+Reg;
- // FSTPr80 and FISTPr64 are strange because there are no non-popping versions.
+ // FSTP80m and FISTP64m are strange because there are no non-popping versions.
// If we have one _and_ we don't want to pop the operand, duplicate the value
// on the stack instead of moving it. This ensures that popping the value is
// always ok.
//
- if ((MI->getOpcode() == X86::FSTPm80 ||
- MI->getOpcode() == X86::FISTPm64) && !KillsSrc) {
+ if ((MI->getOpcode() == X86::FSTP80m ||
+ MI->getOpcode() == X86::FISTP64m) && !KillsSrc) {
duplicateToTop(Reg, 7 /*temp register*/, I);
} else {
moveToTop(Reg, I); // Move to the top of the stack...
}
MI->RemoveOperand(MI->getNumOperands()-1); // Remove explicit ST(0) operand
- if (MI->getOpcode() == X86::FSTPm80 || MI->getOpcode() == X86::FISTPm64) {
+ if (MI->getOpcode() == X86::FSTP80m || MI->getOpcode() == X86::FISTP64m) {
assert(StackTop > 0 && "Stack empty??");
--StackTop;
} else if (KillsSrc) { // Last use of operand?
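
Because the 80-bit and 64-bit-integer stores exist only in popping form,
storing a still-live value would otherwise destroy it; duplicating it to
the top of the stack first lets the mandatory pop consume the copy. A
minimal model of the trick (names are illustrative, not the pass's API):

    #include <vector>

    /* FLD ST(slot) pushes a duplicate; the popping store then removes
       it, leaving the original live on the simulated x87 stack. */
    static void storePopOnly(std::vector<double> &fpStack, unsigned slot) {
      fpStack.push_back(fpStack[fpStack.size() - 1 - slot]);
      /* ... emit FSTP80m / FISTP64m against the duplicate ... */
      fpStack.pop_back();
    }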