[llvm-commits] CVS: llvm/lib/Target/X86/X86InstrInfo.td X86InstrInfo.h X86CodeEmitter.cpp X86.td Printer.cpp

Alkis Evlogimenos alkis at cs.uiuc.edu
Sat Feb 28 16:03:02 PST 2004


Changes in directory llvm/lib/Target/X86:

X86InstrInfo.td updated: 1.51 -> 1.52
X86InstrInfo.h updated: 1.33 -> 1.34
X86CodeEmitter.cpp updated: 1.54 -> 1.55
X86.td updated: 1.7 -> 1.8
Printer.cpp updated: 1.87 -> 1.88

---
Log message:

Each instruction now has both an ImmType and a MemType. These describe
the size of the immediate and of the memory operand on instructions that
use them. This resolves problems with instructions that take both a
memory and an immediate operand whose sizes differ (e.g. ADDmi32b).
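
As a minimal illustration, drawn from the definitions in the diff below:
ADDmi32b operates on a 32-bit memory operand but takes only a sign-extended
8-bit immediate, so its template now carries Mem32 and Imm8 independently
(the old single ArgType could not record both sizes at once):

    class IM32I8<string n, bits<8> o, Format f> : X86Inst<n, o, f, Mem32, Imm8>;
    def ADDmi32b : IM32I8<"add", 0x83, MRM0m>;   // [mem32] += imm8 (sign extended)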


---
Diffs of the changes:  (+533 -489)

Index: llvm/lib/Target/X86/X86InstrInfo.td
diff -u llvm/lib/Target/X86/X86InstrInfo.td:1.51 llvm/lib/Target/X86/X86InstrInfo.td:1.52
--- llvm/lib/Target/X86/X86InstrInfo.td:1.51	Sat Feb 28 00:01:43 2004
+++ llvm/lib/Target/X86/X86InstrInfo.td	Sat Feb 28 16:02:05 2004
@@ -31,20 +31,30 @@
 def MRM3m  : Format<27>; def MRM4m  : Format<28>; def MRM5m  : Format<29>;
 def MRM6m  : Format<30>; def MRM7m  : Format<31>;
 
-// ArgType - This specifies the argument type used by an instruction. This is
+// ImmType - This specifies the immediate type used by an instruction. This is
 // part of the ad-hoc solution used to emit machine instruction encodings by our
 // machine code emitter.
-class ArgType<bits<3> val> {
+class ImmType<bits<2> val> {
+  bits<2> Value = val;
+}
+def NoImm  : ImmType<0>;
+def Imm8   : ImmType<1>;
+def Imm16  : ImmType<2>;
+def Imm32  : ImmType<3>;
+
+// MemType - This specifies the memory operand type used by an instruction. This is
+// part of the ad-hoc solution used to emit machine instruction encodings by our
+// machine code emitter.
+class MemType<bits<3> val> {
   bits<3> Value = val;
 }
-def NoArg  : ArgType<0>;
-def Arg8   : ArgType<1>;
-def Arg16  : ArgType<2>;
-def Arg32  : ArgType<3>;
-def Arg64  : ArgType<4>;   // 64 bit int argument for FILD64
-def ArgF32 : ArgType<5>;
-def ArgF64 : ArgType<6>;
-def ArgF80 : ArgType<6>;
+def NoMem  : MemType<0>;
+def Mem8   : MemType<1>;
+def Mem16  : MemType<2>;
+def Mem32  : MemType<3>;
+def Mem64  : MemType<4>;
+def Mem80  : MemType<4>;
+def Mem128 : MemType<6>;
 
 // FPFormat - This specifies what form this FP instruction has.  This is used by
 // the Floating-Point stackifier pass.
@@ -59,15 +69,17 @@
 def SpecialFP  : FPFormat<5>;
 
 
-class X86Inst<string nam, bits<8> opcod, Format f, ArgType a> : Instruction {
+class X86Inst<string nam, bits<8> opcod, Format f, MemType m, ImmType i> : Instruction {
   let Namespace = "X86";
 
   let Name = nam;
   bits<8> Opcode = opcod;
   Format Form = f;
   bits<5> FormBits = Form.Value;
-  ArgType Type = a;
-  bits<3> TypeBits = Type.Value;
+  MemType MemT = m;
+  bits<3> MemTypeBits = MemT.Value;
+  ImmType ImmT = i;
+  bits<2> ImmTypeBits = ImmT.Value;
 
   // Attributes specific to X86 instructions...
   bit hasOpSizePrefix = 0; // Does this inst have a 0x66 prefix?
@@ -103,33 +115,57 @@
 class DF     { bits<4> Prefix = 10; }
 
 
+//===----------------------------------------------------------------------===//
+// Instruction templates...
+
+class I<string n, bits<8> o, Format f> : X86Inst<n, o, f, NoMem, NoImm>;
+
+class IM<string n, bits<8> o, Format f, MemType m> : X86Inst<n, o, f, m, NoImm>;
+class IM8 <string n, bits<8> o, Format f> : IM<n, o, f, Mem8 >;
+class IM16<string n, bits<8> o, Format f> : IM<n, o, f, Mem16>;
+class IM32<string n, bits<8> o, Format f> : IM<n, o, f, Mem32>;
+
+class II<string n, bits<8> o, Format f, ImmType i> : X86Inst<n, o, f, NoMem, i>;
+class II8 <string n, bits<8> o, Format f> : II<n, o, f, Imm8 >;
+class II16<string n, bits<8> o, Format f> : II<n, o, f, Imm16>;
+class II32<string n, bits<8> o, Format f> : II<n, o, f, Imm32>;
+
+class I8MI <string n, bits<8> o, Format f> : X86Inst<n, o, f, Mem8 , Imm8 >;
+class I16MI<string n, bits<8> o, Format f> : X86Inst<n, o, f, Mem16, Imm16>;
+class I32MI<string n, bits<8> o, Format f> : X86Inst<n, o, f, Mem32, Imm32>;
+
+class IM16I8<string n, bits<8> o, Format f> : X86Inst<n, o, f, Mem16, Imm8>;
+class IM32I8<string n, bits<8> o, Format f> : X86Inst<n, o, f, Mem32, Imm8>;
+
+// Helper for shift instructions
+class UsesCL { list<Register> Uses = [CL]; bit printImplicitUses = 1; }
 
 //===----------------------------------------------------------------------===//
 // Instruction list...
 //
 
-def PHI : X86Inst<"PHI", 0, Pseudo, NoArg>;          // PHI node...
+def PHI : I<"PHI", 0, Pseudo>;          // PHI node...
 
-def NOOP : X86Inst<"nop", 0x90, RawFrm, NoArg>;    // nop
+def NOOP : I<"nop", 0x90, RawFrm>;    // nop
 
-def ADJCALLSTACKDOWN : X86Inst<"ADJCALLSTACKDOWN", 0, Pseudo, NoArg>;
-def ADJCALLSTACKUP   : X86Inst<"ADJCALLSTACKUP",   0, Pseudo, NoArg>;
-def IMPLICIT_USE     : X86Inst<"IMPLICIT_USE",     0, Pseudo, NoArg>;
-def IMPLICIT_DEF     : X86Inst<"IMPLICIT_DEF",     0, Pseudo, NoArg>;
+def ADJCALLSTACKDOWN : I<"ADJCALLSTACKDOWN", 0, Pseudo>;
+def ADJCALLSTACKUP   : I<"ADJCALLSTACKUP",   0, Pseudo>;
+def IMPLICIT_USE     : I<"IMPLICIT_USE",     0, Pseudo>;
+def IMPLICIT_DEF     : I<"IMPLICIT_DEF",     0, Pseudo>;
 let isTerminator = 1 in
   let Defs = [FP0, FP1, FP2, FP3, FP4, FP5, FP6] in
-    def FP_REG_KILL    : X86Inst<"FP_REG_KILL",      0, Pseudo, NoArg>;
+    def FP_REG_KILL    : I<"FP_REG_KILL",      0, Pseudo>;
 //===----------------------------------------------------------------------===//
 //  Control Flow Instructions...
 //
 
 // Return instruction...
 let isTerminator = 1, isReturn = 1 in
-  def RET : X86Inst<"ret", 0xC3, RawFrm, NoArg>, Pattern<(retvoid)>;
+  def RET : I<"ret", 0xC3, RawFrm>, Pattern<(retvoid)>;
 
 // All branches are RawFrm, Void, Branch, and Terminators
 let isBranch = 1, isTerminator = 1 in
-  class IBr<string name, bits<8> opcode> : X86Inst<name, opcode, RawFrm, NoArg>;
+  class IBr<string name, bits<8> opcode> : I<name, opcode, RawFrm>;
 
 def JMP : IBr<"jmp", 0xE9>, Pattern<(br basicblock)>;
 def JB  : IBr<"jb" , 0x82>, TB;
@@ -152,426 +188,420 @@
 let isCall = 1 in
   // All calls clobber the non-callee saved registers...
   let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6] in {
-    def CALLpcrel32 : X86Inst<"call", 0xE8, RawFrm, NoArg>;
-    def CALLr32     : X86Inst<"call", 0xFF, MRM2r , Arg32>;
-    def CALLm32     : X86Inst<"call", 0xFF, MRM2m , Arg32>;
+    def CALLpcrel32 : I <"call", 0xE8, RawFrm>;
+    def CALLr32     : I <"call", 0xFF, MRM2r>;
+    def CALLm32     : IM32<"call", 0xFF, MRM2m>;
   }
 
        
 //===----------------------------------------------------------------------===//
 //  Miscellaneous Instructions...
 //
-def LEAVE    : X86Inst<"leave", 0xC9, RawFrm, NoArg>, Imp<[EBP,ESP],[EBP,ESP]>;
-def POPr32   : X86Inst<"pop",   0x58, AddRegFrm, Arg32>, Imp<[ESP],[ESP]>;
+def LEAVE    : I<"leave", 0xC9, RawFrm>, Imp<[EBP,ESP],[EBP,ESP]>;
+def POPr32   : I<"pop",   0x58, AddRegFrm>, Imp<[ESP],[ESP]>;
 
-let isTwoAddress = 1 in                                      // R32 = bswap R32
-  def BSWAPr32 : X86Inst<"bswap", 0xC8, AddRegFrm, Arg32>, TB;
+let isTwoAddress = 1 in                                    // R32 = bswap R32
+  def BSWAPr32 : I<"bswap", 0xC8, AddRegFrm>, TB;
 
-def XCHGrr8  : X86Inst<"xchg", 0x86, MRMDestReg, Arg8>;         // xchg R8, R8
-def XCHGrr16 : X86Inst<"xchg", 0x87, MRMDestReg, Arg16>, OpSize;// xchg R16, R16
-def XCHGrr32 : X86Inst<"xchg", 0x87, MRMDestReg, Arg32>;        // xchg R32, R32
-def XCHGmr8  : X86Inst<"xchg", 0x86, MRMDestMem, Arg8>;         // xchg [mem8], R8
-def XCHGmr16 : X86Inst<"xchg", 0x87, MRMDestMem, Arg16>, OpSize;// xchg [mem16], R16
-def XCHGmr32 : X86Inst<"xchg", 0x87, MRMDestMem, Arg32>;        // xchg [mem32], R32
-def XCHGrm8  : X86Inst<"xchg", 0x86, MRMSrcMem , Arg8>;         // xchg R8, [mem8]
-def XCHGrm16 : X86Inst<"xchg", 0x87, MRMSrcMem , Arg16>, OpSize;// xchg R16, [mem16]
-def XCHGrm32 : X86Inst<"xchg", 0x87, MRMSrcMem , Arg32>;        // xchg R32, [mem32]
+def XCHGrr8  : I <"xchg", 0x86, MRMDestReg>;               // xchg R8, R8
+def XCHGrr16 : I <"xchg", 0x87, MRMDestReg>, OpSize;       // xchg R16, R16
+def XCHGrr32 : I <"xchg", 0x87, MRMDestReg>;               // xchg R32, R32
+def XCHGmr8  : IM8 <"xchg", 0x86, MRMDestMem>;             // xchg [mem8], R8
+def XCHGmr16 : IM16<"xchg", 0x87, MRMDestMem>, OpSize;     // xchg [mem16], R16
+def XCHGmr32 : IM32<"xchg", 0x87, MRMDestMem>;             // xchg [mem32], R32
+def XCHGrm8  : IM8 <"xchg", 0x86, MRMSrcMem >;             // xchg R8, [mem8]
+def XCHGrm16 : IM16<"xchg", 0x87, MRMSrcMem >, OpSize;     // xchg R16, [mem16]
+def XCHGrm32 : IM32<"xchg", 0x87, MRMSrcMem >;             // xchg R32, [mem32]
 
-def LEAr16 : X86Inst<"lea", 0x8D, MRMSrcMem, Arg16>, OpSize; // R16 = lea [mem]
-def LEAr32 : X86Inst<"lea", 0x8D, MRMSrcMem, Arg32>;         // R32 = lea [mem]
+def LEAr16   : IM32<"lea", 0x8D, MRMSrcMem>, OpSize;          // R16 = lea [mem]
+def LEAr32   : IM32<"lea", 0x8D, MRMSrcMem>;                  // R32 = lea [mem]
 
 
-def REP_MOVSB : X86Inst<"rep movsb", 0xA4, RawFrm, NoArg>, REP,
+def REP_MOVSB : I<"rep movsb", 0xA4, RawFrm>, REP,
                 Imp<[ECX,EDI,ESI], [ECX,EDI,ESI]>;
-def REP_MOVSW : X86Inst<"rep movsw", 0xA5, RawFrm, NoArg>, REP, OpSize,
+def REP_MOVSW : I<"rep movsw", 0xA5, RawFrm>, REP, OpSize,
                 Imp<[ECX,EDI,ESI], [ECX,EDI,ESI]>;
-def REP_MOVSD : X86Inst<"rep movsd", 0xA5, RawFrm, NoArg>, REP,
+def REP_MOVSD : I<"rep movsd", 0xA5, RawFrm>, REP,
                 Imp<[ECX,EDI,ESI], [ECX,EDI,ESI]>;
 
-def REP_STOSB : X86Inst<"rep stosb", 0xAA, RawFrm, NoArg>, REP,
+def REP_STOSB : I<"rep stosb", 0xAA, RawFrm>, REP,
                 Imp<[AL,ECX,EDI], [ECX,EDI]>;
-def REP_STOSW : X86Inst<"rep stosw", 0xAB, RawFrm, NoArg>, REP, OpSize,
+def REP_STOSW : I<"rep stosw", 0xAB, RawFrm>, REP, OpSize,
                 Imp<[AX,ECX,EDI], [ECX,EDI]>;
-def REP_STOSD : X86Inst<"rep stosd", 0xAB, RawFrm, NoArg>, REP,
+def REP_STOSD : I<"rep stosd", 0xAB, RawFrm>, REP,
                 Imp<[EAX,ECX,EDI], [ECX,EDI]>;
 
 //===----------------------------------------------------------------------===//
 //  Move Instructions...
 //
-def MOVrr8  : X86Inst<"mov", 0x88, MRMDestReg, Arg8>,          Pattern<(set R8 , R8 )>;
-def MOVrr16 : X86Inst<"mov", 0x89, MRMDestReg, Arg16>, OpSize, Pattern<(set R16, R16)>;
-def MOVrr32 : X86Inst<"mov", 0x89, MRMDestReg, Arg32>,         Pattern<(set R32, R32)>;
-def MOVri8  : X86Inst<"mov", 0xB0, AddRegFrm , Arg8>,          Pattern<(set R8 , imm )>;
-def MOVri16 : X86Inst<"mov", 0xB8, AddRegFrm , Arg16>, OpSize, Pattern<(set R16, imm)>;
-def MOVri32 : X86Inst<"mov", 0xB8, AddRegFrm , Arg32>,         Pattern<(set R32, imm)>;
-def MOVmi8  : X86Inst<"mov", 0xC6, MRM0m     , Arg8>;             // [mem8] = imm8
-def MOVmi16 : X86Inst<"mov", 0xC7, MRM0m     , Arg16>, OpSize;    // [mem16] = imm16
-def MOVmi32 : X86Inst<"mov", 0xC7, MRM0m     , Arg32>;            // [mem32] = imm32
+def MOVrr8  : I    <"mov", 0x88, MRMDestReg>,         Pattern<(set R8 , R8 )>;
+def MOVrr16 : I    <"mov", 0x89, MRMDestReg>, OpSize, Pattern<(set R16, R16)>;
+def MOVrr32 : I    <"mov", 0x89, MRMDestReg>,         Pattern<(set R32, R32)>;
+def MOVri8  : II8  <"mov", 0xB0, AddRegFrm >,         Pattern<(set R8 , imm )>;
+def MOVri16 : II16 <"mov", 0xB8, AddRegFrm >, OpSize, Pattern<(set R16, imm)>;
+def MOVri32 : II32 <"mov", 0xB8, AddRegFrm >,         Pattern<(set R32, imm)>;
+def MOVmi8  : I8MI <"mov", 0xC6, MRM0m     >;         // [mem8] = imm8
+def MOVmi16 : I16MI<"mov", 0xC7, MRM0m     >, OpSize; // [mem16] = imm16
+def MOVmi32 : I32MI<"mov", 0xC7, MRM0m     >;         // [mem32] = imm32
 
-def MOVrm8  : X86Inst<"mov", 0x8A, MRMSrcMem , Arg8>;             // R8  = [mem8]
-def MOVrm16 : X86Inst<"mov", 0x8B, MRMSrcMem , Arg16>, OpSize,    // R16 = [mem16]
+def MOVrm8  : IM8  <"mov", 0x8A, MRMSrcMem>;          // R8  = [mem8]
+def MOVrm16 : IM16 <"mov", 0x8B, MRMSrcMem>, OpSize,  // R16 = [mem16]
               Pattern<(set R16, (load (plus R32, (plus (times imm, R32), imm))))>;
-def MOVrm32 : X86Inst<"mov", 0x8B, MRMSrcMem , Arg32>,            // R32 = [mem32]
+def MOVrm32 : IM32 <"mov", 0x8B, MRMSrcMem>,          // R32 = [mem32]
               Pattern<(set R32, (load (plus R32, (plus (times imm, R32), imm))))>;
 
-def MOVmr8  : X86Inst<"mov", 0x88, MRMDestMem, Arg8>;             // [mem8] = R8
-def MOVmr16 : X86Inst<"mov", 0x89, MRMDestMem, Arg16>, OpSize;    // [mem16] = R16
-def MOVmr32 : X86Inst<"mov", 0x89, MRMDestMem, Arg32>;            // [mem32] = R32
+def MOVmr8  : IM8  <"mov", 0x88, MRMDestMem>;         // [mem8] = R8
+def MOVmr16 : IM16 <"mov", 0x89, MRMDestMem>, OpSize; // [mem16] = R16
+def MOVmr32 : IM32 <"mov", 0x89, MRMDestMem>;         // [mem32] = R32
 
 //===----------------------------------------------------------------------===//
 //  Fixed-Register Multiplication and Division Instructions...
 //
 
 // Extra precision multiplication
-def MULr8  : X86Inst<"mul", 0xF6, MRM4r , Arg8 >, Imp<[AL],[AX]>;               // AL,AH = AL*R8
-def MULr16 : X86Inst<"mul", 0xF7, MRM4r , Arg16>, Imp<[AX],[AX,DX]>, OpSize;    // AX,DX = AX*R16
-def MULr32 : X86Inst<"mul", 0xF7, MRM4r , Arg32>, Imp<[EAX],[EAX,EDX]>;         // EAX,EDX = EAX*R32
-def MULm8  : X86Inst<"mul", 0xF6, MRM4m , Arg8 >, Imp<[AL],[AX]>;               // AL,AH = AL*[mem8]
-def MULm16 : X86Inst<"mul", 0xF7, MRM4m , Arg16>, Imp<[AX],[AX,DX]>, OpSize;    // AX,DX = AX*[mem16]
-def MULm32 : X86Inst<"mul", 0xF7, MRM4m , Arg32>, Imp<[EAX],[EAX,EDX]>;         // EAX,EDX = EAX*[mem32]
+def MULr8  : I   <"mul", 0xF6, MRM4r>, Imp<[AL],[AX]>;               // AL,AH = AL*R8
+def MULr16 : I   <"mul", 0xF7, MRM4r>, Imp<[AX],[AX,DX]>, OpSize;    // AX,DX = AX*R16
+def MULr32 : I   <"mul", 0xF7, MRM4r>, Imp<[EAX],[EAX,EDX]>;         // EAX,EDX = EAX*R32
+def MULm8  : IM8 <"mul", 0xF6, MRM4m>, Imp<[AL],[AX]>;               // AL,AH = AL*[mem8]
+def MULm16 : IM16<"mul", 0xF7, MRM4m>, Imp<[AX],[AX,DX]>, OpSize;    // AX,DX = AX*[mem16]
+def MULm32 : IM32<"mul", 0xF7, MRM4m>, Imp<[EAX],[EAX,EDX]>;         // EAX,EDX = EAX*[mem32]
 
 // unsigned division/remainder
-def DIVr8  : X86Inst<"div", 0xF6, MRM6r , Arg8 >, Imp<[AX],[AX]>;               // AX/r8 = AL,AH
-def DIVr16 : X86Inst<"div", 0xF7, MRM6r , Arg16>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/r16 = AX,DX
-def DIVr32 : X86Inst<"div", 0xF7, MRM6r , Arg32>, Imp<[EAX,EDX],[EAX,EDX]>;     // EDX:EAX/r32 = EAX,EDX
-def DIVm8  : X86Inst<"div", 0xF6, MRM6m , Arg8 >, Imp<[AX],[AX]>;               // AX/[mem8] = AL,AH
-def DIVm16 : X86Inst<"div", 0xF7, MRM6m , Arg16>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/[mem16] = AX,DX
-def DIVm32 : X86Inst<"div", 0xF7, MRM6m , Arg32>, Imp<[EAX,EDX],[EAX,EDX]>;     // EDX:EAX/[mem32] = EAX,EDX
+def DIVr8  : I   <"div", 0xF6, MRM6r>, Imp<[AX],[AX]>;               // AX/r8 = AL,AH
+def DIVr16 : I   <"div", 0xF7, MRM6r>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/r16 = AX,DX
+def DIVr32 : I   <"div", 0xF7, MRM6r>, Imp<[EAX,EDX],[EAX,EDX]>;     // EDX:EAX/r32 = EAX,EDX
+def DIVm8  : IM8 <"div", 0xF6, MRM6m>, Imp<[AX],[AX]>;               // AX/[mem8] = AL,AH
+def DIVm16 : IM16<"div", 0xF7, MRM6m>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/[mem16] = AX,DX
+def DIVm32 : IM32<"div", 0xF7, MRM6m>, Imp<[EAX,EDX],[EAX,EDX]>;     // EDX:EAX/[mem32] = EAX,EDX
 
 // signed division/remainder
-def IDIVr8 : X86Inst<"idiv",0xF6, MRM7r , Arg8 >, Imp<[AX],[AX]>;               // AX/r8 = AL,AH
-def IDIVr16: X86Inst<"idiv",0xF7, MRM7r , Arg16>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/r16 = AX,DX
-def IDIVr32: X86Inst<"idiv",0xF7, MRM7r , Arg32>, Imp<[EAX,EDX],[EAX,EDX]>;     // EDX:EAX/r32 = EAX,EDX
-def IDIVm8 : X86Inst<"idiv",0xF6, MRM7m , Arg8 >, Imp<[AX],[AX]>;               // AX/[mem8] = AL,AH
-def IDIVm16: X86Inst<"idiv",0xF7, MRM7m , Arg16>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/[mem16] = AX,DX
-def IDIVm32: X86Inst<"idiv",0xF7, MRM7m , Arg32>, Imp<[EAX,EDX],[EAX,EDX]>;     // EDX:EAX/[mem32] = EAX,EDX
+def IDIVr8 : I   <"idiv",0xF6, MRM7r>, Imp<[AX],[AX]>;               // AX/r8 = AL,AH
+def IDIVr16: I   <"idiv",0xF7, MRM7r>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/r16 = AX,DX
+def IDIVr32: I   <"idiv",0xF7, MRM7r>, Imp<[EAX,EDX],[EAX,EDX]>;     // EDX:EAX/r32 = EAX,EDX
+def IDIVm8 : IM8 <"idiv",0xF6, MRM7m>, Imp<[AX],[AX]>;               // AX/[mem8] = AL,AH
+def IDIVm16: IM16<"idiv",0xF7, MRM7m>, Imp<[AX,DX],[AX,DX]>, OpSize; // DX:AX/[mem16] = AX,DX
+def IDIVm32: IM32<"idiv",0xF7, MRM7m>, Imp<[EAX,EDX],[EAX,EDX]>;     // EDX:EAX/[mem32] = EAX,EDX
 
 // Sign-extenders for division
-def CBW    : X86Inst<"cbw", 0x98, RawFrm, Arg8 >, Imp<[AL],[AH]>;               // AX = signext(AL)
-def CWD    : X86Inst<"cwd", 0x99, RawFrm, Arg8 >, Imp<[AX],[DX]>;               // DX:AX = signext(AX)
-def CDQ    : X86Inst<"cdq", 0x99, RawFrm, Arg8 >, Imp<[EAX],[EDX]>;             // EDX:EAX = signext(EAX)
+def CBW    : I<"cbw", 0x98, RawFrm >, Imp<[AL],[AH]>;                // AX = signext(AL)
+def CWD    : I<"cwd", 0x99, RawFrm >, Imp<[AX],[DX]>;                // DX:AX = signext(AX)
+def CDQ    : I<"cdq", 0x99, RawFrm >, Imp<[EAX],[EDX]>;              // EDX:EAX = signext(EAX)
 
 //===----------------------------------------------------------------------===//
 //  Two address Instructions...
 //
-let isTwoAddress = 1 in {  // Define some helper classes to make defs shorter.
-  class I2A8 <string n, bits<8> o, Format F> : X86Inst<n, o, F, Arg8>;
-  class I2A16<string n, bits<8> o, Format F> : X86Inst<n, o, F, Arg16>;
-  class I2A32<string n, bits<8> o, Format F> : X86Inst<n, o, F, Arg32>;
-}
+let isTwoAddress = 1 in {
+
+// Conditional moves.  These are modelled as X = cmovXX Y, Z.  Eventually
+// register allocated to cmovXX XY, Z
+def CMOVErr16 : I<"cmove", 0x44, MRMSrcReg>, TB, OpSize;        // if ==, R16 = R16
+def CMOVNErr32: I<"cmovne",0x45, MRMSrcReg>, TB;                // if !=, R32 = R32
+def CMOVSrr32 : I<"cmovs", 0x48, MRMSrcReg>, TB;                // if signed, R32 = R32
 
 // unary instructions
-def NEGr8  : I2A8 <"neg", 0xF6, MRM3r >;         // R8  = -R8  = 0-R8
-def NEGr16 : I2A16<"neg", 0xF7, MRM3r >, OpSize; // R16 = -R16 = 0-R16
-def NEGr32 : I2A32<"neg", 0xF7, MRM3r >;         // R32 = -R32 = 0-R32
-def NEGm8  : I2A8 <"neg", 0xF6, MRM3m >;         // [mem8]  = -[mem8]  = 0-[mem8]
-def NEGm16 : I2A16<"neg", 0xF7, MRM3m >, OpSize; // [mem16] = -[mem16] = 0-[mem16]
-def NEGm32 : I2A32<"neg", 0xF7, MRM3m >;         // [mem32] = -[mem32] = 0-[mem32]
-
-def NOTr8  : I2A8 <"not", 0xF6, MRM2r >;         // R8  = ~R8  = R8^-1
-def NOTr16 : I2A16<"not", 0xF7, MRM2r >, OpSize; // R16 = ~R16 = R16^-1
-def NOTr32 : I2A32<"not", 0xF7, MRM2r >;         // R32 = ~R32 = R32^-1
-def NOTm8  : I2A8 <"not", 0xF6, MRM2m >;         // [mem8]  = ~[mem8]  = [mem8^-1]
-def NOTm16 : I2A16<"not", 0xF7, MRM2m >, OpSize; // [mem16] = ~[mem16] = [mem16^-1]
-def NOTm32 : I2A32<"not", 0xF7, MRM2m >;         // [mem32] = ~[mem32] = [mem32^-1]
-
-def INCr8  : I2A8 <"inc", 0xFE, MRM0r >;         // ++R8
-def INCr16 : I2A16<"inc", 0xFF, MRM0r >, OpSize; // ++R16
-def INCr32 : I2A32<"inc", 0xFF, MRM0r >;         // ++R32
-def INCm8  : I2A8 <"inc", 0xFE, MRM0m >;         // ++R8
-def INCm16 : I2A16<"inc", 0xFF, MRM0m >, OpSize; // ++R16
-def INCm32 : I2A32<"inc", 0xFF, MRM0m >;         // ++R32
-
-def DECr8  : I2A8 <"dec", 0xFE, MRM1r >;         // --R8
-def DECr16 : I2A16<"dec", 0xFF, MRM1r >, OpSize; // --R16
-def DECr32 : I2A32<"dec", 0xFF, MRM1r >;         // --R32
-def DECm8  : I2A8 <"dec", 0xFE, MRM1m >;         // --[mem8]
-def DECm16 : I2A16<"dec", 0xFF, MRM1m >, OpSize; // --[mem16]
-def DECm32 : I2A32<"dec", 0xFF, MRM1m >;         // --[mem32]
+def NEGr8  : I   <"neg", 0xF6, MRM3r>;         // R8  = -R8  = 0-R8
+def NEGr16 : I   <"neg", 0xF7, MRM3r>, OpSize; // R16 = -R16 = 0-R16
+def NEGr32 : I   <"neg", 0xF7, MRM3r>;         // R32 = -R32 = 0-R32
+def NEGm8  : IM8 <"neg", 0xF6, MRM3m>;         // [mem8]  = -[mem8]  = 0-[mem8]
+def NEGm16 : IM16<"neg", 0xF7, MRM3m>, OpSize; // [mem16] = -[mem16] = 0-[mem16]
+def NEGm32 : IM32<"neg", 0xF7, MRM3m>;         // [mem32] = -[mem32] = 0-[mem32]
+
+def NOTr8  : I   <"not", 0xF6, MRM2r>;         // R8  = ~R8  = R8^-1
+def NOTr16 : I   <"not", 0xF7, MRM2r>, OpSize; // R16 = ~R16 = R16^-1
+def NOTr32 : I   <"not", 0xF7, MRM2r>;         // R32 = ~R32 = R32^-1
+def NOTm8  : IM8 <"not", 0xF6, MRM2m>;         // [mem8]  = ~[mem8]  = [mem8^-1]
+def NOTm16 : IM16<"not", 0xF7, MRM2m>, OpSize; // [mem16] = ~[mem16] = [mem16^-1]
+def NOTm32 : IM32<"not", 0xF7, MRM2m>;         // [mem32] = ~[mem32] = [mem32^-1]
+
+def INCr8  : I   <"inc", 0xFE, MRM0r>;         // ++R8
+def INCr16 : I   <"inc", 0xFF, MRM0r>, OpSize; // ++R16
+def INCr32 : I   <"inc", 0xFF, MRM0r>;         // ++R32
+def INCm8  : IM8 <"inc", 0xFE, MRM0m>;         // ++R8
+def INCm16 : IM16<"inc", 0xFF, MRM0m>, OpSize; // ++R16
+def INCm32 : IM32<"inc", 0xFF, MRM0m>;         // ++R32
+
+def DECr8  : I   <"dec", 0xFE, MRM1r>;         // --R8
+def DECr16 : I   <"dec", 0xFF, MRM1r>, OpSize; // --R16
+def DECr32 : I   <"dec", 0xFF, MRM1r>;         // --R32
+def DECm8  : IM8 <"dec", 0xFE, MRM1m>;         // --[mem8]
+def DECm16 : IM16<"dec", 0xFF, MRM1m>, OpSize; // --[mem16]
+def DECm32 : IM32<"dec", 0xFF, MRM1m>;         // --[mem32]
 
+// Logical operators...
+def ANDrr8   : I     <"and", 0x20, MRMDestReg>,         Pattern<(set R8 , (and R8 , R8 ))>;
+def ANDrr16  : I     <"and", 0x21, MRMDestReg>, OpSize, Pattern<(set R16, (and R16, R16))>;
+def ANDrr32  : I     <"and", 0x21, MRMDestReg>,         Pattern<(set R32, (and R32, R32))>;
+def ANDmr8   : IM8   <"and", 0x20, MRMDestMem>;            // [mem8]  &= R8
+def ANDmr16  : IM16  <"and", 0x21, MRMDestMem>, OpSize;    // [mem16] &= R16
+def ANDmr32  : IM32  <"and", 0x21, MRMDestMem>;            // [mem32] &= R32
+def ANDrm8   : IM8   <"and", 0x22, MRMSrcMem >;            // R8  &= [mem8]
+def ANDrm16  : IM16  <"and", 0x23, MRMSrcMem >, OpSize;    // R16 &= [mem16]
+def ANDrm32  : IM32  <"and", 0x23, MRMSrcMem >;            // R32 &= [mem32]
+
+def ANDri8   : II8   <"and", 0x80, MRM4r     >,         Pattern<(set R8 , (and R8 , imm))>;
+def ANDri16  : II16  <"and", 0x81, MRM4r     >, OpSize, Pattern<(set R16, (and R16, imm))>;
+def ANDri32  : II32  <"and", 0x81, MRM4r     >,         Pattern<(set R32, (and R32, imm))>;
+def ANDmi8   : I8MI  <"and", 0x80, MRM4m     >;            // [mem8]  &= imm8
+def ANDmi16  : I16MI <"and", 0x81, MRM4m     >, OpSize;    // [mem16] &= imm16
+def ANDmi32  : I32MI <"and", 0x81, MRM4m     >;            // [mem32] &= imm32
+
+def ANDri16b : II8   <"and", 0x83, MRM4r     >, OpSize;    // R16 &= imm8
+def ANDri32b : II8   <"and", 0x83, MRM4r     >;            // R32 &= imm8
+def ANDmi16b : IM16I8<"and", 0x83, MRM4m     >, OpSize;    // [mem16] &= imm8
+def ANDmi32b : IM32I8<"and", 0x83, MRM4m     >;            // [mem32] &= imm8
+
+
+def ORrr8    : I     <"or" , 0x08, MRMDestReg>,         Pattern<(set R8 , (or  R8 , R8 ))>;
+def ORrr16   : I     <"or" , 0x09, MRMDestReg>, OpSize, Pattern<(set R16, (or  R16, R16))>;
+def ORrr32   : I     <"or" , 0x09, MRMDestReg>,         Pattern<(set R32, (or  R32, R32))>;
+def ORmr8    : IM8   <"or" , 0x08, MRMDestMem>;            // [mem8]  |= R8
+def ORmr16   : IM16  <"or" , 0x09, MRMDestMem>, OpSize;    // [mem16] |= R16
+def ORmr32   : IM32  <"or" , 0x09, MRMDestMem>;            // [mem32] |= R32
+def ORrm8    : IM8   <"or" , 0x0A, MRMSrcMem >;            // R8  |= [mem8]
+def ORrm16   : IM16  <"or" , 0x0B, MRMSrcMem >, OpSize;    // R16 |= [mem16]
+def ORrm32   : IM32  <"or" , 0x0B, MRMSrcMem >;            // R32 |= [mem32]
+
+def ORri8    : II8   <"or" , 0x80, MRM1r     >,         Pattern<(set R8 , (or  R8 , imm))>;
+def ORri16   : II16  <"or" , 0x81, MRM1r     >, OpSize, Pattern<(set R16, (or  R16, imm))>;
+def ORri32   : II32  <"or" , 0x81, MRM1r     >,         Pattern<(set R32, (or  R32, imm))>;
+def ORmi8    : I8MI  <"or" , 0x80, MRM1m     >;            // [mem8]  |= imm8
+def ORmi16   : I16MI <"or" , 0x81, MRM1m     >, OpSize;    // [mem16] |= imm16
+def ORmi32   : I32MI <"or" , 0x81, MRM1m     >;            // [mem32] |= imm32
+
+def ORri16b  : II8   <"or" , 0x83, MRM1r     >, OpSize;    // R16 |= imm8
+def ORri32b  : II8   <"or" , 0x83, MRM1r     >;            // R32 |= imm8
+def ORmi16b  : IM16I8<"or" , 0x83, MRM1m     >, OpSize;    // [mem16] |= imm8
+def ORmi32b  : IM32I8<"or" , 0x83, MRM1m     >;            // [mem32] |= imm8
+
+
+def XORrr8   : I     <"xor", 0x30, MRMDestReg>,         Pattern<(set R8 , (xor R8 , R8 ))>;
+def XORrr16  : I     <"xor", 0x31, MRMDestReg>, OpSize, Pattern<(set R16, (xor R16, R16))>;
+def XORrr32  : I     <"xor", 0x31, MRMDestReg>,         Pattern<(set R32, (xor R32, R32))>;
+def XORmr8   : IM8   <"xor", 0x30, MRMDestMem>;            // [mem8]  ^= R8
+def XORmr16  : IM16  <"xor", 0x31, MRMDestMem>, OpSize;    // [mem16] ^= R16
+def XORmr32  : IM32  <"xor", 0x31, MRMDestMem>;            // [mem32] ^= R32
+def XORrm8   : IM8   <"xor", 0x32, MRMSrcMem >;            // R8  ^= [mem8]
+def XORrm16  : IM16  <"xor", 0x33, MRMSrcMem >, OpSize;    // R16 ^= [mem16]
+def XORrm32  : IM32  <"xor", 0x33, MRMSrcMem >;            // R32 ^= [mem32]
+
+def XORri8   : II8   <"xor", 0x80, MRM6r     >,         Pattern<(set R8 , (xor R8 , imm))>;
+def XORri16  : II16  <"xor", 0x81, MRM6r     >, OpSize, Pattern<(set R16, (xor R16, imm))>;
+def XORri32  : II32  <"xor", 0x81, MRM6r     >,         Pattern<(set R32, (xor R32, imm))>;
+def XORmi8   : I8MI  <"xor", 0x80, MRM6m     >;            // [mem8]  ^= imm8
+def XORmi16  : I16MI <"xor", 0x81, MRM6m     >, OpSize;    // [mem16] ^= imm16
+def XORmi32  : I32MI <"xor", 0x81, MRM6m     >;            // [mem32] ^= imm32
+
+def XORri16b : II8   <"xor", 0x83, MRM6r     >, OpSize;    // R16 ^= imm8
+def XORri32b : II8   <"xor", 0x83, MRM6r     >;            // R32 ^= imm8
+def XORmi16b : IM16I8<"xor", 0x83, MRM6m     >, OpSize;    // [mem16] ^= imm8
+def XORmi32b : IM32I8<"xor", 0x83, MRM6m     >;            // [mem32] ^= imm8
+
+// Shift instructions
+def SHLrCL8  : I     <"shl", 0xD2, MRM4r     >        , UsesCL; // R8  <<= cl
+def SHLrCL16 : I     <"shl", 0xD3, MRM4r     >, OpSize, UsesCL; // R16 <<= cl
+def SHLrCL32 : I     <"shl", 0xD3, MRM4r     >        , UsesCL; // R32 <<= cl
+def SHLmCL8  : IM8   <"shl", 0xD2, MRM4m     >        , UsesCL; // [mem8]  <<= cl
+def SHLmCL16 : IM16  <"shl", 0xD3, MRM4m     >, OpSize, UsesCL; // [mem16] <<= cl
+def SHLmCL32 : IM32  <"shl", 0xD3, MRM4m     >        , UsesCL; // [mem32] <<= cl
+
+def SHLri8   : II8   <"shl", 0xC0, MRM4r     >;                 // R8  <<= imm8
+def SHLri16  : II8   <"shl", 0xC1, MRM4r     >, OpSize;         // R16 <<= imm8
+def SHLri32  : II8   <"shl", 0xC1, MRM4r     >;                 // R32 <<= imm8
+def SHLmi8   : I8MI  <"shl", 0xC0, MRM4m     >;                 // [mem8]  <<= imm8
+def SHLmi16  : IM16I8<"shl", 0xC1, MRM4m     >, OpSize;         // [mem16] <<= imm8
+def SHLmi32  : IM32I8<"shl", 0xC1, MRM4m     >;                 // [mem32] <<= imm8
+
+def SHRrCL8  : I     <"shr", 0xD2, MRM5r     >        , UsesCL; // R8  >>= cl
+def SHRrCL16 : I     <"shr", 0xD3, MRM5r     >, OpSize, UsesCL; // R16 >>= cl
+def SHRrCL32 : I     <"shr", 0xD3, MRM5r     >        , UsesCL; // R32 >>= cl
+def SHRmCL8  : IM8   <"shr", 0xD2, MRM5m     >        , UsesCL; // [mem8]  >>= cl
+def SHRmCL16 : IM16  <"shr", 0xD3, MRM5m     >, OpSize, UsesCL; // [mem16] >>= cl
+def SHRmCL32 : IM32  <"shr", 0xD3, MRM5m     >        , UsesCL; // [mem32] >>= cl
+
+def SHRri8   : II8   <"shr", 0xC0, MRM5r     >;                 // R8  >>= imm8
+def SHRri16  : II8   <"shr", 0xC1, MRM5r     >, OpSize;         // R16 >>= imm8
+def SHRri32  : II8   <"shr", 0xC1, MRM5r     >;                 // R32 >>= imm8
+def SHRmi8   : I8MI  <"shr", 0xC0, MRM5m     >;                 // [mem8]  >>= imm8
+def SHRmi16  : IM16I8<"shr", 0xC1, MRM5m     >, OpSize;         // [mem16] >>= imm8
+def SHRmi32  : IM32I8<"shr", 0xC1, MRM5m     >;                 // [mem32] >>= imm8
+
+def SARrCL8  : I     <"sar", 0xD2, MRM7r     >        , UsesCL; // R8  >>>= cl
+def SARrCL16 : I     <"sar", 0xD3, MRM7r     >, OpSize, UsesCL; // R16 >>>= cl
+def SARrCL32 : I     <"sar", 0xD3, MRM7r     >        , UsesCL; // R32 >>>= cl
+def SARmCL8  : IM8   <"sar", 0xD2, MRM7m     >        , UsesCL; // [mem8]  >>>= cl
+def SARmCL16 : IM16  <"sar", 0xD3, MRM7m     >, OpSize, UsesCL; // [mem16] >>>= cl
+def SARmCL32 : IM32  <"sar", 0xD3, MRM7m     >        , UsesCL; // [mem32] >>>= cl
+
+def SARri8   : II8   <"sar", 0xC0, MRM7r     >;                 // R8  >>>= imm8
+def SARri16  : II8   <"sar", 0xC1, MRM7r     >, OpSize;         // R16 >>>= imm8
+def SARri32  : II8   <"sar", 0xC1, MRM7r     >;                 // R32 >>>= imm8
+def SARmi8   : I8MI  <"sar", 0xC0, MRM7m     >;                 // [mem8]  >>>= imm8
+def SARmi16  : IM16I8<"sar", 0xC1, MRM7m     >, OpSize;         // [mem16] >>>= imm8
+def SARmi32  : IM32I8<"sar", 0xC1, MRM7m     >;                 // [mem32] >>>= imm8
+
+def SHLDrrCL32 : I   <"shld", 0xA5, MRMDestReg>, TB, UsesCL;    // R32 <<= R32,R32 cl
+def SHLDmrCL32 : I   <"shld", 0xA5, MRMDestMem>, TB, UsesCL;    // [mem32] <<= [mem32],R32 cl
+def SHLDrri32  : II8 <"shld", 0xA4, MRMDestReg>, TB;            // R32 <<= R32,R32 imm8
+def SHLDmri32  : II8 <"shld", 0xA4, MRMDestMem>, TB;            // [mem32] <<= [mem32],R32 imm8
+
+def SHRDrrCL32 : I   <"shrd", 0xAD, MRMDestReg>, TB, UsesCL;    // R32 >>= R32,R32 cl
+def SHRDmrCL32 : I   <"shrd", 0xAD, MRMDestMem>, TB, UsesCL;    // [mem32] >>= [mem32],R32 cl
+def SHRDrri32  : II8 <"shrd", 0xAC, MRMDestReg>, TB;            // R32 >>= R32,R32 imm8
+def SHRDmri32  : II8 <"shrd", 0xAC, MRMDestMem>, TB;            // [mem32] >>= [mem32],R32 imm8
 
 
 // Arithmetic...
-def ADDrr8   : I2A8 <"add", 0x00, MRMDestReg>,         Pattern<(set R8 , (plus R8 , R8 ))>;
-def ADDrr16  : I2A16<"add", 0x01, MRMDestReg>, OpSize, Pattern<(set R16, (plus R16, R16))>;
-def ADDrr32  : I2A32<"add", 0x01, MRMDestReg>,         Pattern<(set R32, (plus R32, R32))>;
-def ADDmr8   : I2A8 <"add", 0x00, MRMDestMem>;         // [mem8]  += R8
-def ADDmr16  : I2A16<"add", 0x01, MRMDestMem>, OpSize; // [mem16] += R16
-def ADDmr32  : I2A32<"add", 0x01, MRMDestMem>;         // [mem32] += R32
-def ADDrm8   : I2A8 <"add", 0x02, MRMSrcMem >;         // R8  += [mem8]
-def ADDrm16  : I2A16<"add", 0x03, MRMSrcMem >, OpSize; // R16 += [mem16]
-def ADDrm32  : I2A32<"add", 0x03, MRMSrcMem >;         // R32 += [mem32]
-
-def ADDri8   : I2A8 <"add", 0x80, MRM0r     >,         Pattern<(set R8 , (plus R8 , imm))>;
-def ADDri16  : I2A16<"add", 0x81, MRM0r     >, OpSize, Pattern<(set R16, (plus R16, imm))>;
-def ADDri32  : I2A32<"add", 0x81, MRM0r     >,         Pattern<(set R32, (plus R32, imm))>;
-def ADDmi8   : I2A8 <"add", 0x80, MRM0m     >;         // [mem8] += I8
-def ADDmi16  : I2A16<"add", 0x81, MRM0m     >, OpSize; // [mem16] += I16
-def ADDmi32  : I2A32<"add", 0x81, MRM0m     >;         // [mem32] += I32
-
-def ADDri16b : I2A8 <"add", 0x83, MRM0r     >, OpSize;   // ADDri with sign extended 8 bit imm
-def ADDri32b : I2A8 <"add", 0x83, MRM0r     >;
-// def ADDmi16b : I2A8 <"add", 0x83, MRM0m     >, OpSize; // [mem16] += I8
-// def ADDmi32b : I2A8 <"add", 0x83, MRM0m     >;         // [mem32] += I8
-
-def ADCrr32  : I2A32<"adc", 0x11, MRMDestReg>;         // R32 += R32+Carry
-def ADCrm32  : I2A32<"adc", 0x11, MRMSrcMem >;         // R32 += [mem32]+Carry
-def ADCmr32  : I2A32<"adc", 0x13, MRMDestMem>;         // [mem32] += R32+Carry
-
-def SUBrr8   : I2A8 <"sub", 0x28, MRMDestReg>,         Pattern<(set R8 , (minus R8 , R8 ))>;
-def SUBrr16  : I2A16<"sub", 0x29, MRMDestReg>, OpSize, Pattern<(set R16, (minus R16, R16))>;
-def SUBrr32  : I2A32<"sub", 0x29, MRMDestReg>,         Pattern<(set R32, (minus R32, R32))>;
-def SUBmr8   : I2A8 <"sub", 0x28, MRMDestMem>;         // [mem8]  -= R8
-def SUBmr16  : I2A16<"sub", 0x29, MRMDestMem>, OpSize; // [mem16] -= R16
-def SUBmr32  : I2A32<"sub", 0x29, MRMDestMem>;         // [mem32] -= R32
-def SUBrm8   : I2A8 <"sub", 0x2A, MRMSrcMem >;         // R8  -= [mem8]
-def SUBrm16  : I2A16<"sub", 0x2B, MRMSrcMem >, OpSize; // R16 -= [mem16]
-def SUBrm32  : I2A32<"sub", 0x2B, MRMSrcMem >;         // R32 -= [mem32]
-
-def SUBri8   : I2A8 <"sub", 0x80, MRM5r     >,         Pattern<(set R8 , (minus R8 , imm))>;
-def SUBri16  : I2A16<"sub", 0x81, MRM5r     >, OpSize, Pattern<(set R16, (minus R16, imm))>;
-def SUBri32  : I2A32<"sub", 0x81, MRM5r     >,         Pattern<(set R32, (minus R32, imm))>;
-def SUBmi8   : I2A8 <"sub", 0x80, MRM5m     >;         // [mem8] -= I8
-def SUBmi16  : I2A16<"sub", 0x81, MRM5m     >, OpSize; // [mem16] -= I16
-def SUBmi32  : I2A32<"sub", 0x81, MRM5m     >;         // [mem32] -= I32
-
-def SUBri16b : I2A8 <"sub", 0x83, MRM5r     >, OpSize;
-def SUBri32b : I2A8 <"sub", 0x83, MRM5r     >;
-// def SUBmi16b : I2A8 <"sub", 0x83, MRM5m     >, OpSize; // [mem16] -= I8
-// def SUBmi32b : I2A8 <"sub", 0x83, MRM5m     >;         // [mem32] -= I8
-
-def SBBrr32  : I2A32<"sbb", 0x19, MRMDestReg>;         // R32 -= R32+Borrow
-def SBBrm32  : I2A32<"sbb", 0x19, MRMSrcMem >;         // R32 -= [mem32]+Borrow
-def SBBmr32  : I2A32<"sbb", 0x1B, MRMDestMem>;         // [mem32] -= R32+Borrow
-
-def IMULrr16 : I2A16<"imul", 0xAF, MRMSrcReg>, TB, OpSize, Pattern<(set R16, (times R16, R16))>;
-def IMULrr32 : I2A32<"imul", 0xAF, MRMSrcReg>, TB        , Pattern<(set R32, (times R32, R32))>;
-def IMULrm16 : I2A16<"imul", 0xAF, MRMSrcMem>, TB, OpSize;
-def IMULrm32 : I2A32<"imul", 0xAF, MRMSrcMem>, TB        ;
+def ADDrr8   : I     <"add", 0x00, MRMDestReg>,         Pattern<(set R8 , (plus R8 , R8 ))>;
+def ADDrr16  : I     <"add", 0x01, MRMDestReg>, OpSize, Pattern<(set R16, (plus R16, R16))>;
+def ADDrr32  : I     <"add", 0x01, MRMDestReg>,         Pattern<(set R32, (plus R32, R32))>;
+def ADDmr8   : IM8   <"add", 0x00, MRMDestMem>;         // [mem8]  += R8
+def ADDmr16  : IM16  <"add", 0x01, MRMDestMem>, OpSize; // [mem16] += R16
+def ADDmr32  : IM32  <"add", 0x01, MRMDestMem>;         // [mem32] += R32
+def ADDrm8   : IM8   <"add", 0x02, MRMSrcMem >;         // R8  += [mem8]
+def ADDrm16  : IM16  <"add", 0x03, MRMSrcMem >, OpSize; // R16 += [mem16]
+def ADDrm32  : IM32  <"add", 0x03, MRMSrcMem >;         // R32 += [mem32]
+
+def ADDri8   : II8   <"add", 0x80, MRM0r     >,         Pattern<(set R8 , (plus R8 , imm))>;
+def ADDri16  : II16  <"add", 0x81, MRM0r     >, OpSize, Pattern<(set R16, (plus R16, imm))>;
+def ADDri32  : II32  <"add", 0x81, MRM0r     >,         Pattern<(set R32, (plus R32, imm))>;
+def ADDmi8   : I8MI  <"add", 0x80, MRM0m     >;         // [mem8] += I8
+def ADDmi16  : I16MI <"add", 0x81, MRM0m     >, OpSize; // [mem16] += I16
+def ADDmi32  : I32MI <"add", 0x81, MRM0m     >;         // [mem32] += I32
+
+def ADDri16b : II8   <"add", 0x83, MRM0r     >, OpSize;   // ADDri with sign extended 8 bit imm
+def ADDri32b : II8   <"add", 0x83, MRM0r     >;
+def ADDmi16b : IM16I8<"add", 0x83, MRM0m     >, OpSize; // [mem16] += I8
+def ADDmi32b : IM32I8<"add", 0x83, MRM0m     >;         // [mem32] += I8
+
+def ADCrr32  : I     <"adc", 0x11, MRMDestReg>;         // R32 += R32+Carry
+def ADCrm32  : I     <"adc", 0x11, MRMSrcMem >;         // R32 += [mem32]+Carry
+def ADCmr32  : I     <"adc", 0x13, MRMDestMem>;         // [mem32] += R32+Carry
+
+
+def SUBrr8   : I     <"sub", 0x28, MRMDestReg>,         Pattern<(set R8 , (minus R8 , R8 ))>;
+def SUBrr16  : I     <"sub", 0x29, MRMDestReg>, OpSize, Pattern<(set R16, (minus R16, R16))>;
+def SUBrr32  : I     <"sub", 0x29, MRMDestReg>,         Pattern<(set R32, (minus R32, R32))>;
+def SUBmr8   : IM8   <"sub", 0x28, MRMDestMem>;         // [mem8]  -= R8
+def SUBmr16  : IM16  <"sub", 0x29, MRMDestMem>, OpSize; // [mem16] -= R16
+def SUBmr32  : IM32  <"sub", 0x29, MRMDestMem>;         // [mem32] -= R32
+def SUBrm8   : IM8   <"sub", 0x2A, MRMSrcMem >;         // R8  -= [mem8]
+def SUBrm16  : IM16  <"sub", 0x2B, MRMSrcMem >, OpSize; // R16 -= [mem16]
+def SUBrm32  : IM32  <"sub", 0x2B, MRMSrcMem >;         // R32 -= [mem32]
+
+def SUBri8   : II8   <"sub", 0x80, MRM5r     >,         Pattern<(set R8 , (minus R8 , imm))>;
+def SUBri16  : II16  <"sub", 0x81, MRM5r     >, OpSize, Pattern<(set R16, (minus R16, imm))>;
+def SUBri32  : II32  <"sub", 0x81, MRM5r     >,         Pattern<(set R32, (minus R32, imm))>;
+def SUBmi8   : I8MI  <"sub", 0x80, MRM5m     >;         // [mem8] -= I8
+def SUBmi16  : I16MI <"sub", 0x81, MRM5m     >, OpSize; // [mem16] -= I16
+def SUBmi32  : I32MI <"sub", 0x81, MRM5m     >;         // [mem32] -= I32
+
+def SUBri16b : II8   <"sub", 0x83, MRM5r     >, OpSize;
+def SUBri32b : II8   <"sub", 0x83, MRM5r     >;
+def SUBmi16b : IM16I8<"sub", 0x83, MRM5m     >, OpSize; // [mem16] -= I8
+def SUBmi32b : IM32I8<"sub", 0x83, MRM5m     >;         // [mem32] -= I8
+
+def SBBrr32  : I     <"sbb", 0x19, MRMDestReg>;         // R32 -= R32+Borrow
+def SBBrm32  : IM32  <"sbb", 0x19, MRMSrcMem >;         // R32 -= [mem32]+Borrow
+def SBBmr32  : IM32  <"sbb", 0x1B, MRMDestMem>;         // [mem32] -= R32+Borrow
+
+def IMULrr16 : I     <"imul", 0xAF, MRMSrcReg>, TB, OpSize, Pattern<(set R16, (times R16, R16))>;
+def IMULrr32 : I     <"imul", 0xAF, MRMSrcReg>, TB        , Pattern<(set R32, (times R32, R32))>;
+def IMULrm16 : IM16  <"imul", 0xAF, MRMSrcMem>, TB, OpSize;
+def IMULrm32 : IM32  <"imul", 0xAF, MRMSrcMem>, TB        ;
 
+} // end Two Address instructions
 
 // These are surprisingly enough not two address instructions!
-def IMULrri16  : X86Inst<"imul", 0x69, MRMSrcReg, Arg16>,     OpSize;  // R16 = R16*I16
-def IMULrri32  : X86Inst<"imul", 0x69, MRMSrcReg, Arg32>;              // R32 = R32*I32
-def IMULrri16b : X86Inst<"imul", 0x6B, MRMSrcReg, Arg8 >,     OpSize;  // R16 = R16*I8
-def IMULrri32b : X86Inst<"imul", 0x6B, MRMSrcReg, Arg8 >;              // R32 = R32*I8
-def IMULrmi16  : X86Inst<"imul", 0x69, MRMSrcMem, Arg16>,     OpSize;  // R16 = [mem16]*I16
-def IMULrmi32  : X86Inst<"imul", 0x69, MRMSrcMem, Arg32>;              // R32 = [mem32]*I32
-// def IMULrmi16b : X86Inst<"imul", 0x6B, MRMSrcMem, Arg8 >,     OpSize;  // R16 = [mem16]*I8
-// def IMULrmi32b : X86Inst<"imul", 0x6B, MRMSrcMem, Arg8 >;              // R32 = [mem32]*I8
-
-
-
-// Logical operators...
-def ANDrr8   : I2A8 <"and", 0x20, MRMDestReg>,         Pattern<(set R8 , (and R8 , R8 ))>;
-def ANDrr16  : I2A16<"and", 0x21, MRMDestReg>, OpSize, Pattern<(set R16, (and R16, R16))>;
-def ANDrr32  : I2A32<"and", 0x21, MRMDestReg>,         Pattern<(set R32, (and R32, R32))>;
-def ANDmr8   : I2A8 <"and", 0x20, MRMDestMem>;            // [mem8]  &= R8
-def ANDmr16  : I2A16<"and", 0x21, MRMDestMem>, OpSize;    // [mem16] &= R16
-def ANDmr32  : I2A32<"and", 0x21, MRMDestMem>;            // [mem32] &= R32
-def ANDrm8   : I2A8 <"and", 0x22, MRMSrcMem >;            // R8  &= [mem8]
-def ANDrm16  : I2A16<"and", 0x23, MRMSrcMem >, OpSize;    // R16 &= [mem16]
-def ANDrm32  : I2A32<"and", 0x23, MRMSrcMem >;            // R32 &= [mem32]
-
-def ANDri8   : I2A8 <"and", 0x80, MRM4r     >,         Pattern<(set R8 , (and R8 , imm))>;
-def ANDri16  : I2A16<"and", 0x81, MRM4r     >, OpSize, Pattern<(set R16, (and R16, imm))>;
-def ANDri32  : I2A32<"and", 0x81, MRM4r     >,         Pattern<(set R32, (and R32, imm))>;
-def ANDmi8   : I2A8 <"and", 0x80, MRM4m     >;            // [mem8]  &= imm8
-def ANDmi16  : I2A16<"and", 0x81, MRM4m     >, OpSize;    // [mem16] &= imm16
-def ANDmi32  : I2A32<"and", 0x81, MRM4m     >;            // [mem32] &= imm32
-
-def ANDri16b : I2A8 <"and", 0x83, MRM4r     >, OpSize;    // R16 &= imm8
-def ANDri32b : I2A8 <"and", 0x83, MRM4r     >;            // R32 &= imm8
-// def ANDmi16b : I2A8 <"and", 0x83, MRM4m     >, OpSize;    // [mem16] &= imm8
-// def ANDmi32b : I2A8 <"and", 0x83, MRM4m     >;            // [mem32] &= imm8
-
-
-
-
-def ORrr8    : I2A8 <"or" , 0x08, MRMDestReg>,         Pattern<(set R8 , (or  R8 , R8 ))>;
-def ORrr16   : I2A16<"or" , 0x09, MRMDestReg>, OpSize, Pattern<(set R16, (or  R16, R16))>;
-def ORrr32   : I2A32<"or" , 0x09, MRMDestReg>,         Pattern<(set R32, (or  R32, R32))>;
-def ORmr8    : I2A8 <"or" , 0x08, MRMDestMem>;            // [mem8]  |= R8
-def ORmr16   : I2A16<"or" , 0x09, MRMDestMem>, OpSize;    // [mem16] |= R16
-def ORmr32   : I2A32<"or" , 0x09, MRMDestMem>;            // [mem32] |= R32
-def ORrm8    : I2A8 <"or" , 0x0A, MRMSrcMem >;            // R8  |= [mem8]
-def ORrm16   : I2A16<"or" , 0x0B, MRMSrcMem >, OpSize;    // R16 |= [mem16]
-def ORrm32   : I2A32<"or" , 0x0B, MRMSrcMem >;            // R32 |= [mem32]
-
-def ORri8    : I2A8 <"or" , 0x80, MRM1r     >,         Pattern<(set R8 , (or  R8 , imm))>;
-def ORri16   : I2A16<"or" , 0x81, MRM1r     >, OpSize, Pattern<(set R16, (or  R16, imm))>;
-def ORri32   : I2A32<"or" , 0x81, MRM1r     >,         Pattern<(set R32, (or  R32, imm))>;
-def ORmi8    : I2A8 <"or" , 0x80, MRM1m     >;            // [mem8]  |= imm8
-def ORmi16   : I2A16<"or" , 0x81, MRM1m     >, OpSize;    // [mem16] |= imm16
-def ORmi32   : I2A32<"or" , 0x81, MRM1m     >;            // [mem32] |= imm32
-
-def ORri16b  : I2A8 <"or" , 0x83, MRM1r     >, OpSize;    // R16 |= imm8
-def ORri32b  : I2A8 <"or" , 0x83, MRM1r     >;            // R32 |= imm8
-// def ORmi16b  : I2A8 <"or" , 0x83, MRM1m     >, OpSize;    // [mem16] |= imm8
-// def ORmi32b  : I2A8 <"or" , 0x83, MRM1m     >;            // [mem32] |= imm8
-
-
-def XORrr8   : I2A8 <"xor", 0x30, MRMDestReg>,         Pattern<(set R8 , (xor R8 , R8 ))>;
-def XORrr16  : I2A16<"xor", 0x31, MRMDestReg>, OpSize, Pattern<(set R16, (xor R16, R16))>;
-def XORrr32  : I2A32<"xor", 0x31, MRMDestReg>,         Pattern<(set R32, (xor R32, R32))>;
-def XORmr8   : I2A8 <"xor", 0x30, MRMDestMem>;            // [mem8]  ^= R8
-def XORmr16  : I2A16<"xor", 0x31, MRMDestMem>, OpSize;    // [mem16] ^= R16
-def XORmr32  : I2A32<"xor", 0x31, MRMDestMem>;            // [mem32] ^= R32
-def XORrm8   : I2A8 <"xor", 0x32, MRMSrcMem >;            // R8  ^= [mem8]
-def XORrm16  : I2A16<"xor", 0x33, MRMSrcMem >, OpSize;    // R16 ^= [mem16]
-def XORrm32  : I2A32<"xor", 0x33, MRMSrcMem >;            // R32 ^= [mem32]
-
-def XORri8   : I2A8 <"xor", 0x80, MRM6r     >,         Pattern<(set R8 , (xor R8 , imm))>;
-def XORri16  : I2A16<"xor", 0x81, MRM6r     >, OpSize, Pattern<(set R16, (xor R16, imm))>;
-def XORri32  : I2A32<"xor", 0x81, MRM6r     >,         Pattern<(set R32, (xor R32, imm))>;
-def XORmi8   : I2A8 <"xor", 0x80, MRM6m     >;            // [mem8] ^= R8
-def XORmi16  : I2A16<"xor", 0x81, MRM6m     >, OpSize;    // [mem16] ^= R16
-def XORmi32  : I2A32<"xor", 0x81, MRM6m     >;            // [mem32] ^= R32
-
-def XORri16b : I2A8 <"xor", 0x83, MRM6r     >, OpSize;    // R16 ^= imm8
-def XORri32b : I2A8 <"xor", 0x83, MRM6r     >;            // R32 ^= imm8
-// def XORmi16b : I2A8 <"xor", 0x83, MRM6m     >, OpSize;    // [mem16] ^= imm8
-// def XORmi32b : I2A8 <"xor", 0x83, MRM6m     >;            // [mem32] ^= imm8
+def IMULrri16  : II16  <"imul", 0x69, MRMSrcReg>,     OpSize;  // R16 = R16*I16
+def IMULrri32  : II32  <"imul", 0x69, MRMSrcReg>;              // R32 = R32*I32
+def IMULrri16b : II8   <"imul", 0x6B, MRMSrcReg>,     OpSize;  // R16 = R16*I8
+def IMULrri32b : II8   <"imul", 0x6B, MRMSrcReg>;              // R32 = R32*I8
+def IMULrmi16  : I16MI <"imul", 0x69, MRMSrcMem>,     OpSize;  // R16 = [mem16]*I16
+def IMULrmi32  : I32MI <"imul", 0x69, MRMSrcMem>;              // R32 = [mem32]*I32
+def IMULrmi16b : IM16I8<"imul", 0x6B, MRMSrcMem>,     OpSize;  // R16 = [mem16]*I8
+def IMULrmi32b : IM32I8<"imul", 0x6B, MRMSrcMem>;              // R32 = [mem32]*I8
 
+//===----------------------------------------------------------------------===//
 // Test instructions are just like AND, except they don't generate a result.
-def TESTrr8  : X86Inst<"test", 0x84, MRMDestReg, Arg8 >;          // flags = R8  & R8
-def TESTrr16 : X86Inst<"test", 0x85, MRMDestReg, Arg16>, OpSize;  // flags = R16 & R16
-def TESTrr32 : X86Inst<"test", 0x85, MRMDestReg, Arg32>;          // flags = R32 & R32
-def TESTmr8  : X86Inst<"test", 0x84, MRMDestMem, Arg8 >;          // flags = [mem8]  & R8
-def TESTmr16 : X86Inst<"test", 0x85, MRMDestMem, Arg16>, OpSize;  // flags = [mem16] & R16
-def TESTmr32 : X86Inst<"test", 0x85, MRMDestMem, Arg32>;          // flags = [mem32] & R32
-def TESTrm8  : X86Inst<"test", 0x84, MRMSrcMem , Arg8 >;          // flags = R8  & [mem8]
-def TESTrm16 : X86Inst<"test", 0x85, MRMSrcMem , Arg16>, OpSize;  // flags = R16 & [mem16]
-def TESTrm32 : X86Inst<"test", 0x85, MRMSrcMem , Arg32>;          // flags = R32 & [mem32]
-
-def TESTri8  : X86Inst<"test", 0xF6, MRM0r     , Arg8 >;          // flags = R8  & imm8
-def TESTri16 : X86Inst<"test", 0xF7, MRM0r     , Arg16>, OpSize;  // flags = R16 & imm16
-def TESTri32 : X86Inst<"test", 0xF7, MRM0r     , Arg32>;          // flags = R32 & imm32
-def TESTmi8  : X86Inst<"test", 0xF6, MRM0m     , Arg8 >;          // flags = [mem8]  & imm8
-def TESTmi16 : X86Inst<"test", 0xF7, MRM0m     , Arg16>, OpSize;  // flags = [mem16] & imm16
-def TESTmi32 : X86Inst<"test", 0xF7, MRM0m     , Arg32>;          // flags = [mem32] & imm32
+def TESTrr8  : I    <"test", 0x84, MRMDestReg>;          // flags = R8  & R8
+def TESTrr16 : I    <"test", 0x85, MRMDestReg>, OpSize;  // flags = R16 & R16
+def TESTrr32 : I    <"test", 0x85, MRMDestReg>;          // flags = R32 & R32
+def TESTmr8  : IM8  <"test", 0x84, MRMDestMem>;          // flags = [mem8]  & R8
+def TESTmr16 : IM16 <"test", 0x85, MRMDestMem>, OpSize;  // flags = [mem16] & R16
+def TESTmr32 : IM32 <"test", 0x85, MRMDestMem>;          // flags = [mem32] & R32
+def TESTrm8  : IM8  <"test", 0x84, MRMSrcMem >;          // flags = R8  & [mem8]
+def TESTrm16 : IM16 <"test", 0x85, MRMSrcMem >, OpSize;  // flags = R16 & [mem16]
+def TESTrm32 : IM32 <"test", 0x85, MRMSrcMem >;          // flags = R32 & [mem32]
+
+def TESTri8  : II8  <"test", 0xF6, MRM0r     >;          // flags = R8  & imm8
+def TESTri16 : II16 <"test", 0xF7, MRM0r     >, OpSize;  // flags = R16 & imm16
+def TESTri32 : II32 <"test", 0xF7, MRM0r     >;          // flags = R32 & imm32
+def TESTmi8  : I8MI <"test", 0xF6, MRM0m     >;          // flags = [mem8]  & imm8
+def TESTmi16 : I16MI<"test", 0xF7, MRM0m     >, OpSize;  // flags = [mem16] & imm16
+def TESTmi32 : I32MI<"test", 0xF7, MRM0m     >;          // flags = [mem32] & imm32
 
-// Shift instructions
-class UsesCL { list<Register> Uses = [CL]; bit printImplicitUses = 1; }
 
-def SHLrCL8  : I2A8 <"shl", 0xD2, MRM4r     >        , UsesCL; // R8  <<= cl
-def SHLrCL16 : I2A16<"shl", 0xD3, MRM4r     >, OpSize, UsesCL; // R16 <<= cl
-def SHLrCL32 : I2A32<"shl", 0xD3, MRM4r     >        , UsesCL; // R32 <<= cl
-def SHLmCL8  : I2A8 <"shl", 0xD2, MRM4m     >        , UsesCL; // [mem8]  <<= cl
-def SHLmCL16 : I2A16<"shl", 0xD3, MRM4m     >, OpSize, UsesCL; // [mem16] <<= cl
-def SHLmCL32 : I2A32<"shl", 0xD3, MRM4m     >        , UsesCL; // [mem32] <<= cl
-
-def SHLri8   : I2A8 <"shl", 0xC0, MRM4r     >;                 // R8  <<= imm8
-def SHLri16  : I2A8 <"shl", 0xC1, MRM4r     >, OpSize;         // R16 <<= imm8
-def SHLri32  : I2A8 <"shl", 0xC1, MRM4r     >;                 // R32 <<= imm8
-// def SHLmi8   : I2A8 <"shl", 0xC0, MRM4m     >;                 // [mem8]  <<= imm8
-// def SHLmi16  : I2A8 <"shl", 0xC1, MRM4m     >, OpSize;         // [mem16] <<= imm8
-// def SHLmi32  : I2A8 <"shl", 0xC1, MRM4m     >;                 // [mem32] <<= imm8
-
-def SHRrCL8  : I2A8 <"shr", 0xD2, MRM5r     >        , UsesCL; // R8  >>= cl
-def SHRrCL16 : I2A16<"shr", 0xD3, MRM5r     >, OpSize, UsesCL; // R16 >>= cl
-def SHRrCL32 : I2A32<"shr", 0xD3, MRM5r     >        , UsesCL; // R32 >>= cl
-def SHRmCL8  : I2A8 <"shr", 0xD2, MRM5m     >        , UsesCL; // [mem8]  >>= cl
-def SHRmCL16 : I2A16<"shr", 0xD3, MRM5m     >, OpSize, UsesCL; // [mem16] >>= cl
-def SHRmCL32 : I2A32<"shr", 0xD3, MRM5m     >        , UsesCL; // [mem32] >>= cl
-
-def SHRri8   : I2A8 <"shr", 0xC0, MRM5r     >;                 // R8  >>= imm8
-def SHRri16  : I2A8 <"shr", 0xC1, MRM5r     >, OpSize;         // R16 >>= imm8
-def SHRri32  : I2A8 <"shr", 0xC1, MRM5r     >;                 // R32 >>= imm8
-// def SHRmi8   : I2A8 <"shr", 0xC0, MRM5m     >;                 // [mem8]  >>= imm8
-// def SHRmi16  : I2A8 <"shr", 0xC1, MRM5m     >, OpSize;         // [mem16] >>= imm8
-// def SHRmi32  : I2A8 <"shr", 0xC1, MRM5m     >;                 // [mem32] >>= imm8
-
-def SARrCL8  : I2A8 <"sar", 0xD2, MRM7r     >        , UsesCL; // R8  >>>= cl
-def SARrCL16 : I2A16<"sar", 0xD3, MRM7r     >, OpSize, UsesCL; // R16 >>>= cl
-def SARrCL32 : I2A32<"sar", 0xD3, MRM7r     >        , UsesCL; // R32 >>>= cl
-def SARmCL8  : I2A8 <"sar", 0xD2, MRM7m     >        , UsesCL; // [mem8]  >>>= cl
-def SARmCL16 : I2A16<"sar", 0xD3, MRM7m     >, OpSize, UsesCL; // [mem16] >>>= cl
-def SARmCL32 : I2A32<"sar", 0xD3, MRM7m     >        , UsesCL; // [mem32] >>>= cl
-
-def SARri8   : I2A8 <"sar", 0xC0, MRM7r     >;                 // R8  >>>= imm8
-def SARri16  : I2A8 <"sar", 0xC1, MRM7r     >, OpSize;         // R16 >>>= imm8
-def SARri32  : I2A8 <"sar", 0xC1, MRM7r     >;                 // R32 >>>= imm8
-// def SARmi8   : I2A8 <"sar", 0xC0, MRM7m     >;                 // [mem8]  >>>= imm8
-// def SARmi16  : I2A8 <"sar", 0xC1, MRM7m     >, OpSize;         // [mem16] >>>= imm8
-// def SARmi32  : I2A8 <"sar", 0xC1, MRM7m     >;                 // [mem32] >>>= imm8
-
-def SHLDrrCL32 : I2A32<"shld", 0xA5, MRMDestReg>, TB, UsesCL;   // R32 <<= R32,R32 cl
-def SHLDmrCL32 : I2A32<"shld", 0xA5, MRMDestMem>, TB, UsesCL;   // [mem32] <<= [mem32],R32 cl
-def SHLDrri32  : I2A8 <"shld", 0xA4, MRMDestReg>, TB;           // R32 <<= R32,R32 imm8
-// def SHLDmri32  : I2A8 <"shld", 0xA4, MRMDestMem>, TB;           // [mem32] <<= [mem32],R32 imm8
-
-def SHRDrrCL32 : I2A32<"shrd", 0xAD, MRMDestReg>, TB, UsesCL;   // R32 >>= R32,R32 cl
-def SHRDmrCL32 : I2A32<"shrd", 0xAD, MRMDestMem>, TB, UsesCL;   // [mem32] >>= [mem32],R32 cl
-def SHRDrri32  : I2A8 <"shrd", 0xAC, MRMDestReg>, TB;           // R32 >>= R32,R32 imm8
-// def SHRDmri32  : I2A8 <"shrd", 0xAC, MRMDestMem>, TB;           // [mem32] >>= [mem32],R32 imm8
 
 // Condition code ops, incl. set if equal/not equal/...
-def SAHF     : X86Inst<"sahf" , 0x9E, RawFrm, Arg8>, Imp<[AH],[]>;  // flags = AH
+def SAHF     : I  <"sahf" , 0x9E, RawFrm>, Imp<[AH],[]>;  // flags = AH
 
-def SETBr    : X86Inst<"setb" , 0x92, MRM0r , Arg8>, TB;            // R8 = <  unsign
-def SETBm    : X86Inst<"setb" , 0x92, MRM0m , Arg8>, TB;            // [mem8] = <  unsign
-def SETAEr   : X86Inst<"setae", 0x93, MRM0r , Arg8>, TB;            // R8 = >= unsign
-def SETAEm   : X86Inst<"setae", 0x93, MRM0m , Arg8>, TB;            // [mem8] = >= unsign
-def SETEr    : X86Inst<"sete" , 0x94, MRM0r , Arg8>, TB;            // R8 = ==
-def SETEm    : X86Inst<"sete" , 0x94, MRM0m , Arg8>, TB;            // [mem8] = ==
-def SETNEr   : X86Inst<"setne", 0x95, MRM0r , Arg8>, TB;            // R8 = !=
-def SETNEm   : X86Inst<"setne", 0x95, MRM0m , Arg8>, TB;            // [mem8] = !=
-def SETBEr   : X86Inst<"setbe", 0x96, MRM0r , Arg8>, TB;            // R8 = <= unsign
-def SETBEm   : X86Inst<"setbe", 0x96, MRM0m , Arg8>, TB;            // [mem8] = <= unsign
-def SETAr    : X86Inst<"seta" , 0x97, MRM0r , Arg8>, TB;            // R8 = >  signed
-def SETAm    : X86Inst<"seta" , 0x97, MRM0m , Arg8>, TB;            // [mem8] = >  signed
-def SETSr    : X86Inst<"sets" , 0x98, MRM0r , Arg8>, TB;            // R8 = <sign bit>
-def SETSm    : X86Inst<"sets" , 0x98, MRM0m , Arg8>, TB;            // [mem8] = <sign bit>
-def SETNSr   : X86Inst<"setns", 0x99, MRM0r , Arg8>, TB;            // R8 = !<sign bit>
-def SETNSm   : X86Inst<"setns", 0x99, MRM0m , Arg8>, TB;            // [mem8] = !<sign bit>
-def SETLr    : X86Inst<"setl" , 0x9C, MRM0r , Arg8>, TB;            // R8 = <  signed
-def SETLm    : X86Inst<"setl" , 0x9C, MRM0m , Arg8>, TB;            // [mem8] = <  signed
-def SETGEr   : X86Inst<"setge", 0x9D, MRM0r , Arg8>, TB;            // R8 = >= signed
-def SETGEm   : X86Inst<"setge", 0x9D, MRM0m , Arg8>, TB;            // [mem8] = >= signed
-def SETLEr   : X86Inst<"setle", 0x9E, MRM0r , Arg8>, TB;            // R8 = <= signed
-def SETLEm   : X86Inst<"setle", 0x9E, MRM0m , Arg8>, TB;            // [mem8] = <= signed
-def SETGr    : X86Inst<"setg" , 0x9F, MRM0r , Arg8>, TB;            // R8 = <  signed
-def SETGm    : X86Inst<"setg" , 0x9F, MRM0m , Arg8>, TB;            // [mem8] = <  signed
-
-// Conditional moves.  These are modelled as X = cmovXX Y, Z.  Eventually
-// register allocated to cmovXX XY, Z
-def CMOVErr16 : I2A16<"cmove", 0x44, MRMSrcReg>, TB, OpSize;        // if ==, R16 = R16
-def CMOVNErr32: I2A32<"cmovne",0x45, MRMSrcReg>, TB;                // if !=, R32 = R32
-def CMOVSrr32 : I2A32<"cmovs", 0x48, MRMSrcReg>, TB;                // if signed, R32 = R32
+def SETBr    : I  <"setb" , 0x92, MRM0r>, TB;            // R8 = <  unsign
+def SETBm    : IM8<"setb" , 0x92, MRM0m>, TB;            // [mem8] = <  unsign
+def SETAEr   : I  <"setae", 0x93, MRM0r>, TB;            // R8 = >= unsign
+def SETAEm   : IM8<"setae", 0x93, MRM0m>, TB;            // [mem8] = >= unsign
+def SETEr    : I  <"sete" , 0x94, MRM0r>, TB;            // R8 = ==
+def SETEm    : IM8<"sete" , 0x94, MRM0m>, TB;            // [mem8] = ==
+def SETNEr   : I  <"setne", 0x95, MRM0r>, TB;            // R8 = !=
+def SETNEm   : IM8<"setne", 0x95, MRM0m>, TB;            // [mem8] = !=
+def SETBEr   : I  <"setbe", 0x96, MRM0r>, TB;            // R8 = <= unsign
+def SETBEm   : IM8<"setbe", 0x96, MRM0m>, TB;            // [mem8] = <= unsign
+def SETAr    : I  <"seta" , 0x97, MRM0r>, TB;            // R8 = >  signed
+def SETAm    : IM8<"seta" , 0x97, MRM0m>, TB;            // [mem8] = >  signed
+def SETSr    : I  <"sets" , 0x98, MRM0r>, TB;            // R8 = <sign bit>
+def SETSm    : IM8<"sets" , 0x98, MRM0m>, TB;            // [mem8] = <sign bit>
+def SETNSr   : I  <"setns", 0x99, MRM0r>, TB;            // R8 = !<sign bit>
+def SETNSm   : IM8<"setns", 0x99, MRM0m>, TB;            // [mem8] = !<sign bit>
+def SETLr    : I  <"setl" , 0x9C, MRM0r>, TB;            // R8 = <  signed
+def SETLm    : IM8<"setl" , 0x9C, MRM0m>, TB;            // [mem8] = <  signed
+def SETGEr   : I  <"setge", 0x9D, MRM0r>, TB;            // R8 = >= signed
+def SETGEm   : IM8<"setge", 0x9D, MRM0m>, TB;            // [mem8] = >= signed
+def SETLEr   : I  <"setle", 0x9E, MRM0r>, TB;            // R8 = <= signed
+def SETLEm   : IM8<"setle", 0x9E, MRM0m>, TB;            // [mem8] = <= signed
+def SETGr    : I  <"setg" , 0x9F, MRM0r>, TB;            // R8 = <  signed
+def SETGm    : IM8<"setg" , 0x9F, MRM0m>, TB;            // [mem8] = <  signed
 
 // Integer comparisons
-def CMPrr8  : X86Inst<"cmp", 0x38, MRMDestReg, Arg8 >;              // compare R8, R8
-def CMPrr16 : X86Inst<"cmp", 0x39, MRMDestReg, Arg16>, OpSize;      // compare R16, R16
-def CMPrr32 : X86Inst<"cmp", 0x39, MRMDestReg, Arg32>,              // compare R32, R32
+def CMPrr8  : I    <"cmp", 0x38, MRMDestReg>;              // compare R8, R8
+def CMPrr16 : I    <"cmp", 0x39, MRMDestReg>, OpSize;      // compare R16, R16
+def CMPrr32 : I    <"cmp", 0x39, MRMDestReg>,              // compare R32, R32
               Pattern<(isVoid (unspec2 R32, R32))>;
-def CMPmr8  : X86Inst<"cmp", 0x38, MRMDestMem, Arg8 >;              // compare [mem8], R8
-def CMPmr16 : X86Inst<"cmp", 0x39, MRMDestMem, Arg16>, OpSize;      // compare [mem16], R16
-def CMPmr32 : X86Inst<"cmp", 0x39, MRMDestMem, Arg32>;              // compare [mem32], R32
-def CMPrm8  : X86Inst<"cmp", 0x3A, MRMSrcMem , Arg8 >;              // compare R8, [mem8]
-def CMPrm16 : X86Inst<"cmp", 0x3B, MRMSrcMem , Arg16>, OpSize;      // compare R16, [mem16]
-def CMPrm32 : X86Inst<"cmp", 0x3B, MRMSrcMem , Arg32>;              // compare R32, [mem32]
-def CMPri8  : X86Inst<"cmp", 0x80, MRM7r     , Arg8 >;              // compare R8, imm8
-def CMPri16 : X86Inst<"cmp", 0x81, MRM7r     , Arg16>, OpSize;      // compare R16, imm16
-def CMPri32 : X86Inst<"cmp", 0x81, MRM7r     , Arg32>;              // compare R32, imm32
-def CMPmi8  : X86Inst<"cmp", 0x80, MRM7m     , Arg8 >;              // compare [mem8], imm8
-def CMPmi16 : X86Inst<"cmp", 0x81, MRM7m     , Arg16>, OpSize;      // compare [mem16], imm16
-def CMPmi32 : X86Inst<"cmp", 0x81, MRM7m     , Arg32>;              // compare [mem32], imm32
+def CMPmr8  : IM8  <"cmp", 0x38, MRMDestMem>;              // compare [mem8], R8
+def CMPmr16 : IM16 <"cmp", 0x39, MRMDestMem>, OpSize;      // compare [mem16], R16
+def CMPmr32 : IM32 <"cmp", 0x39, MRMDestMem>;              // compare [mem32], R32
+def CMPrm8  : IM8  <"cmp", 0x3A, MRMSrcMem >;              // compare R8, [mem8]
+def CMPrm16 : IM16 <"cmp", 0x3B, MRMSrcMem >, OpSize;      // compare R16, [mem16]
+def CMPrm32 : IM32 <"cmp", 0x3B, MRMSrcMem >;              // compare R32, [mem32]
+def CMPri8  : II8  <"cmp", 0x80, MRM7r     >;              // compare R8, imm8
+def CMPri16 : II16 <"cmp", 0x81, MRM7r     >, OpSize;      // compare R16, imm16
+def CMPri32 : II32 <"cmp", 0x81, MRM7r     >;              // compare R32, imm32
+def CMPmi8  : I8MI <"cmp", 0x80, MRM7m     >;              // compare [mem8], imm8
+def CMPmi16 : I16MI<"cmp", 0x81, MRM7m     >, OpSize;      // compare [mem16], imm16
+def CMPmi32 : I32MI<"cmp", 0x81, MRM7m     >;              // compare [mem32], imm32
 
 // Sign/Zero extenders
-def MOVSXr16r8 : X86Inst<"movsx", 0xBE, MRMSrcReg, Arg8>, TB, OpSize; // R16 = signext(R8)
-def MOVSXr32r8 : X86Inst<"movsx", 0xBE, MRMSrcReg, Arg8>, TB;         // R32 = signext(R8)
-def MOVSXr32r16: X86Inst<"movsx", 0xBF, MRMSrcReg, Arg16>,TB;         // R32 = signext(R16)
-def MOVSXr16m8 : X86Inst<"movsx", 0xBE, MRMSrcMem, Arg8>, TB, OpSize; // R16 = signext([mem8])
-def MOVSXr32m8 : X86Inst<"movsx", 0xBE, MRMSrcMem, Arg8>, TB;         // R32 = signext([mem8])
-def MOVSXr32m16: X86Inst<"movsx", 0xBF, MRMSrcMem, Arg16>,TB;         // R32 = signext([mem16])
-
-def MOVZXr16r8 : X86Inst<"movzx", 0xB6, MRMSrcReg, Arg8>, TB, OpSize; // R16 = zeroext(R8)
-def MOVZXr32r8 : X86Inst<"movzx", 0xB6, MRMSrcReg, Arg8>, TB;         // R32 = zeroext(R8)
-def MOVZXr32r16: X86Inst<"movzx", 0xB7, MRMSrcReg, Arg16>,TB;         // R32 = zeroext(R16)
-def MOVZXr16m8 : X86Inst<"movzx", 0xB6, MRMSrcMem, Arg8>, TB, OpSize; // R16 = zeroext([mem8])
-def MOVZXr32m8 : X86Inst<"movzx", 0xB6, MRMSrcMem, Arg8>, TB;         // R32 = zeroext([mem8])
-def MOVZXr32m16: X86Inst<"movzx", 0xB7, MRMSrcMem, Arg16>,TB;         // R32 = zeroext([mem16])
+def MOVSXr16r8 : I   <"movsx", 0xBE, MRMSrcReg>, TB, OpSize; // R16 = signext(R8)
+def MOVSXr32r8 : I   <"movsx", 0xBE, MRMSrcReg>, TB;         // R32 = signext(R8)
+def MOVSXr32r16: I   <"movsx", 0xBF, MRMSrcReg>, TB;         // R32 = signext(R16)
+def MOVSXr16m8 : IM8 <"movsx", 0xBE, MRMSrcMem>, TB, OpSize; // R16 = signext([mem8])
+def MOVSXr32m8 : IM8 <"movsx", 0xBE, MRMSrcMem>, TB;         // R32 = signext([mem8])
+def MOVSXr32m16: IM16<"movsx", 0xBF, MRMSrcMem>, TB;         // R32 = signext([mem16])
+
+def MOVZXr16r8 : I   <"movzx", 0xB6, MRMSrcReg>, TB, OpSize; // R16 = zeroext(R8)
+def MOVZXr32r8 : I   <"movzx", 0xB6, MRMSrcReg>, TB;         // R32 = zeroext(R8)
+def MOVZXr32r16: I   <"movzx", 0xB7, MRMSrcReg>, TB;         // R32 = zeroext(R16)
+def MOVZXr16m8 : IM8 <"movzx", 0xB6, MRMSrcMem>, TB, OpSize; // R16 = zeroext([mem8])
+def MOVZXr32m8 : IM8 <"movzx", 0xB6, MRMSrcMem>, TB;         // R32 = zeroext([mem8])
+def MOVZXr32m16: IM16<"movzx", 0xB7, MRMSrcMem>, TB;         // R32 = zeroext([mem16])
 
 
 //===----------------------------------------------------------------------===//
@@ -580,74 +610,78 @@
 
 // FIXME: These need to indicate mod/ref sets for FP regs... & FP 'TOP'
 
-// Floating point pseudo instructions...
-class FPInst<string n, bits<8> o, Format F, ArgType t, FPFormat fp>
-  : X86Inst<n, o, F, t> { let FPForm = fp; let FPFormBits = FPForm.Value; }
+// Floating point instruction templates
+class FPInst<string n, bits<8> o, Format F, FPFormat fp, MemType m, ImmType i>
+  : X86Inst<n, o, F, m, i> { let FPForm = fp; let FPFormBits = FPForm.Value; }
+
+class FPI<string n, bits<8> o, Format F, FPFormat fp> : FPInst<n, o, F, fp, NoMem, NoImm>;
+
+class FPIM<string n, bits<8> o, Format F, FPFormat fp, MemType m> : FPInst<n, o, F, fp, m, NoImm>;
+
+class FPIM16<string n, bits<8> o, Format F, FPFormat fp> : FPIM<n, o, F, fp, Mem16>;
+class FPIM32<string n, bits<8> o, Format F, FPFormat fp> : FPIM<n, o, F, fp, Mem32>;
+class FPIM64<string n, bits<8> o, Format F, FPFormat fp> : FPIM<n, o, F, fp, Mem64>;
+class FPIM80<string n, bits<8> o, Format F, FPFormat fp> : FPIM<n, o, F, fp, Mem80>;
 
 // Pseudo instructions for floating point.  We use these pseudo instructions
 // because they can be expanded by the fp stackifier into one of many different
 // forms of instructions for doing these operations.  Until the stackifier runs,
 // we prefer to be abstract.
-def FpMOV : FPInst<"FMOV", 0, Pseudo, ArgF80, SpecialFP>;   // f1 = fmov f2
-def FpADD : FPInst<"FADD", 0, Pseudo, ArgF80, TwoArgFP>;    // f1 = fadd f2, f3
-def FpSUB : FPInst<"FSUB", 0, Pseudo, ArgF80, TwoArgFP>;    // f1 = fsub f2, f3
-def FpMUL : FPInst<"FMUL", 0, Pseudo, ArgF80, TwoArgFP>;    // f1 = fmul f2, f3
-def FpDIV : FPInst<"FDIV", 0, Pseudo, ArgF80, TwoArgFP>;    // f1 = fdiv f2, f3
-
-def FpUCOM : FPInst<"FUCOM", 0, Pseudo, ArgF80, TwoArgFP>;  // FPSW = fucom f1, f2
-
-def FpGETRESULT : FPInst<"FGETRESULT",0, Pseudo, ArgF80, SpecialFP>;  // FPR = ST(0)
-
-def FpSETRESULT : FPInst<"FSETRESULT",0, Pseudo, ArgF80, SpecialFP>;  // ST(0) = FPR
+def FpMOV : FPI<"FMOV", 0, Pseudo, SpecialFP>;   // f1 = fmov f2
+def FpADD : FPI<"FADD", 0, Pseudo, TwoArgFP>;    // f1 = fadd f2, f3
+def FpSUB : FPI<"FSUB", 0, Pseudo, TwoArgFP>;    // f1 = fsub f2, f3
+def FpMUL : FPI<"FMUL", 0, Pseudo, TwoArgFP>;    // f1 = fmul f2, f3
+def FpDIV : FPI<"FDIV", 0, Pseudo, TwoArgFP>;    // f1 = fdiv f2, f3
+
+def FpUCOM : FPI<"FUCOM", 0, Pseudo, TwoArgFP>;  // FPSW = fucom f1, f2
+def FpGETRESULT : FPI<"FGETRESULT",0, Pseudo, SpecialFP>;  // FPR = ST(0)
+def FpSETRESULT : FPI<"FSETRESULT",0, Pseudo, SpecialFP>;  // ST(0) = FPR
 
 // Floating point loads & stores...
-def FLDrr   : FPInst<"fld"   , 0xC0, AddRegFrm, ArgF80, NotFP>, D9;   // push(ST(i))
-def FLDr32  : FPInst<"fld"   , 0xD9, MRM0m    , ArgF32, ZeroArgFP>;        // load float
-def FLDr64  : FPInst<"fld"   , 0xDD, MRM0m    , ArgF64, ZeroArgFP>;        // load double
-def FLDr80  : FPInst<"fld"   , 0xDB, MRM5m    , ArgF80, ZeroArgFP>;        // load extended
-def FILDr16 : FPInst<"fild"  , 0xDF, MRM0m    , Arg16 , ZeroArgFP>;        // load signed short
-def FILDr32 : FPInst<"fild"  , 0xDB, MRM0m    , Arg32 , ZeroArgFP>;        // load signed int
-def FILDr64 : FPInst<"fild"  , 0xDF, MRM5m    , Arg64 , ZeroArgFP>;        // load signed long
-
-def FSTr32   : FPInst<"fst" , 0xD9, MRM2m    , ArgF32, OneArgFP>;          // store float
-def FSTr64   : FPInst<"fst" , 0xDD, MRM2m    , ArgF64, OneArgFP>;          // store double
-def FSTPr32  : FPInst<"fstp", 0xD9, MRM3m    , ArgF32, OneArgFP>;          // store float, pop
-def FSTPr64  : FPInst<"fstp", 0xDD, MRM3m    , ArgF64, OneArgFP>;          // store double, pop
-def FSTPr80  : FPInst<"fstp", 0xDB, MRM7m    , ArgF80, OneArgFP>;          // store extended, pop
-def FSTrr    : FPInst<"fst" , 0xD0, AddRegFrm, ArgF80, NotFP   >, DD;      // ST(i) = ST(0)
-def FSTPrr   : FPInst<"fstp", 0xD8, AddRegFrm, ArgF80, NotFP   >, DD;      // ST(i) = ST(0), pop
-
-def FISTr16  : FPInst<"fist",    0xDF, MRM2m , Arg16 , OneArgFP>;          // store signed short
-def FISTr32  : FPInst<"fist",    0xDB, MRM2m , Arg32 , OneArgFP>;          // store signed int
-def FISTPr16 : FPInst<"fistp",   0xDF, MRM3m , Arg16 , NotFP   >;          // store signed short, pop
-def FISTPr32 : FPInst<"fistp",   0xDB, MRM3m , Arg32 , NotFP   >;          // store signed int, pop
-def FISTPr64 : FPInst<"fistpll", 0xDF, MRM7m , Arg64 , OneArgFP>;          // store signed long, pop
+def FLDrr   : FPI    <"fld"   , 0xC0, AddRegFrm, NotFP>, D9;        // push(ST(i))
+def FLDr32  : FPIM32 <"fld"   , 0xD9, MRM0m    , ZeroArgFP>;        // load float
+def FLDr64  : FPIM64 <"fld"   , 0xDD, MRM0m    , ZeroArgFP>;        // load double
+def FLDr80  : FPIM80 <"fld"   , 0xDB, MRM5m    , ZeroArgFP>;        // load extended
+def FILDr16 : FPIM16 <"fild"  , 0xDF, MRM0m    , ZeroArgFP>;        // load signed short
+def FILDr32 : FPIM32 <"fild"  , 0xDB, MRM0m    , ZeroArgFP>;        // load signed int
+def FILDr64 : FPIM64 <"fild"  , 0xDF, MRM5m    , ZeroArgFP>;        // load signed long
+
+def FSTrr    : FPI    <"fst" , 0xD0, AddRegFrm, NotFP   >, DD;      // ST(i) = ST(0)
+def FSTPrr   : FPI    <"fstp", 0xD8, AddRegFrm, NotFP   >, DD;      // ST(i) = ST(0), pop
+def FSTr32   : FPIM32 <"fst" , 0xD9, MRM2m    , OneArgFP>;          // store float
+def FSTr64   : FPIM64 <"fst" , 0xDD, MRM2m    , OneArgFP>;          // store double
+def FSTPr32  : FPIM32 <"fstp", 0xD9, MRM3m    , OneArgFP>;          // store float, pop
+def FSTPr64  : FPIM64 <"fstp", 0xDD, MRM3m    , OneArgFP>;          // store double, pop
+def FSTPr80  : FPIM80 <"fstp", 0xDB, MRM7m    , OneArgFP>;          // store extended, pop
+
+def FISTr16  : FPIM16 <"fist",    0xDF, MRM2m , OneArgFP>;          // store signed short
+def FISTr32  : FPIM32 <"fist",    0xDB, MRM2m , OneArgFP>;          // store signed int
+def FISTPr16 : FPIM16 <"fistp",   0xDF, MRM3m , NotFP   >;          // store signed short, pop
+def FISTPr32 : FPIM32 <"fistp",   0xDB, MRM3m , NotFP   >;          // store signed int, pop
+def FISTPr64 : FPIM64 <"fistpll", 0xDF, MRM7m , OneArgFP>;          // store signed long, pop
 
-def FXCH     : FPInst<"fxch",    0xC8, AddRegFrm, ArgF80, NotFP>, D9;      // fxch ST(i), ST(0)
+def FXCH     : FPI    <"fxch",    0xC8, AddRegFrm, NotFP>, D9;      // fxch ST(i), ST(0)
 
 // Floating point constant loads...
-def FLD0 : FPInst<"fldz", 0xEE, RawFrm, ArgF80, ZeroArgFP>, D9;
-def FLD1 : FPInst<"fld1", 0xE8, RawFrm, ArgF80, ZeroArgFP>, D9;
+def FLD0 : FPI<"fldz", 0xEE, RawFrm, ZeroArgFP>, D9;
+def FLD1 : FPI<"fld1", 0xE8, RawFrm, ZeroArgFP>, D9;
 
 
 // Unary operations...
-def FCHS : FPInst<"fchs", 0xE0, RawFrm, ArgF80, OneArgFPRW>, D9;           // f1 = fchs f2
+def FCHS : FPI<"fchs", 0xE0, RawFrm, OneArgFPRW>, D9;           // f1 = fchs f2
 
-def FTST : FPInst<"ftst", 0xE4, RawFrm, ArgF80, OneArgFP>, D9;             // ftst ST(0)
+def FTST : FPI<"ftst", 0xE4, RawFrm, OneArgFP>, D9;             // ftst ST(0)
 
 // Binary arithmetic operations...
-class FPST0rInst<string n, bits<8> o>
-  : X86Inst<n, o, AddRegFrm, ArgF80>, D8 {
+class FPST0rInst<string n, bits<8> o> : I<n, o, AddRegFrm>, D8 {
   list<Register> Uses = [ST0];
   list<Register> Defs = [ST0];
 }
-class FPrST0Inst<string n, bits<8> o>
-  : X86Inst<n, o, AddRegFrm, ArgF80>, DC {
+class FPrST0Inst<string n, bits<8> o> : I<n, o, AddRegFrm>, DC {
   bit printImplicitUses = 1;
   list<Register> Uses = [ST0];
 }
-class FPrST0PInst<string n, bits<8> o>
-  : X86Inst<n, o, AddRegFrm, ArgF80>, DE {
+class FPrST0PInst<string n, bits<8> o> : I<n, o, AddRegFrm>, DE {
   list<Register> Uses = [ST0];
 }
 
@@ -676,14 +710,14 @@
 def FDIVRPrST0 : FPrST0PInst<"fdivrp",  0xF0>;   // ST(i) = ST(0) / ST(i), pop
 
 // Floating point compares
-def FUCOMr    : X86Inst<"fucom"  , 0xE0, AddRegFrm, ArgF80>, DD, Imp<[ST0],[]>;  // FPSW = compare ST(0) with ST(i)
-def FUCOMPr   : X86Inst<"fucomp" , 0xE8, AddRegFrm, ArgF80>, DD, Imp<[ST0],[]>;  // FPSW = compare ST(0) with ST(i), pop
-def FUCOMPPr  : X86Inst<"fucompp", 0xE9, RawFrm   , ArgF80>, DA, Imp<[ST0],[]>;  // compare ST(0) with ST(1), pop, pop
+def FUCOMr    : I<"fucom"  , 0xE0, AddRegFrm>, DD, Imp<[ST0],[]>;  // FPSW = compare ST(0) with ST(i)
+def FUCOMPr   : I<"fucomp" , 0xE8, AddRegFrm>, DD, Imp<[ST0],[]>;  // FPSW = compare ST(0) with ST(i), pop
+def FUCOMPPr  : I<"fucompp", 0xE9, RawFrm   >, DA, Imp<[ST0],[]>;  // compare ST(0) with ST(1), pop, pop
 
 // Floating point flag ops
-def FNSTSWr8  : X86Inst<"fnstsw" , 0xE0, RawFrm   , ArgF80>, DF, Imp<[],[AX]>;   // AX = fp flags
-def FNSTCWm16 : X86Inst<"fnstcw" , 0xD9, MRM7m    , Arg16 >;                     // [mem16] = X87 control world
-def FLDCWm16  : X86Inst<"fldcw"  , 0xD9, MRM5m    , Arg16 >;                     // X87 control world = [mem16]
+def FNSTSWr8  : I   <"fnstsw" , 0xE0, RawFrm>, DF, Imp<[],[AX]>;   // AX = fp flags
+def FNSTCWm16 : IM16<"fnstcw" , 0xD9, MRM7m >;                     // [mem16] = X87 control word
+def FLDCWm16  : IM16<"fldcw"  , 0xD9, MRM5m >;                     // X87 control word = [mem16]
 
 
 //===----------------------------------------------------------------------===//


Index: llvm/lib/Target/X86/X86InstrInfo.h
diff -u llvm/lib/Target/X86/X86InstrInfo.h:1.33 llvm/lib/Target/X86/X86InstrInfo.h:1.34
--- llvm/lib/Target/X86/X86InstrInfo.h:1.33	Fri Feb 27 12:55:12 2004
+++ llvm/lib/Target/X86/X86InstrInfo.h	Sat Feb 28 16:02:05 2004
@@ -111,21 +111,29 @@
     //===------------------------------------------------------------------===//
     // This three-bit field describes the size of a memory operand.  Zero is
     // unused so that we can tell if we forgot to set a value.
-    ArgShift = 10,
-    ArgMask  = 7 << ArgShift,
-    Arg8     = 1 << ArgShift,
-    Arg16    = 2 << ArgShift,
-    Arg32    = 3 << ArgShift,
-    Arg64    = 4 << ArgShift,  // 64 bit int argument for FILD64
-    ArgF32   = 5 << ArgShift,
-    ArgF64   = 6 << ArgShift,
-    ArgF80   = 7 << ArgShift,
+    MemShift = 10,
+    MemMask  = 7 << MemShift,
+    Mem8     = 1 << MemShift,
+    Mem16    = 2 << MemShift,
+    Mem32    = 3 << MemShift,
+    Mem64    = 4 << MemShift,
+    Mem80    = 5 << MemShift,
+    Mem128   = 6 << MemShift,
+
+    //===------------------------------------------------------------------===//
+    // This two-bit field describes the size of an immediate operand.  Zero is
+    // unused so that we can tell if we forgot to set a value.
+    ImmShift = 13,
+    ImmMask  = 3 << ImmShift,
+    Imm8     = 1 << ImmShift,
+    Imm16    = 2 << ImmShift,
+    Imm32    = 3 << ImmShift,
 
     //===------------------------------------------------------------------===//
     // FP Instruction Classification...  Zero is non-fp instruction.
 
     // FPTypeMask - Mask for all of the FP types...
-    FPTypeShift = 13,
+    FPTypeShift = 15,
     FPTypeMask  = 7 << FPTypeShift,
 
     // NotFP - The default, set for instructions that do not use FP registers.
@@ -151,9 +159,9 @@
     SpecialFP  = 5 << FPTypeShift,
 
     // PrintImplUses - Print out implicit uses in the assembly output.
-    PrintImplUses = 1 << 16,
+    PrintImplUses = 1 << 18,
 
-    OpcodeShift   = 17,
+    OpcodeShift   = 19,
     OpcodeMask    = 0xFF << OpcodeShift,
     // Bits 25 -> 31 are unused
   };


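For reference, here is a small standalone C++ sketch (not in the patch) of how the fields tile the TSFlags word after this change. Only the Mem*/Imm*/FPType*/PrintImplUses/Opcode* names come from the header excerpt above; the remaining names are invented for the illustration, and the field widths (5/1/4/3/2/3/1/8 bits) are inferred from the shift list in X86.td further down.

// tsflags_layout.cpp -- standalone illustration only.
#include <cassert>
#include <cstdio>

namespace X86II {
  enum {
    FormShift     = 0,   FormMask   = 0x1F << FormShift,   // 5-bit instruction format
    OpSize        = 1 << 5,                                // 0x66 operand-size prefix flag
    PrefixShift   = 6,   PrefixMask = 0xF  << PrefixShift, // 4-bit prefix code (TB, D8..DF, ...)
    MemShift      = 10,  MemMask    = 7    << MemShift,    // 3-bit memory operand size
    ImmShift      = 13,  ImmMask    = 3    << ImmShift,    // 2-bit immediate size
    FPTypeShift   = 15,  FPTypeMask = 7    << FPTypeShift, // 3-bit FP classification
    PrintImplUses = 1 << 18,
    OpcodeShift   = 19,  OpcodeMask = 0xFF << OpcodeShift  // 8-bit opcode
  };
}

int main() {
  using namespace X86II;
  unsigned all = FormMask | OpSize | PrefixMask | MemMask | ImmMask |
                 FPTypeMask | PrintImplUses | OpcodeMask;
  unsigned long long sum = (unsigned long long)FormMask + OpSize + PrefixMask +
                           MemMask + ImmMask + FPTypeMask + PrintImplUses +
                           OpcodeMask;
  // Disjoint fields covering bits 0-26 exactly; bits 27-31 stay free.
  assert(all == 0x07FFFFFFu && sum == all);
  std::printf("TSFlags uses bits 0-26; bits 27-31 are unused\n");
  return 0;
}
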
Index: llvm/lib/Target/X86/X86CodeEmitter.cpp
diff -u llvm/lib/Target/X86/X86CodeEmitter.cpp:1.54 llvm/lib/Target/X86/X86CodeEmitter.cpp:1.55
--- llvm/lib/Target/X86/X86CodeEmitter.cpp:1.54	Fri Feb 27 12:55:12 2004
+++ llvm/lib/Target/X86/X86CodeEmitter.cpp	Sat Feb 28 16:02:05 2004
@@ -453,14 +453,24 @@
   }
 }
 
+static unsigned sizeOfImm(const TargetInstrDescriptor &Desc) {
+  switch (Desc.TSFlags & X86II::ImmMask) {
+  case X86II::Imm8:   return 1;
+  case X86II::Imm16:  return 2;
+  case X86II::Imm32:  return 4;
+  default: assert(0 && "Immediate size not set!");
+    return 0;
+  }
+}
+
 static unsigned sizeOfPtr(const TargetInstrDescriptor &Desc) {
-  switch (Desc.TSFlags & X86II::ArgMask) {
-  case X86II::Arg8:   return 1;
-  case X86II::Arg16:  return 2;
-  case X86II::Arg32:  return 4;
-  case X86II::ArgF32: return 4;
-  case X86II::ArgF64: return 8;
-  case X86II::ArgF80: return 10;
+  switch (Desc.TSFlags & X86II::MemMask) {
+  case X86II::Mem8:   return 1;
+  case X86II::Mem16:  return 2;
+  case X86II::Mem32:  return 4;
+  case X86II::Mem64:  return 8;
+  case X86II::Mem80:  return 10;
+  case X86II::Mem128: return 16;
   default: assert(0 && "Memory size not set!");
     return 0;
   }
@@ -527,25 +537,21 @@
     MCE.emitByte(BaseOpcode + getX86RegNum(MI.getOperand(0).getReg()));
     if (MI.getNumOperands() == 2) {
       MachineOperand &MO1 = MI.getOperand(1);
-      if (MO1.isImmediate() || MO1.getVRegValueOrNull() ||
-	  MO1.isGlobalAddress() || MO1.isExternalSymbol()) {
-	unsigned Size = sizeOfPtr(Desc);
-	if (Value *V = MO1.getVRegValueOrNull()) {
-	  assert(Size == 4 && "Don't know how to emit non-pointer values!");
-          emitGlobalAddressForPtr(cast<GlobalValue>(V));
-	} else if (MO1.isGlobalAddress()) {
-	  assert(Size == 4 && "Don't know how to emit non-pointer values!");
-          assert(!MO1.isPCRelative() && "Function pointer ref is PC relative?");
-          emitGlobalAddressForPtr(MO1.getGlobal());
-	} else if (MO1.isExternalSymbol()) {
-	  assert(Size == 4 && "Don't know how to emit non-pointer values!");
-
-          unsigned Address = MCE.getGlobalValueAddress(MO1.getSymbolName());
-          assert(Address && "Unknown external symbol!");
-          emitMaybePCRelativeValue(Address, MO1.isPCRelative());
-	} else {
-	  emitConstant(MO1.getImmedValue(), Size);
-	}
+      if (Value *V = MO1.getVRegValueOrNull()) {
+	assert(sizeOfImm(Desc) == 4 && "Don't know how to emit non-pointer values!");
+        emitGlobalAddressForPtr(cast<GlobalValue>(V));
+      } else if (MO1.isGlobalAddress()) {
+	assert(sizeOfImm(Desc) == 4 && "Don't know how to emit non-pointer values!");
+        assert(!MO1.isPCRelative() && "Function pointer ref is PC relative?");
+        emitGlobalAddressForPtr(MO1.getGlobal());
+      } else if (MO1.isExternalSymbol()) {
+	assert(sizeOfImm(Desc) == 4 && "Don't know how to emit non-pointer values!");
+
+        unsigned Address = MCE.getGlobalValueAddress(MO1.getSymbolName());
+        assert(Address && "Unknown external symbol!");
+        emitMaybePCRelativeValue(Address, MO1.isPCRelative());
+      } else {
+        emitConstant(MO1.getImmedValue(), sizeOfImm(Desc));
       }
     }
     break;
@@ -555,7 +561,7 @@
     emitRegModRMByte(MI.getOperand(0).getReg(),
                      getX86RegNum(MI.getOperand(1).getReg()));
     if (MI.getNumOperands() == 3)
-      emitConstant(MI.getOperand(2).getImmedValue(), sizeOfPtr(Desc));
+      emitConstant(MI.getOperand(2).getImmedValue(), sizeOfImm(Desc));
     break;
   }
   case X86II::MRMDestMem:
@@ -569,14 +575,14 @@
     emitRegModRMByte(MI.getOperand(1).getReg(),
                      getX86RegNum(MI.getOperand(0).getReg()));
     if (MI.getNumOperands() == 3)
-      emitConstant(MI.getOperand(2).getImmedValue(), sizeOfPtr(Desc));
+      emitConstant(MI.getOperand(2).getImmedValue(), sizeOfImm(Desc));
     break;
 
   case X86II::MRMSrcMem:
     MCE.emitByte(BaseOpcode);
     emitMemModRMByte(MI, 1, getX86RegNum(MI.getOperand(0).getReg()));
     if (MI.getNumOperands() == 2+4)
-      emitConstant(MI.getOperand(5).getImmedValue(), sizeOfPtr(Desc));
+      emitConstant(MI.getOperand(5).getImmedValue(), sizeOfImm(Desc));
     break;
 
   case X86II::MRM0r: case X86II::MRM1r:
@@ -588,8 +594,7 @@
                      (Desc.TSFlags & X86II::FormMask)-X86II::MRM0r);
 
     if (MI.getOperand(MI.getNumOperands()-1).isImmediate()) {
-      unsigned Size = sizeOfPtr(Desc);
-      emitConstant(MI.getOperand(MI.getNumOperands()-1).getImmedValue(), Size);
+      emitConstant(MI.getOperand(MI.getNumOperands()-1).getImmedValue(), sizeOfImm(Desc));
     }
     break;
 
@@ -601,9 +606,8 @@
     emitMemModRMByte(MI, 0, (Desc.TSFlags & X86II::FormMask)-X86II::MRM0m);
 
     if (MI.getNumOperands() == 5) {
-      unsigned Size = sizeOfPtr(Desc);
       if (MI.getOperand(4).isImmediate())
-        emitConstant(MI.getOperand(4).getImmedValue(), Size);
+        emitConstant(MI.getOperand(4).getImmedValue(), sizeOfImm(Desc));
       else if (MI.getOperand(4).isGlobalAddress())
         emitGlobalAddressForPtr(MI.getOperand(4).getGlobal());
       else


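A rough standalone sketch of what the sizeOfPtr/sizeOfImm split buys: the memory-operand size and the immediate size of one instruction are now looked up independently, so a 32-bit memory operand can be paired with a sign-extended 8-bit immediate. The helpers below are simplified copies of the ones above, taking a raw flags word instead of a TargetInstrDescriptor.

// operand_sizes.cpp -- simplified, standalone copies of the two helpers.
#include <cassert>
#include <cstdio>

enum {
  MemShift = 10, MemMask = 7 << MemShift,
  Mem8  = 1 << MemShift, Mem16 = 2 << MemShift, Mem32  = 3 << MemShift,
  Mem64 = 4 << MemShift, Mem80 = 5 << MemShift, Mem128 = 6 << MemShift,
  ImmShift = 13, ImmMask = 3 << ImmShift,
  Imm8 = 1 << ImmShift, Imm16 = 2 << ImmShift, Imm32 = 3 << ImmShift
};

static unsigned sizeOfImm(unsigned TSFlags) {
  switch (TSFlags & ImmMask) {
  case Imm8:  return 1;
  case Imm16: return 2;
  case Imm32: return 4;
  default: assert(0 && "Immediate size not set!"); return 0;
  }
}

static unsigned sizeOfPtr(unsigned TSFlags) {
  switch (TSFlags & MemMask) {
  case Mem8:   return 1;
  case Mem16:  return 2;
  case Mem32:  return 4;
  case Mem64:  return 8;
  case Mem80:  return 10;
  case Mem128: return 16;
  default: assert(0 && "Memory size not set!"); return 0;
  }
}

int main() {
  // A flags word describing a 32-bit memory operand with an 8-bit immediate.
  unsigned Flags = Mem32 | Imm8;
  std::printf("mem = %u bytes, imm = %u byte(s)\n",
              sizeOfPtr(Flags), sizeOfImm(Flags));  // prints 4 and 1
  return 0;
}

Before the split, the immediate-emission paths above fed sizeOfPtr() to emitConstant(), which forced the two sizes to agree.
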
Index: llvm/lib/Target/X86/X86.td
diff -u llvm/lib/Target/X86/X86.td:1.7 llvm/lib/Target/X86/X86.td:1.8
--- llvm/lib/Target/X86/X86.td:1.7	Tue Oct 21 10:17:13 2003
+++ llvm/lib/Target/X86/X86.td	Sat Feb 28 16:02:05 2004
@@ -33,10 +33,10 @@
 
   // Define how we want to layout our TargetSpecific information field... This
   // should be kept up-to-date with the fields in the X86InstrInfo.h file.
-  let TSFlagsFields = ["FormBits"  , "hasOpSizePrefix" ,  "Prefix", "TypeBits",
-                       "FPFormBits", "printImplicitUses", "Opcode"];
-  let TSFlagsShifts = [           0,                   5,        6,         10,
-                                 13,                  16,       17];
+  let TSFlagsFields = ["FormBits"  , "hasOpSizePrefix" ,  "Prefix", "MemTypeBits",
+                       "ImmTypeBits", "FPFormBits", "printImplicitUses", "Opcode"];
+  let TSFlagsShifts = [0,         5,                  6,        10,            13,
+                                  15,           18,                  19];
 }
 
 def X86 : Target {


Index: llvm/lib/Target/X86/Printer.cpp
diff -u llvm/lib/Target/X86/Printer.cpp:1.87 llvm/lib/Target/X86/Printer.cpp:1.88
--- llvm/lib/Target/X86/Printer.cpp:1.87	Fri Feb 27 12:55:12 2004
+++ llvm/lib/Target/X86/Printer.cpp	Sat Feb 28 16:02:05 2004
@@ -433,16 +433,14 @@
   }
 }
 
-static const std::string sizePtr(const TargetInstrDescriptor &Desc) {
-  switch (Desc.TSFlags & X86II::ArgMask) {
+static const char* const sizePtr(const TargetInstrDescriptor &Desc) {
+  switch (Desc.TSFlags & X86II::MemMask) {
   default: assert(0 && "Unknown arg size!");
-  case X86II::Arg8:   return "BYTE PTR"; 
-  case X86II::Arg16:  return "WORD PTR"; 
-  case X86II::Arg32:  return "DWORD PTR"; 
-  case X86II::Arg64:  return "QWORD PTR"; 
-  case X86II::ArgF32:  return "DWORD PTR"; 
-  case X86II::ArgF64:  return "QWORD PTR"; 
-  case X86II::ArgF80:  return "XWORD PTR"; 
+  case X86II::Mem8:   return "BYTE PTR"; 
+  case X86II::Mem16:  return "WORD PTR"; 
+  case X86II::Mem32:  return "DWORD PTR"; 
+  case X86II::Mem64:  return "QWORD PTR"; 
+  case X86II::Mem80:  return "XWORD PTR"; 
   }
 }
 
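The MemType is also what the Intel-syntax printer turns into the size keyword in front of a memory operand, and returning string literals as const char* avoids building a std::string for every memory reference printed. A hypothetical rendering (not the printer's real code path) for a 64-bit FP load:

// memref_print.cpp -- illustration only; keyed on the byte size rather
// than on the MemMask values used by sizePtr() above.
#include <cstdio>

static const char *sizeKeyword(unsigned Bytes) {
  switch (Bytes) {
  case 1:  return "BYTE PTR";
  case 2:  return "WORD PTR";
  case 4:  return "DWORD PTR";
  case 8:  return "QWORD PTR";
  case 10: return "XWORD PTR";
  default: return "PTR";
  }
}

int main() {
  // e.g. FLDr64 (Mem64) loading from [ESP + 8] in Intel syntax:
  std::printf("fld %s [ESP + 8]\n", sizeKeyword(8));  // fld QWORD PTR [ESP + 8]
  return 0;
}
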




