[llvm-branch-commits] [llvm-branch] r69194 - in /llvm/branches/Apple/Dib: include/llvm/Target/ lib/CodeGen/ lib/CodeGen/SelectionDAG/ lib/Target/ lib/Target/X86/ test/CodeGen/X86/ utils/TableGen/

Bill Wendling <isanbard at gmail.com>
Wed Apr 15 11:19:52 PDT 2009


Author: void
Date: Wed Apr 15 13:19:52 2009
New Revision: 69194

URL: http://llvm.org/viewvc/llvm-project?rev=69194&view=rev
Log:
Merge in Dan's recent changes:

--- Merging (from foreign repository) r68669 into '.':
U    include/llvm/Target/TargetRegisterInfo.h
U    utils/TableGen/RegisterInfoEmitter.cpp
U    lib/Target/TargetRegisterInfo.cpp
--- Merging (from foreign repository) r68730 into '.':
G    include/llvm/Target/TargetRegisterInfo.h
G    utils/TableGen/RegisterInfoEmitter.cpp
G    lib/Target/TargetRegisterInfo.cpp
--- Merging (from foreign repository) r68786 into '.':
U    lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
--- Merging (from foreign repository) r68922 into '.':
G    include/llvm/Target/TargetRegisterInfo.h
--- Merging (from foreign repository) r68949 into '.':
U    include/llvm/Target/TargetInstrInfo.h
--- Merging (from foreign repository) r68950 into '.':
U    lib/Target/X86/X86Instr64bit.td
--- Merging (from foreign repository) r68951 into '.':
U    lib/Target/X86/X86ISelDAGToDAG.cpp
--- Merging (from foreign repository) r68953 into '.':
U    lib/CodeGen/TwoAddressInstructionPass.cpp
--- Merging (from foreign repository) r68954 into '.':
U    lib/Target/X86/X86RegisterInfo.td
--- Merging (from foreign repository) r68955 into '.':
U    lib/CodeGen/Spiller.cpp
--- Merging (from foreign repository) r68956 into '.':
U    lib/CodeGen/LiveIntervalAnalysis.cpp
--- Merging (from foreign repository) r68957 into '.':
U    utils/TableGen/CodeGenTarget.h
--- Merging (from foreign repository) r68958 into '.':
G    lib/Target/X86/X86RegisterInfo.td
--- Merging (from foreign repository) r68959 into '.':
G    lib/Target/X86/X86ISelDAGToDAG.cpp
--- Merging (from foreign repository) r68961 into '.':
U    include/llvm/Target/Target.td
G    include/llvm/Target/TargetRegisterInfo.h
G    include/llvm/Target/TargetInstrInfo.h
U    utils/TableGen/DAGISelEmitter.cpp
U    utils/TableGen/InstrInfoEmitter.cpp
U    utils/TableGen/CodeEmitterGen.cpp
U    utils/TableGen/CodeGenDAGPatterns.cpp
U    utils/TableGen/CodeGenTarget.cpp
G    utils/TableGen/RegisterInfoEmitter.cpp
G    lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
U    lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
--- Merging (from foreign repository) r68962 into '.':
A    test/CodeGen/X86/h-register-addressing-32.ll
A    test/CodeGen/X86/h-register-store.ll
A    test/CodeGen/X86/h-registers.ll
A    test/CodeGen/X86/h-register-addressing-64.ll
U    test/CodeGen/X86/inline-asm-out-regs.ll
G    lib/Target/X86/X86Instr64bit.td
U    lib/Target/X86/X86InstrInfo.td
U    lib/Target/X86/X86FastISel.cpp
G    lib/Target/X86/X86ISelDAGToDAG.cpp
U    lib/Target/X86/X86RegisterInfo.h
U    lib/Target/X86/X86InstrInfo.cpp
G    lib/Target/X86/X86RegisterInfo.td
--- Merging (from foreign repository) r68986 into '.':
G    include/llvm/Target/Target.td
G    include/llvm/Target/TargetInstrInfo.h
G    utils/TableGen/InstrInfoEmitter.cpp
G    utils/TableGen/CodeEmitterGen.cpp
G    utils/TableGen/CodeGenDAGPatterns.cpp
G    utils/TableGen/CodeGenTarget.cpp
G    lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
G    lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
G    lib/Target/X86/X86Instr64bit.td
G    lib/Target/X86/X86InstrInfo.td
Skipped 'test/CodeGen/X86/2009-04-14-IllegalRegs.ll'
--- Merging (from foreign repository) r69049 into '.':
G    lib/Target/X86/X86RegisterInfo.td
--- Merging (from foreign repository) r69087 into '.':
A    test/CodeGen/X86/h-registers-0.ll
A    test/CodeGen/X86/h-registers-1.ll
Skipped 'test/CodeGen/X86/h-registers.ll'
G    lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
--- Merging (from foreign repository) r69094 into '.':
A    test/CodeGen/X86/h-registers-2.ll
G    lib/Target/X86/X86ISelDAGToDAG.cpp
--- Merging (from foreign repository) r69096 into '.':
U    test/CodeGen/X86/h-registers-2.ll
--- Merging (from foreign repository) r69108 into '.':
G    lib/Target/X86/X86RegisterInfo.td
--- Merging (from foreign repository) r69111 into '.':
G    lib/Target/X86/X86InstrInfo.td
G    lib/Target/X86/X86InstrInfo.cpp
--- Merging (from foreign repository) r69115 into '.':
G    lib/Target/X86/X86RegisterInfo.td


Added:
    llvm/branches/Apple/Dib/test/CodeGen/X86/h-register-addressing-32.ll
    llvm/branches/Apple/Dib/test/CodeGen/X86/h-register-addressing-64.ll
    llvm/branches/Apple/Dib/test/CodeGen/X86/h-register-store.ll
    llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers-0.ll
    llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers-1.ll
    llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers-2.ll
    llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers.ll
Modified:
    llvm/branches/Apple/Dib/include/llvm/Target/Target.td
    llvm/branches/Apple/Dib/include/llvm/Target/TargetInstrInfo.h
    llvm/branches/Apple/Dib/include/llvm/Target/TargetRegisterInfo.h
    llvm/branches/Apple/Dib/lib/CodeGen/LiveIntervalAnalysis.cpp
    llvm/branches/Apple/Dib/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
    llvm/branches/Apple/Dib/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
    llvm/branches/Apple/Dib/lib/CodeGen/Spiller.cpp
    llvm/branches/Apple/Dib/lib/CodeGen/TwoAddressInstructionPass.cpp
    llvm/branches/Apple/Dib/lib/Target/TargetRegisterInfo.cpp
    llvm/branches/Apple/Dib/lib/Target/X86/X86FastISel.cpp
    llvm/branches/Apple/Dib/lib/Target/X86/X86ISelDAGToDAG.cpp
    llvm/branches/Apple/Dib/lib/Target/X86/X86Instr64bit.td
    llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.cpp
    llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.td
    llvm/branches/Apple/Dib/lib/Target/X86/X86RegisterInfo.h
    llvm/branches/Apple/Dib/lib/Target/X86/X86RegisterInfo.td
    llvm/branches/Apple/Dib/test/CodeGen/X86/inline-asm-out-regs.ll
    llvm/branches/Apple/Dib/utils/TableGen/CodeEmitterGen.cpp
    llvm/branches/Apple/Dib/utils/TableGen/CodeGenDAGPatterns.cpp
    llvm/branches/Apple/Dib/utils/TableGen/CodeGenTarget.cpp
    llvm/branches/Apple/Dib/utils/TableGen/CodeGenTarget.h
    llvm/branches/Apple/Dib/utils/TableGen/DAGISelEmitter.cpp
    llvm/branches/Apple/Dib/utils/TableGen/InstrInfoEmitter.cpp
    llvm/branches/Apple/Dib/utils/TableGen/RegisterInfoEmitter.cpp

Modified: llvm/branches/Apple/Dib/include/llvm/Target/Target.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/include/llvm/Target/Target.td?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/include/llvm/Target/Target.td (original)
+++ llvm/branches/Apple/Dib/include/llvm/Target/Target.td Wed Apr 15 13:19:52 2009
@@ -400,6 +400,14 @@
   let Namespace = "TargetInstrInfo";
   let neverHasSideEffects = 1;
 }
+def COPY_TO_REGCLASS : Instruction {
+  let OutOperandList = (ops unknown:$dst);
+  let InOperandList = (ops unknown:$src, i32imm:$regclass);
+  let AsmString = "";
+  let Namespace = "TargetInstrInfo";
+  let neverHasSideEffects = 1;
+  let isAsCheapAsAMove = 1;
+}
 
 //===----------------------------------------------------------------------===//
 // AsmWriter - This class can be implemented by targets that need to customize

Modified: llvm/branches/Apple/Dib/include/llvm/Target/TargetInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/include/llvm/Target/TargetInstrInfo.h?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/include/llvm/Target/TargetInstrInfo.h (original)
+++ llvm/branches/Apple/Dib/include/llvm/Target/TargetInstrInfo.h Wed Apr 15 13:19:52 2009
@@ -50,10 +50,39 @@
     EH_LABEL = 3,
     GC_LABEL = 4,
     DECLARE = 5,
+
+    /// EXTRACT_SUBREG - This instruction takes two operands: a register
+    /// that has subregisters, and a subregister index. It returns the
+    /// extracted subregister value. This is commonly used to implement
+    /// truncation operations on target architectures which support it.
     EXTRACT_SUBREG = 6,
+
+    /// INSERT_SUBREG - This instruction takes three operands: a register
+    /// that has subregisters, a register providing an insert value, and a
+    /// subregister index. It returns the value of the first register with
+    /// the value of the second register inserted. The first register is
+    /// often defined by an IMPLICIT_DEF, as is commonly used to implement
+    /// anyext operations on target architectures which support it.
     INSERT_SUBREG = 7,
+
+    /// IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef.
     IMPLICIT_DEF = 8,
-    SUBREG_TO_REG = 9
+
+    /// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except
+    /// that the first operand is an immediate integer constant. This constant
+    /// is often zero, as is commonly used to implement zext operations on
+    /// target architectures which support it, such as with x86-64 (with
+    /// zext from i32 to i64 via implicit zero-extension).
+    SUBREG_TO_REG = 9,
+
+    /// COPY_TO_REGCLASS - This instruction is a placeholder for a plain
+    /// register-to-register copy into a specific register class. This is only
+    /// used between instruction selection and MachineInstr creation, before
+    /// virtual registers have been created for all the instructions, and it's
+    /// only needed in cases where the register classes implied by the
+    /// instructions are insufficient. The actual MachineInstrs to perform
+    /// the copy are emitted with the TargetInstrInfo::copyRegToReg hook.
+    COPY_TO_REGCLASS = 10
   };
 
   unsigned getNumOpcodes() const { return NumOpcodes; }
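
For readers of the COPY_TO_REGCLASS documentation above, here is a minimal sketch of how a selected value can be wrapped in such a node. The helper name constrainToRegClass is hypothetical and not an LLVM API; the getTargetNode/getTargetConstant calls are the same ones used elsewhere in this patch, and operand 1 carries the register class ID that EmitCopyToRegClassNode (in ScheduleDAGSDNodesEmit.cpp below) decodes before emitting the actual copyRegToReg.

  // Hypothetical helper, for illustration only; not part of this commit.
  // Wraps an already-selected value in a COPY_TO_REGCLASS node so the
  // emitter will place the result in a virtual register of the requested
  // class. Operand 1 is the register class ID as an i32 target constant,
  // which is what EmitCopyToRegClassNode expects.
  static SDValue constrainToRegClass(SelectionDAG *CurDAG, DebugLoc dl,
                                     SDValue Val,
                                     const TargetRegisterClass *RC) {
    SDValue RCID = CurDAG->getTargetConstant(RC->getID(), MVT::i32);
    SDNode *N = CurDAG->getTargetNode(TargetInstrInfo::COPY_TO_REGCLASS, dl,
                                      Val.getValueType(), Val, RCID);
    return SDValue(N, 0);
  }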

Modified: llvm/branches/Apple/Dib/include/llvm/Target/TargetRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/include/llvm/Target/TargetRegisterInfo.h?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/include/llvm/Target/TargetRegisterInfo.h (original)
+++ llvm/branches/Apple/Dib/include/llvm/Target/TargetRegisterInfo.h Wed Apr 15 13:19:52 2009
@@ -18,6 +18,7 @@
 
 #include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/ADT/DenseSet.h"
 #include <cassert>
 #include <functional>
 
@@ -60,18 +61,27 @@
   const vt_iterator VTs;
   const sc_iterator SubClasses;
   const sc_iterator SuperClasses;
+  const sc_iterator SubRegClasses;
+  const sc_iterator SuperRegClasses;
   const unsigned RegSize, Alignment;    // Size & Alignment of register in bytes
   const int CopyCost;
   const iterator RegsBegin, RegsEnd;
+  DenseSet<unsigned> RegSet;
 public:
   TargetRegisterClass(unsigned id,
                       const MVT *vts,
                       const TargetRegisterClass * const *subcs,
                       const TargetRegisterClass * const *supcs,
+                      const TargetRegisterClass * const *subregcs,
+                      const TargetRegisterClass * const *superregcs,
                       unsigned RS, unsigned Al, int CC,
                       iterator RB, iterator RE)
     : ID(id), VTs(vts), SubClasses(subcs), SuperClasses(supcs),
-    RegSize(RS), Alignment(Al), CopyCost(CC), RegsBegin(RB), RegsEnd(RE) {}
+    SubRegClasses(subregcs), SuperRegClasses(superregcs),
+    RegSize(RS), Alignment(Al), CopyCost(CC), RegsBegin(RB), RegsEnd(RE) {
+      for (iterator I = RegsBegin, E = RegsEnd; I != E; ++I)
+        RegSet.insert(*I);
+    }
   virtual ~TargetRegisterClass() {}     // Allow subclasses
   
   /// getID() - Return the register class ID number.
@@ -97,9 +107,7 @@
   /// contains - Return true if the specified register is included in this
   /// register class.
   bool contains(unsigned Reg) const {
-    for (iterator I = begin(), E = end(); I != E; ++I)
-      if (*I == Reg) return true;
-    return false;
+    return RegSet.count(Reg);
   }
 
   /// hasType - return true if this TargetRegisterClass has the ValueType vt.
@@ -123,8 +131,32 @@
     return I;
   }
 
-  /// hasSubClass - return true if the specified TargetRegisterClass is a
-  /// sub-register class of this TargetRegisterClass.
+  /// subregclasses_begin / subregclasses_end - Loop over all of
+  /// the subreg register classes of this register class.
+  sc_iterator subregclasses_begin() const {
+    return SubRegClasses;
+  }
+
+  sc_iterator subregclasses_end() const {
+    sc_iterator I = SubRegClasses;
+    while (*I != NULL) ++I;
+    return I;
+  }
+
+  /// superregclasses_begin / superregclasses_end - Loop over all of
+  /// the superreg register classes of this register class.
+  sc_iterator superregclasses_begin() const {
+    return SuperRegClasses;
+  }
+
+  sc_iterator superregclasses_end() const {
+    sc_iterator I = SuperRegClasses;
+    while (*I != NULL) ++I;
+    return I;
+  }
+
+  /// hasSubClass - return true if the specified TargetRegisterClass
+  /// is a proper subset of this TargetRegisterClass.
   bool hasSubClass(const TargetRegisterClass *cs) const {
     for (int i = 0; SubClasses[i] != NULL; ++i) 
       if (SubClasses[i] == cs)
@@ -132,8 +164,8 @@
     return false;
   }
 
-  /// subclasses_begin / subclasses_end - Loop over all of the sub-classes of
-  /// this register class.
+  /// subclasses_begin / subclasses_end - Loop over all of the classes
+  /// that are proper subsets of this register class.
   sc_iterator subclasses_begin() const {
     return SubClasses;
   }
@@ -145,7 +177,7 @@
   }
   
   /// hasSuperClass - return true if the specified TargetRegisterClass is a
-  /// super-register class of this TargetRegisterClass.
+  /// proper superset of this TargetRegisterClass.
   bool hasSuperClass(const TargetRegisterClass *cs) const {
     for (int i = 0; SuperClasses[i] != NULL; ++i) 
       if (SuperClasses[i] == cs)
@@ -153,8 +185,8 @@
     return false;
   }
 
-  /// superclasses_begin / superclasses_end - Loop over all of the super-classes
-  /// of this register class.
+  /// superclasses_begin / superclasses_end - Loop over all of the classes
+  /// that are proper supersets of this register class.
   sc_iterator superclasses_begin() const {
     return SuperClasses;
   }
@@ -165,8 +197,8 @@
     return I;
   }
 
-  /// isASubClass - return true if this TargetRegisterClass is a sub-class of at
-  /// least one other TargetRegisterClass.
+  /// isASubClass - return true if this TargetRegisterClass is a subset
+  /// class of at least one other TargetRegisterClass.
   bool isASubClass() const {
     return SuperClasses[0] != 0;
   }
@@ -218,6 +250,10 @@
 protected:
   const unsigned* SubregHash;
   const unsigned SubregHashSize;
+  const unsigned* SuperregHash;
+  const unsigned SuperregHashSize;
+  const unsigned* AliasesHash;
+  const unsigned AliasesHashSize;
 public:
   typedef const TargetRegisterClass * const * regclass_iterator;
 private:
@@ -234,7 +270,11 @@
                      int CallFrameSetupOpcode = -1,
                      int CallFrameDestroyOpcode = -1,
                      const unsigned* subregs = 0,
-                     const unsigned subregsize = 0);
+                     const unsigned subregsize = 0,
+		     const unsigned* superregs = 0,
+		     const unsigned superregsize = 0,
+		     const unsigned* aliases = 0,
+		     const unsigned aliasessize = 0);
   virtual ~TargetRegisterInfo();
 public:
 
@@ -336,8 +376,17 @@
   /// areAliases - Returns true if the two registers alias each other, false
   /// otherwise
   bool areAliases(unsigned regA, unsigned regB) const {
-    for (const unsigned *Alias = getAliasSet(regA); *Alias; ++Alias)
-      if (*Alias == regB) return true;
+    size_t index = (regA + regB * 37) & (AliasesHashSize-1);
+    unsigned ProbeAmt = 2;
+    while (AliasesHash[index*2] != 0 &&
+	   AliasesHash[index*2+1] != 0) {
+      if (AliasesHash[index*2] == regA && AliasesHash[index*2+1] == regB)
+	return true;
+
+      index = (index + ProbeAmt) & (AliasesHashSize-1);
+      ProbeAmt += 2;
+    }
+
     return false;
   }
 
@@ -373,8 +422,18 @@
   /// isSuperRegister - Returns true if regB is a super-register of regA.
   ///
   bool isSuperRegister(unsigned regA, unsigned regB) const {
-    for (const unsigned *SR = getSuperRegisters(regA); *SR; ++SR)
-      if (*SR == regB) return true;
+    // SuperregHash is a simple quadratically probed hash table.
+    size_t index = (regA + regB * 37) & (SuperregHashSize-1);
+    unsigned ProbeAmt = 2;
+    while (SuperregHash[index*2] != 0 &&
+           SuperregHash[index*2+1] != 0) {
+      if (SuperregHash[index*2] == regA && SuperregHash[index*2+1] == regB)
+        return true;
+      
+      index = (index + ProbeAmt) & (SuperregHashSize-1);
+      ProbeAmt += 2;
+    }
+    
     return false;
   }
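
The new SuperregHash and AliasesHash members are flat, open-addressed tables of (regA, regB) pairs generated by RegisterInfoEmitter. The lookup idiom used by isSuperRegister above, restated as a standalone sketch (pairIsInHash is a made-up name; the assumed layout is pairs stored back to back in an array whose slot count is a power of two, with an all-zero slot terminating a probe chain):

  // Sketch of the pair-hash lookup; illustrative only.
  static bool pairIsInHash(const unsigned *Table, unsigned Size,
                           unsigned regA, unsigned regB) {
    unsigned Index = (regA + regB * 37) & (Size - 1);
    unsigned ProbeAmt = 2;
    while (Table[Index * 2] != 0 && Table[Index * 2 + 1] != 0) {
      if (Table[Index * 2] == regA && Table[Index * 2 + 1] == regB)
        return true;
      Index = (Index + ProbeAmt) & (Size - 1);  // widen the probe step
      ProbeAmt += 2;
    }
    return false;  // hit an empty slot: the pair is not in the table
  }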
 

Modified: llvm/branches/Apple/Dib/lib/CodeGen/LiveIntervalAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/CodeGen/LiveIntervalAnalysis.cpp?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/lib/CodeGen/LiveIntervalAnalysis.cpp (original)
+++ llvm/branches/Apple/Dib/lib/CodeGen/LiveIntervalAnalysis.cpp Wed Apr 15 13:19:52 2009
@@ -2228,7 +2228,7 @@
     // If there are registers which alias PhysReg, but which are not a
     // sub-register of the chosen representative super register. Assert
     // since we can't handle it yet.
-    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
+    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
            tri_->isSuperRegister(*AS, SpillReg));
 
   bool Cut = false;

Modified: llvm/branches/Apple/Dib/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h (original)
+++ llvm/branches/Apple/Dib/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h Wed Apr 15 13:19:52 2009
@@ -134,6 +134,12 @@
     void EmitSubregNode(SDNode *Node, 
                         DenseMap<SDValue, unsigned> &VRBaseMap);
 
+    /// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS
+    /// nodes.
+    ///
+    void EmitCopyToRegClassNode(SDNode *Node,
+                                DenseMap<SDValue, unsigned> &VRBaseMap);
+
     /// getVR - Return the virtual register corresponding to the specified result
     /// of the specified node.
     unsigned getVR(SDValue Op, DenseMap<SDValue, unsigned> &VRBaseMap);
@@ -146,6 +152,13 @@
                     const TargetInstrDesc *II,
                     DenseMap<SDValue, unsigned> &VRBaseMap);
 
+    /// AddRegisterOperand - Add the specified register as an operand to the
+    /// specified machine instr. Insert register copies if the register is
+    /// not in the required register class.
+    void AddRegisterOperand(MachineInstr *MI, SDValue Op,
+                            unsigned IIOpNum, const TargetInstrDesc *II,
+                            DenseMap<SDValue, unsigned> &VRBaseMap);
+
     /// EmitCopyFromReg - Generate machine code for an CopyFromReg node or an
     /// implicit physical register output.
     void EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone,

Modified: llvm/branches/Apple/Dib/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp (original)
+++ llvm/branches/Apple/Dib/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp Wed Apr 15 13:19:52 2009
@@ -93,9 +93,13 @@
               getInstrOperandRegClass(TRI, II, i+II.getNumDefs());
             if (!UseRC)
               UseRC = RC;
-            else if (RC)
-              assert(UseRC == RC &&
-                     "Multiple uses expecting different register classes!");
+            else if (RC) {
+              if (UseRC->hasSuperClass(RC))
+                UseRC = RC;
+              else
+                assert((UseRC == RC || RC->hasSuperClass(UseRC)) &&
+                       "Multiple uses expecting different register classes!");
+            }
           }
         }
       }
@@ -127,10 +131,14 @@
     VRBase = MRI.createVirtualRegister(DstRC);
     bool Emitted = TII->copyRegToReg(*BB, InsertPos, VRBase, SrcReg,
                                      DstRC, SrcRC);
-    if (!Emitted) {
-      cerr << "Unable to issue a copy instruction!\n";
-      abort();
-    }
+    // If the target didn't handle the copy with different register
+    // classes and the destination is a subset of the source,
+    // try a normal same-RC copy.
+    if (!Emitted && DstRC->hasSuperClass(SrcRC))
+      Emitted = TII->copyRegToReg(*BB, InsertPos, VRBase, SrcReg,
+                                  SrcRC, SrcRC);
+
+    assert(Emitted && "Unable to issue a copy instruction!\n");
   }
 
   SDValue Op(Node, ResNo);
@@ -168,9 +176,10 @@
 
   for (unsigned i = 0; i < II.getNumDefs(); ++i) {
     // If the specific node value is only used by a CopyToReg and the dest reg
-    // is a vreg, use the CopyToReg'd destination register instead of creating
-    // a new vreg.
+    // is a vreg in the same register class, use the CopyToReg'd destination
+    // register instead of creating a new vreg.
     unsigned VRBase = 0;
+    const TargetRegisterClass *RC = getInstrOperandRegClass(TRI, II, i);
 
     if (!IsClone && !IsCloned)
       for (SDNode::use_iterator UI = Node->use_begin(), E = Node->use_end();
@@ -181,9 +190,12 @@
             User->getOperand(2).getResNo() == i) {
           unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
           if (TargetRegisterInfo::isVirtualRegister(Reg)) {
-            VRBase = Reg;
-            MI->addOperand(MachineOperand::CreateReg(Reg, true));
-            break;
+            const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
+            if (RegRC == RC) {
+              VRBase = Reg;
+              MI->addOperand(MachineOperand::CreateReg(Reg, true));
+              break;
+            }
           }
         }
       }
@@ -191,7 +203,6 @@
     // Create the result registers for this node and add the result regs to
     // the machine instruction.
     if (VRBase == 0) {
-      const TargetRegisterClass *RC = getInstrOperandRegClass(TRI, II, i);
       assert(RC && "Isn't a register operand!");
       VRBase = MRI.createVirtualRegister(RC);
       MI->addOperand(MachineOperand::CreateReg(VRBase, true));
@@ -230,6 +241,52 @@
 }
 
 
+/// AddRegisterOperand - Add the specified register as an operand to the
+/// specified machine instr. Insert register copies if the register is
+/// not in the required register class.
+void
+ScheduleDAGSDNodes::AddRegisterOperand(MachineInstr *MI, SDValue Op,
+                                       unsigned IIOpNum,
+                                       const TargetInstrDesc *II,
+                                       DenseMap<SDValue, unsigned> &VRBaseMap) {
+  assert(Op.getValueType() != MVT::Other &&
+         Op.getValueType() != MVT::Flag &&
+         "Chain and flag operands should occur at end of operand list!");
+  // Get/emit the operand.
+  unsigned VReg = getVR(Op, VRBaseMap);
+  assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
+
+  const TargetInstrDesc &TID = MI->getDesc();
+  bool isOptDef = IIOpNum < TID.getNumOperands() &&
+    TID.OpInfo[IIOpNum].isOptionalDef();
+
+  // If the instruction requires a register in a different class, create
+  // a new virtual register and copy the value into it.
+  if (II) {
+    const TargetRegisterClass *SrcRC =
+      MRI.getRegClass(VReg);
+    const TargetRegisterClass *DstRC =
+      getInstrOperandRegClass(TRI, *II, IIOpNum);
+    assert((DstRC || (TID.isVariadic() && IIOpNum >= TID.getNumOperands())) &&
+           "Don't have operand info for this instruction!");
+    if (DstRC && SrcRC != DstRC && !SrcRC->hasSuperClass(DstRC)) {
+      unsigned NewVReg = MRI.createVirtualRegister(DstRC);
+      bool Emitted = TII->copyRegToReg(*BB, InsertPos, NewVReg, VReg,
+                                       DstRC, SrcRC);
+      // If the target didn't handle the copy with different register
+      // classes and the destination is a subset of the source,
+      // try a normal same-RC copy.
+      if (!Emitted && DstRC->hasSuperClass(SrcRC))
+        Emitted = TII->copyRegToReg(*BB, InsertPos, NewVReg, VReg,
+                                    SrcRC, SrcRC);
+      assert(Emitted && "Unable to issue a copy instruction!\n");
+      VReg = NewVReg;
+    }
+  }
+
+  MI->addOperand(MachineOperand::CreateReg(VReg, isOptDef));
+}
+
 /// AddOperand - Add the specified operand to the specified machine instr.  II
 /// specifies the instruction information for the node, and IIOpNum is the
 /// operand number (in the II) that we are adding. IIOpNum and II are used for 
@@ -239,44 +296,7 @@
                                     const TargetInstrDesc *II,
                                     DenseMap<SDValue, unsigned> &VRBaseMap) {
   if (Op.isMachineOpcode()) {
-    // Note that this case is redundant with the final else block, but we
-    // include it because it is the most common and it makes the logic
-    // simpler here.
-    assert(Op.getValueType() != MVT::Other &&
-           Op.getValueType() != MVT::Flag &&
-           "Chain and flag operands should occur at end of operand list!");
-    // Get/emit the operand.
-    unsigned VReg = getVR(Op, VRBaseMap);
-    const TargetInstrDesc &TID = MI->getDesc();
-    bool isOptDef = IIOpNum < TID.getNumOperands() &&
-      TID.OpInfo[IIOpNum].isOptionalDef();
-    MI->addOperand(MachineOperand::CreateReg(VReg, isOptDef));
-    
-    // Verify that it is right.
-    assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
-#ifndef NDEBUG
-    if (II) {
-      // There may be no register class for this operand if it is a variadic
-      // argument (RC will be NULL in this case).  In this case, we just assume
-      // the regclass is ok.
-      const TargetRegisterClass *RC= getInstrOperandRegClass(TRI, *II, IIOpNum);
-      assert((RC || II->isVariadic()) && "Expected reg class info!");
-      const TargetRegisterClass *VRC = MRI.getRegClass(VReg);
-      if (RC && VRC != RC) {
-        cerr << "Register class of operand and regclass of use don't agree!\n";
-        cerr << "Operand = " << IIOpNum << "\n";
-        cerr << "Op->Val = "; Op.getNode()->dump(DAG); cerr << "\n";
-        cerr << "MI = "; MI->print(cerr);
-        cerr << "VReg = " << VReg << "\n";
-        cerr << "VReg RegClass     size = " << VRC->getSize()
-             << ", align = " << VRC->getAlignment() << "\n";
-        cerr << "Expected RegClass size = " << RC->getSize()
-             << ", align = " << RC->getAlignment() << "\n";
-        cerr << "Fatal error, aborting.\n";
-        abort();
-      }
-    }
-#endif
+    AddRegisterOperand(MI, Op, IIOpNum, II, VRBaseMap);
   } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
     MI->addOperand(MachineOperand::CreateImm(C->getZExtValue()));
   } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
@@ -286,8 +306,8 @@
     MI->addOperand(MachineOperand::CreateReg(R->getReg(), false));
   } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
     MI->addOperand(MachineOperand::CreateGA(TGA->getGlobal(),TGA->getOffset()));
-  } else if (BasicBlockSDNode *BB = dyn_cast<BasicBlockSDNode>(Op)) {
-    MI->addOperand(MachineOperand::CreateMBB(BB->getBasicBlock()));
+  } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
+    MI->addOperand(MachineOperand::CreateMBB(BBNode->getBasicBlock()));
   } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
     MI->addOperand(MachineOperand::CreateFI(FI->getIndex()));
   } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
@@ -317,19 +337,35 @@
     assert(Op.getValueType() != MVT::Other &&
            Op.getValueType() != MVT::Flag &&
            "Chain and flag operands should occur at end of operand list!");
-    unsigned VReg = getVR(Op, VRBaseMap);
-    MI->addOperand(MachineOperand::CreateReg(VReg, false));
-    
-    // Verify that it is right.  Note that the reg class of the physreg and the
-    // vreg don't necessarily need to match, but the target copy insertion has
-    // to be able to handle it.  This handles things like copies from ST(0) to
-    // an FP vreg on x86.
-    assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
-    if (II && !II->isVariadic()) {
-      assert(getInstrOperandRegClass(TRI, *II, IIOpNum) &&
-             "Don't have operand info for this instruction!");
-    }
-  }  
+    AddRegisterOperand(MI, Op, IIOpNum, II, VRBaseMap);
+  }
+}
+
+/// getSubRegisterRegClass - Returns the register class of specified register
+/// class' "SubIdx"'th sub-register class.
+static const TargetRegisterClass*
+getSubRegisterRegClass(const TargetRegisterClass *TRC, unsigned SubIdx) {
+  // Pick the register class of the subregister
+  TargetRegisterInfo::regclass_iterator I =
+    TRC->subregclasses_begin() + SubIdx-1;
+  assert(I < TRC->subregclasses_end() &&
+         "Invalid subregister index for register class");
+  return *I;
+}
+
+/// getSuperRegisterRegClass - Returns the register class of a superreg A whose
+/// "SubIdx"'th sub-register class is the specified register class and whose
+/// type matches the specified type.
+static const TargetRegisterClass*
+getSuperRegisterRegClass(const TargetRegisterClass *TRC,
+                         unsigned SubIdx, MVT VT) {
+  // Pick the register class of the superregister for this type
+  for (TargetRegisterInfo::regclass_iterator I = TRC->superregclasses_begin(),
+         E = TRC->superregclasses_end(); I != E; ++I)
+    if ((*I)->hasType(VT) && getSubRegisterRegClass(*I, SubIdx) == TRC)
+      return *I;
+  assert(false && "Couldn't find the register class");
+  return 0;
 }
 
 /// EmitSubregNode - Generate machine code for subreg nodes.
@@ -362,21 +398,20 @@
                                TII->get(TargetInstrInfo::EXTRACT_SUBREG));
 
     // Figure out the register class to create for the destreg.
-    const TargetRegisterClass *SRC = TLI->getRegClassFor(Node->getValueType(0));
+    unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
+    const TargetRegisterClass *TRC = MRI.getRegClass(VReg);
+    const TargetRegisterClass *SRC = getSubRegisterRegClass(TRC, SubIdx);
 
-    if (VRBase) {
-      // Grab the destination register
-#ifndef NDEBUG
-      const TargetRegisterClass *DRC = MRI.getRegClass(VRBase);
-      assert(SRC && DRC && SRC == DRC && 
-             "Source subregister and destination must have the same class");
-#endif
-    } else {
+    // Figure out the register class to create for the destreg.
+    // Note that if we're going to directly use an existing register,
+    // it must be precisely the required class, and not a subclass
+    // thereof.
+    if (VRBase == 0 || SRC != MRI.getRegClass(VRBase)) {
       // Create the reg
       assert(SRC && "Couldn't find source register class");
       VRBase = MRI.createVirtualRegister(SRC);
     }
-    
+
     // Add def, source, and subreg index
     MI->addOperand(MachineOperand::CreateReg(VRBase, true));
     AddOperand(MI, Node->getOperand(0), 0, 0, VRBaseMap);
@@ -387,19 +422,23 @@
     SDValue N0 = Node->getOperand(0);
     SDValue N1 = Node->getOperand(1);
     SDValue N2 = Node->getOperand(2);
+    unsigned SubReg = getVR(N1, VRBaseMap);
     unsigned SubIdx = cast<ConstantSDNode>(N2)->getZExtValue();
-    
-      
+    const TargetRegisterClass *TRC = MRI.getRegClass(SubReg);
+    const TargetRegisterClass *SRC =
+      getSuperRegisterRegClass(TRC, SubIdx,
+                               Node->getValueType(0));
+
     // Figure out the register class to create for the destreg.
-    const TargetRegisterClass *TRC = 0;
-    if (VRBase) {
-      TRC = MRI.getRegClass(VRBase);
-    } else {
-      TRC = TLI->getRegClassFor(Node->getValueType(0));
-      assert(TRC && "Couldn't determine register class for insert_subreg");
-      VRBase = MRI.createVirtualRegister(TRC); // Create the reg
+    // Note that if we're going to directly use an existing register,
+    // it must be precisely the required class, and not a subclass
+    // thereof.
+    if (VRBase == 0 || SRC != MRI.getRegClass(VRBase)) {
+      // Create the reg
+      assert(SRC && "Couldn't find source register class");
+      VRBase = MRI.createVirtualRegister(SRC);
     }
-    
+
     // Create the insert_subreg or subreg_to_reg machine instruction.
     MachineInstr *MI = BuildMI(MF, Node->getDebugLoc(), TII->get(Opc));
     MI->addOperand(MachineOperand::CreateReg(VRBase, true));
@@ -424,6 +463,38 @@
   assert(isNew && "Node emitted out of order - early");
 }
 
+/// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes.
+/// COPY_TO_REGCLASS is just a normal copy, except that the destination
+/// register is constrained to be in a particular register class.
+///
+void
+ScheduleDAGSDNodes::EmitCopyToRegClassNode(SDNode *Node,
+                                       DenseMap<SDValue, unsigned> &VRBaseMap) {
+  unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
+  const TargetRegisterClass *SrcRC = MRI.getRegClass(VReg);
+
+  unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+  const TargetRegisterClass *DstRC = TRI->getRegClass(DstRCIdx);
+
+  // Create the new VReg in the destination class and emit a copy.
+  unsigned NewVReg = MRI.createVirtualRegister(DstRC);
+  bool Emitted = TII->copyRegToReg(*BB, InsertPos, NewVReg, VReg,
+                                   DstRC, SrcRC);
+  // If the target didn't handle the copy with different register
+  // classes and the destination is a subset of the source,
+  // try a normal same-RC copy.
+  if (!Emitted && SrcRC->hasSubClass(DstRC))
+    Emitted = TII->copyRegToReg(*BB, InsertPos, NewVReg, VReg,
+                                SrcRC, SrcRC);
+  assert(Emitted &&
+         "Unable to issue a copy instruction for a COPY_TO_REGCLASS node!\n");
+
+  SDValue Op(Node, 0);
+  bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
+  isNew = isNew; // Silence compiler warning.
+  assert(isNew && "Node emitted out of order - early");
+}
+
 /// EmitNode - Generate machine code for an node and needed dependencies.
 ///
 void ScheduleDAGSDNodes::EmitNode(SDNode *Node, bool IsClone, bool IsCloned,
@@ -440,6 +511,12 @@
       return;
     }
 
+    // Handle COPY_TO_REGCLASS specially.
+    if (Opc == TargetInstrInfo::COPY_TO_REGCLASS) {
+      EmitCopyToRegClassNode(Node, VRBaseMap);
+      return;
+    }
+
     if (Opc == TargetInstrInfo::IMPLICIT_DEF)
       // We want a unique VR for each IMPLICIT_DEF use.
       return;
@@ -530,12 +607,17 @@
     else
       DstTRC = TRI->getPhysicalRegisterRegClass(DestReg,
                                             Node->getOperand(1).getValueType());
+
     bool Emitted = TII->copyRegToReg(*BB, InsertPos, DestReg, SrcReg,
                                      DstTRC, SrcTRC);
-    if (!Emitted) {
-      cerr << "Unable to issue a copy instruction!\n";
-      abort();
-    }
+    // If the target didn't handle the copy with different register
+    // classes and the source is a subset of the destination,
+    // try a normal destination-class copy.
+    if (!Emitted && DstTRC->hasSubClass(SrcTRC))
+      Emitted = TII->copyRegToReg(*BB, InsertPos, DestReg, SrcReg,
+                                  DstTRC, DstTRC);
+
+    assert(Emitted && "Unable to issue a copy instruction!\n");
     break;
   }
   case ISD::CopyFromReg: {
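
Several hunks above repeat the same retry idiom: ask copyRegToReg for a cross-class copy, and if the target declines and one class is a proper subset of the other, retry the copy within the larger class. A hypothetical factoring of that idiom (emitCopyWithFallback is not in the patch; the parameters mirror the copyRegToReg calls above):

  // Hypothetical helper, not part of this commit: the copy-with-fallback
  // idiom used in the hunks above. If the target rejects a cross-class copy
  // and one register class is a proper subset of the other, a copy done
  // entirely in the larger class still covers both registers.
  static bool emitCopyWithFallback(const TargetInstrInfo *TII,
                                   MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator InsertPos,
                                   unsigned DstReg, unsigned SrcReg,
                                   const TargetRegisterClass *DstRC,
                                   const TargetRegisterClass *SrcRC) {
    bool Emitted = TII->copyRegToReg(MBB, InsertPos, DstReg, SrcReg,
                                     DstRC, SrcRC);
    if (!Emitted) {
      if (DstRC->hasSuperClass(SrcRC))       // DstRC is a subset of SrcRC
        Emitted = TII->copyRegToReg(MBB, InsertPos, DstReg, SrcReg,
                                    SrcRC, SrcRC);
      else if (SrcRC->hasSuperClass(DstRC))  // SrcRC is a subset of DstRC
        Emitted = TII->copyRegToReg(MBB, InsertPos, DstReg, SrcReg,
                                    DstRC, DstRC);
    }
    return Emitted;
  }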

Modified: llvm/branches/Apple/Dib/lib/CodeGen/Spiller.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/CodeGen/Spiller.cpp?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/lib/CodeGen/Spiller.cpp (original)
+++ llvm/branches/Apple/Dib/lib/CodeGen/Spiller.cpp Wed Apr 15 13:19:52 2009
@@ -104,6 +104,7 @@
             }
             MF.getRegInfo().setPhysRegUsed(RReg);
             MI.getOperand(i).setReg(RReg);
+            MI.getOperand(i).setSubReg(0);
           } else {
             MF.getRegInfo().setPhysRegUsed(MO.getReg());
           }
@@ -280,6 +281,7 @@
     assert(Phys);
     unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
     MO.setReg(RReg);
+    MO.setSubReg(0);
   }
   ++NumReMats;
 }
@@ -496,7 +498,8 @@
         unsigned SubIdx = MI->getOperand(NewOp.Operand).getSubReg();
         unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg;
         MI->getOperand(NewOp.Operand).setReg(RReg);
-        
+        MI->getOperand(NewOp.Operand).setSubReg(0);
+
         Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
         --MII;
         UpdateKills(*MII, RegKills, KillOps, TRI);
@@ -1122,6 +1125,7 @@
           ReusedOperands.markClobbered(Phys);
         unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
         MI.getOperand(i).setReg(RReg);
+        MI.getOperand(i).setSubReg(0);
         if (VRM.isImplicitlyDefined(VirtReg))
           BuildMI(MBB, &MI, MI.getDebugLoc(),
                   TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
@@ -1185,6 +1189,7 @@
                << TRI->getName(VRM.getPhys(VirtReg)) << "\n";
           unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
           MI.getOperand(i).setReg(RReg);
+          MI.getOperand(i).setSubReg(0);
 
           // The only technical detail we have is that we don't know that
           // PhysReg won't be clobbered by a reloaded stack slot that occurs
@@ -1264,6 +1269,7 @@
                << " instead of reloading into same physreg.\n";
           unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
           MI.getOperand(i).setReg(RReg);
+          MI.getOperand(i).setSubReg(0);
           ReusedOperands.markClobbered(RReg);
           ++NumReused;
           continue;
@@ -1284,6 +1290,7 @@
         unsigned RReg =
           SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
         MI.getOperand(i).setReg(RReg);
+        MI.getOperand(i).setSubReg(0);
         DOUT << '\t' << *prior(MII);
         ++NumReused;
         continue;
@@ -1328,6 +1335,7 @@
       }
       unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
       MI.getOperand(i).setReg(RReg);
+      MI.getOperand(i).setSubReg(0);
       UpdateKills(*prior(MII), RegKills, KillOps, TRI);
       DOUT << '\t' << *prior(MII);
     }
@@ -1613,6 +1621,7 @@
       unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
       ReusedOperands.markClobbered(RReg);
       MI.getOperand(i).setReg(RReg);
+      MI.getOperand(i).setSubReg(0);
 
       if (!MO.isDead()) {
         MachineInstr *&LastStore = MaybeDeadStores[StackSlot];

Modified: llvm/branches/Apple/Dib/lib/CodeGen/TwoAddressInstructionPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/CodeGen/TwoAddressInstructionPass.cpp?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/lib/CodeGen/TwoAddressInstructionPass.cpp (original)
+++ llvm/branches/Apple/Dib/lib/CodeGen/TwoAddressInstructionPass.cpp Wed Apr 15 13:19:52 2009
@@ -857,7 +857,8 @@
             ReMatRegs.set(regB);
             ++NumReMats;
           } else {
-            TII->copyRegToReg(*mbbi, mi, regA, regB, rc, rc);
+            bool Emitted = TII->copyRegToReg(*mbbi, mi, regA, regB, rc, rc);
+            assert(Emitted && "Unable to issue a copy instruction!\n");
           }
 
           MachineBasicBlock::iterator prevMI = prior(mi);

Modified: llvm/branches/Apple/Dib/lib/Target/TargetRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Target/TargetRegisterInfo.cpp?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/lib/Target/TargetRegisterInfo.cpp (original)
+++ llvm/branches/Apple/Dib/lib/Target/TargetRegisterInfo.cpp Wed Apr 15 13:19:52 2009
@@ -23,9 +23,13 @@
 TargetRegisterInfo::TargetRegisterInfo(const TargetRegisterDesc *D, unsigned NR,
                              regclass_iterator RCB, regclass_iterator RCE,
                              int CFSO, int CFDO,
-                             const unsigned* subregs, const unsigned subregsize)
-  : SubregHash(subregs), SubregHashSize(subregsize), Desc(D), NumRegs(NR),
-    RegClassBegin(RCB), RegClassEnd(RCE) {
+			     const unsigned* subregs, const unsigned subregsize,
+                         const unsigned* superregs, const unsigned superregsize,
+			 const unsigned* aliases, const unsigned aliasessize)
+  : SubregHash(subregs), SubregHashSize(subregsize),
+    SuperregHash(superregs), SuperregHashSize(superregsize),
+    AliasesHash(aliases), AliasesHashSize(aliasessize),
+    Desc(D), NumRegs(NR), RegClassBegin(RCB), RegClassEnd(RCE) {
   assert(NumRegs < FirstVirtualRegister &&
          "Target has too many physical registers!");
 

Modified: llvm/branches/Apple/Dib/lib/Target/X86/X86FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Target/X86/X86FastISel.cpp?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/lib/Target/X86/X86FastISel.cpp (original)
+++ llvm/branches/Apple/Dib/lib/Target/X86/X86FastISel.cpp Wed Apr 15 13:19:52 2009
@@ -997,7 +997,7 @@
     return false;
 
   // First issue a copy to GR16_ or GR32_.
-  unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16to16_ : X86::MOV32to32_;
+  unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16rr : X86::MOV32rr;
   const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
     ? X86::GR16_RegisterClass : X86::GR32_RegisterClass;
   unsigned CopyReg = createResultReg(CopyRC);

Modified: llvm/branches/Apple/Dib/lib/Target/X86/X86ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Target/X86/X86ISelDAGToDAG.cpp?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/lib/Target/X86/X86ISelDAGToDAG.cpp (original)
+++ llvm/branches/Apple/Dib/lib/Target/X86/X86ISelDAGToDAG.cpp Wed Apr 15 13:19:52 2009
@@ -229,12 +229,6 @@
     ///
     SDNode *getGlobalBaseReg();
 
-    /// getTruncateTo8Bit - return an SDNode that implements a subreg based
-    /// truncate of the specified operand to i8. This can be done with tablegen,
-    /// except that this code uses MVT::Flag in a tricky way that happens to
-    /// improve scheduling in some cases.
-    SDNode *getTruncateTo8Bit(SDValue N0);
-
 #ifndef NDEBUG
     unsigned Indent;
 #endif
@@ -938,21 +932,82 @@
     break;
       
   case ISD::AND: {
-    // Handle "(x << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
-    // allows us to fold the shift into this addressing mode.
+    // Perform some heroic transforms on an and of a constant-count shift
+    // with a constant to enable use of the scaled offset field.
+
     SDValue Shift = N.getOperand(0);
-    if (Shift.getOpcode() != ISD::SHL) break;
+    if (Shift.getNumOperands() != 2) break;
 
     // Scale must not be used already.
     if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
 
     // Not when RIP is used as the base.
     if (AM.isRIPRel) break;
-      
+
+    SDValue X = Shift.getOperand(0);
     ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
     ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
     if (!C1 || !C2) break;
 
+    // Handle "(X >> (8-C1)) & C2" as "((X >> 8) & 0xff) << C1" if safe. This
+    // allows us to convert the shift and and into an h-register extract and
+    // a scaled index.
+    if (Shift.getOpcode() == ISD::SRL && Shift.hasOneUse()) {
+      unsigned ScaleLog = 8 - C1->getZExtValue();
+      if (ScaleLog > 0 && ScaleLog < 64 &&
+          C2->getZExtValue() == (UINT64_C(0xff) << ScaleLog)) {
+        SDValue Eight = CurDAG->getConstant(8, MVT::i8);
+        SDValue Mask = CurDAG->getConstant(0xff, N.getValueType());
+        SDValue Srl = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
+                                      X, Eight);
+        SDValue And = CurDAG->getNode(ISD::AND, dl, N.getValueType(),
+                                      Srl, Mask);
+        SDValue ShlCount = CurDAG->getConstant(ScaleLog, MVT::i8);
+        SDValue Shl = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
+                                      And, ShlCount);
+
+        // Insert the new nodes into the topological ordering.
+        if (Eight.getNode()->getNodeId() == -1 ||
+            Eight.getNode()->getNodeId() > X.getNode()->getNodeId()) {
+          CurDAG->RepositionNode(X.getNode(), Eight.getNode());
+          Eight.getNode()->setNodeId(X.getNode()->getNodeId());
+        }
+        if (Mask.getNode()->getNodeId() == -1 ||
+            Mask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
+          CurDAG->RepositionNode(X.getNode(), Mask.getNode());
+          Mask.getNode()->setNodeId(X.getNode()->getNodeId());
+        }
+        if (Srl.getNode()->getNodeId() == -1 ||
+            Srl.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
+          CurDAG->RepositionNode(Shift.getNode(), Srl.getNode());
+          Srl.getNode()->setNodeId(Shift.getNode()->getNodeId());
+        }
+        if (And.getNode()->getNodeId() == -1 ||
+            And.getNode()->getNodeId() > N.getNode()->getNodeId()) {
+          CurDAG->RepositionNode(N.getNode(), And.getNode());
+          And.getNode()->setNodeId(N.getNode()->getNodeId());
+        }
+        if (ShlCount.getNode()->getNodeId() == -1 ||
+            ShlCount.getNode()->getNodeId() > X.getNode()->getNodeId()) {
+          CurDAG->RepositionNode(X.getNode(), ShlCount.getNode());
+          ShlCount.getNode()->setNodeId(N.getNode()->getNodeId());
+        }
+        if (Shl.getNode()->getNodeId() == -1 ||
+            Shl.getNode()->getNodeId() > N.getNode()->getNodeId()) {
+          CurDAG->RepositionNode(N.getNode(), Shl.getNode());
+          Shl.getNode()->setNodeId(N.getNode()->getNodeId());
+        }
+        CurDAG->ReplaceAllUsesWith(N, Shl);
+        AM.IndexReg = And;
+        AM.Scale = (1 << ScaleLog);
+        return false;
+      }
+    }
+
+    // Handle "(X << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
+    // allows us to fold the shift into this addressing mode.
+    if (Shift.getOpcode() != ISD::SHL) break;
+
     // Not likely to be profitable if either the AND or SHIFT node has more
     // than one use (unless all uses are for address computation). Besides,
     // isel mechanism requires their node ids to be reused.
@@ -965,7 +1020,6 @@
       break;
     
     // Get the new AND mask, this folds to a constant.
-    SDValue X = Shift.getOperand(0);
     SDValue NewANDMask = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
                                          SDValue(C2, 0), SDValue(C1, 0));
     SDValue NewAND = CurDAG->getNode(ISD::AND, dl, N.getValueType(), X, 
@@ -1173,36 +1227,6 @@
   return FindCallStartFromCall(Node->getOperand(0).getNode());
 }
 
-/// getTruncateTo8Bit - return an SDNode that implements a subreg based
-/// truncate of the specified operand to i8. This can be done with tablegen,
-/// except that this code uses MVT::Flag in a tricky way that happens to
-/// improve scheduling in some cases.
-SDNode *X86DAGToDAGISel::getTruncateTo8Bit(SDValue N0) {
-  assert(!Subtarget->is64Bit() &&
-         "getTruncateTo8Bit is only needed on x86-32!");
-  SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
-  DebugLoc dl = N0.getDebugLoc();
-
-  // Ensure that the source register has an 8-bit subreg on 32-bit targets
-  unsigned Opc;
-  MVT N0VT = N0.getValueType();
-  switch (N0VT.getSimpleVT()) {
-  default: assert(0 && "Unknown truncate!");
-  case MVT::i16:
-    Opc = X86::MOV16to16_;
-    break;
-  case MVT::i32:
-    Opc = X86::MOV32to32_;
-    break;
-  }
-
-  // The use of MVT::Flag here is not strictly accurate, but it helps
-  // scheduling in some cases.
-  N0 = SDValue(CurDAG->getTargetNode(Opc, dl, N0VT, MVT::Flag, N0), 0);
-  return CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
-                               MVT::i8, N0, SRIdx, N0.getValue(1));
-}
-
 SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
   SDValue Chain = Node->getOperand(0);
   SDValue In1 = Node->getOperand(1);
@@ -1342,7 +1366,7 @@
                                                  Result,
                                      CurDAG->getTargetConstant(8, MVT::i8)), 0);
           // Then truncate it down to i8.
-          SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
+          SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, MVT::i32);
           Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
                                                    MVT::i8, Result, SRIdx), 0);
         } else {
@@ -1492,7 +1516,7 @@
                                         CurDAG->getTargetConstant(8, MVT::i8)), 
                            0);
           // Then truncate it down to i8.
-          SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
+          SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, MVT::i32);
           Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
                                                    MVT::i8, Result, SRIdx), 0);
         } else {
@@ -1515,55 +1539,6 @@
       return NULL;
     }
 
-    case ISD::SIGN_EXTEND_INREG: {
-      MVT SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
-      if (SVT == MVT::i8 && !Subtarget->is64Bit()) {
-        SDValue N0 = Node->getOperand(0);
-      
-        SDValue TruncOp = SDValue(getTruncateTo8Bit(N0), 0);
-        unsigned Opc = 0;
-        switch (NVT.getSimpleVT()) {
-        default: assert(0 && "Unknown sign_extend_inreg!");
-        case MVT::i16:
-          Opc = X86::MOVSX16rr8;
-          break;
-        case MVT::i32:
-          Opc = X86::MOVSX32rr8; 
-          break;
-        }
-      
-        SDNode *ResNode = CurDAG->getTargetNode(Opc, dl, NVT, TruncOp);
-      
-#ifndef NDEBUG
-        DOUT << std::string(Indent-2, ' ') << "=> ";
-        DEBUG(TruncOp.getNode()->dump(CurDAG));
-        DOUT << "\n";
-        DOUT << std::string(Indent-2, ' ') << "=> ";
-        DEBUG(ResNode->dump(CurDAG));
-        DOUT << "\n";
-        Indent -= 2;
-#endif
-        return ResNode;
-      }
-      break;
-    }
-    
-    case ISD::TRUNCATE: {
-      if (NVT == MVT::i8 && !Subtarget->is64Bit()) {
-        SDValue Input = Node->getOperand(0);
-        SDNode *ResNode = getTruncateTo8Bit(Input);
-      
-#ifndef NDEBUG
-        DOUT << std::string(Indent-2, ' ') << "=> ";
-        DEBUG(ResNode->dump(CurDAG));
-        DOUT << "\n";
-        Indent -= 2;
-#endif
-        return ResNode;
-      }
-      break;
-    }
-
     case ISD::DECLARE: {
       // Handle DECLARE nodes here because the second operand may have been
       // wrapped in X86ISD::Wrapper.
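
The new ISD::AND case above rests on a simple identity: when the mask C2 equals 0xff shifted left by C1, "(X >> (8-C1)) & C2" is the same value as "((X >> 8) & 0xff) << C1", i.e. bits 8-15 of X (an h register on x86) scaled by 1 << C1, which is why the new "and" becomes AM.IndexReg and 1 << ScaleLog becomes AM.Scale. A quick standalone check of the identity (illustration only, not from the patch):

  // Illustration only: the arithmetic identity behind the new address-mode
  // transform. With C1 = 3 (ScaleLog) and X = 0x12345678, both forms give
  // 0x2b0 -- the byte 0x56 from bits 8-15, scaled by 8.
  #include <cassert>
  #include <cstdint>

  int main() {
    const unsigned C1 = 3;
    uint64_t X = 0x12345678;
    uint64_t Before = (X >> (8 - C1)) & (UINT64_C(0xff) << C1);
    uint64_t After  = ((X >> 8) & UINT64_C(0xff)) << C1;
    assert(Before == After);  // both evaluate to 0x2b0
    return 0;
  }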

Modified: llvm/branches/Apple/Dib/lib/Target/X86/X86Instr64bit.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Target/X86/X86Instr64bit.td?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/lib/Target/X86/X86Instr64bit.td (original)
+++ llvm/branches/Apple/Dib/lib/Target/X86/X86Instr64bit.td Wed Apr 15 13:19:52 2009
@@ -255,6 +255,10 @@
 
 // Sign/Zero extenders
 
+// MOVSX64rr8 always has a REX prefix and it has an 8-bit register
+// operand, which makes it a rare instruction with an 8-bit register
+// operand that can never access an h register. If support for h registers
+// were generalized, this would require a special register class.
 def MOVSX64rr8 : RI<0xBE, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
                     "movs{bq|x}\t{$src, $dst|$dst, $src}",
                     [(set GR64:$dst, (sext GR8:$src))]>, TB;
@@ -1518,7 +1522,7 @@
 
 // r & (2^32-1) ==> movz
 def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
-          (MOVZX64rr32 (i32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)))>;
+          (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
 // r & (2^16-1) ==> movz
 def : Pat<(and GR64:$src, 0xffff),
           (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)))>;
@@ -1527,7 +1531,7 @@
           (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)))>;
 // r & (2^8-1) ==> movz
 def : Pat<(and GR32:$src1, 0xff),
-           (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR32:$src1, x86_subreg_8bit)))>,
+           (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, x86_subreg_8bit))>,
       Requires<[In64BitMode]>;
 // r & (2^8-1) ==> movz
 def : Pat<(and GR16:$src1, 0xff),
@@ -1536,13 +1540,13 @@
 
 // sext_inreg patterns
 def : Pat<(sext_inreg GR64:$src, i32),
-          (MOVSX64rr32 (i32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)))>;
+          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
 def : Pat<(sext_inreg GR64:$src, i16),
-          (MOVSX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)))>;
+          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit))>;
 def : Pat<(sext_inreg GR64:$src, i8),
-          (MOVSX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)))>;
+          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit))>;
 def : Pat<(sext_inreg GR32:$src, i8),
-          (MOVSX32rr8 (i8 (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit)))>,
+          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit))>,
       Requires<[In64BitMode]>;
 def : Pat<(sext_inreg GR16:$src, i8),
           (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit)))>,
@@ -1550,16 +1554,63 @@
 
 // trunc patterns
 def : Pat<(i32 (trunc GR64:$src)),
-          (i32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
+          (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)>;
 def : Pat<(i16 (trunc GR64:$src)),
-          (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit))>;
+          (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)>;
 def : Pat<(i8 (trunc GR64:$src)),
-          (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit))>;
+          (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)>;
 def : Pat<(i8 (trunc GR32:$src)),
-          (i8 (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit))>,
+          (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit)>,
       Requires<[In64BitMode]>;
 def : Pat<(i8 (trunc GR16:$src)),
-          (i8 (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit))>,
+          (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit)>,
+      Requires<[In64BitMode]>;
+
+// h-register tricks.
+// For now, be conservative and only do the extract if the value is immediately
+// zero-extended or stored, which are somewhat common cases. This uses a bunch
+// of code to prevent a register requiring a REX prefix from being allocated in
+// the same instruction as the h register, as there's currently no way to
+// describe this requirement to the register allocator.
+
+// h-register extract and zero-extend.
+def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
+          (SUBREG_TO_REG
+            (i64 0),
+            (MOVZX32_NOREXrr8
+              (EXTRACT_SUBREG (COPY_TO_REGCLASS GR64:$src, GR64_),
+                              x86_subreg_8bit_hi)),
+            x86_subreg_32bit)>;
+def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
+          (MOVZX32_NOREXrr8
+            (EXTRACT_SUBREG (COPY_TO_REGCLASS GR32:$src, GR32_),
+                            x86_subreg_8bit_hi))>,
+      Requires<[In64BitMode]>;
+def : Pat<(srl_su GR16:$src, (i8 8)),
+          (EXTRACT_SUBREG
+            (MOVZX32_NOREXrr8
+              (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_),
+                              x86_subreg_8bit_hi)),
+            x86_subreg_16bit)>,
+      Requires<[In64BitMode]>;
+
+// h-register extract and store.
+def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
+          (MOV8mr_NOREX
+            addr:$dst,
+            (EXTRACT_SUBREG (COPY_TO_REGCLASS GR64:$src, GR64_),
+                            x86_subreg_8bit_hi))>;
+def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
+          (MOV8mr_NOREX
+            addr:$dst,
+            (EXTRACT_SUBREG (COPY_TO_REGCLASS GR32:$src, GR32_),
+                            x86_subreg_8bit_hi))>,
+      Requires<[In64BitMode]>;
+def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
+          (MOV8mr_NOREX
+            addr:$dst,
+            (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_),
+                            x86_subreg_8bit_hi))>,
       Requires<[In64BitMode]>;
 
 // (shl x, 1) ==> (add x, x)

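As a rough standalone illustration (C++, not part of this patch), this is the
kind of source-level computation the new h-register patterns are aimed at.
When the value is held in RAX, RBX, RCX, or RDX, the shift-and-mask below can
be selected to a single movzbl from AH/BH/CH/DH instead of a shift followed by
an and:

    // Illustrative only. (x >> 8) & 0xff reads byte 1 of x, which is exactly
    // the byte that AH/BH/CH/DH name, so no explicit shift is needed.
    #include <cstdint>
    #include <cstdio>

    static uint64_t byte1(uint64_t x) {
      return (x >> 8) & 0xff;  // candidate for MOVZX32_NOREXrr8 on the h register
    }

    int main() {
      std::printf("%u\n", (unsigned)byte1(0x1234));  // prints 18 (0x12)
      return 0;
    }
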
Modified: llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.cpp?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.cpp Wed Apr 15 13:19:52 2009
@@ -258,10 +258,8 @@
     { X86::JMP64r,      X86::JMP64m, 1 },
     { X86::MOV16ri,     X86::MOV16mi, 0 },
     { X86::MOV16rr,     X86::MOV16mr, 0 },
-    { X86::MOV16to16_,  X86::MOV16_mr, 0 },
     { X86::MOV32ri,     X86::MOV32mi, 0 },
     { X86::MOV32rr,     X86::MOV32mr, 0 },
-    { X86::MOV32to32_,  X86::MOV32_mr, 0 },
     { X86::MOV64ri32,   X86::MOV64mi32, 0 },
     { X86::MOV64rr,     X86::MOV64mr, 0 },
     { X86::MOV8ri,      X86::MOV8mi, 0 },
@@ -372,9 +370,7 @@
     { X86::Int_UCOMISDrr,   X86::Int_UCOMISDrm },
     { X86::Int_UCOMISSrr,   X86::Int_UCOMISSrm },
     { X86::MOV16rr,         X86::MOV16rm },
-    { X86::MOV16to16_,      X86::MOV16_rm },
     { X86::MOV32rr,         X86::MOV32rm },
-    { X86::MOV32to32_,      X86::MOV32_rm },
     { X86::MOV64rr,         X86::MOV64rm },
     { X86::MOV64toPQIrr,    X86::MOVQI2PQIrm },
     { X86::MOV64toSDrr,     X86::MOV64toSDrm },
@@ -404,6 +400,7 @@
     { X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm },
     { X86::MOVZX16rr8,      X86::MOVZX16rm8 },
     { X86::MOVZX32rr16,     X86::MOVZX32rm16 },
+    { X86::MOVZX32_NOREXrr8, X86::MOVZX32_NOREXrm8 },
     { X86::MOVZX32rr8,      X86::MOVZX32rm8 },
     { X86::MOVZX64rr16,     X86::MOVZX64rm16 },
     { X86::MOVZX64rr32,     X86::MOVZX64rm32 },
@@ -672,8 +669,6 @@
   case X86::MOV16rr:
   case X86::MOV32rr: 
   case X86::MOV64rr:
-  case X86::MOV16to16_:
-  case X86::MOV32to32_:
   case X86::MOVSSrr:
   case X86::MOVSDrr:
 
@@ -710,9 +705,7 @@
   default: break;
   case X86::MOV8rm:
   case X86::MOV16rm:
-  case X86::MOV16_rm:
   case X86::MOV32rm:
-  case X86::MOV32_rm:
   case X86::MOV64rm:
   case X86::LD_Fp64m:
   case X86::MOVSSrm:
@@ -741,9 +734,7 @@
   default: break;
   case X86::MOV8mr:
   case X86::MOV16mr:
-  case X86::MOV16_mr:
   case X86::MOV32mr:
-  case X86::MOV32_mr:
   case X86::MOV64mr:
   case X86::ST_FpP64m:
   case X86::MOVSSmr:
@@ -795,9 +786,7 @@
   default: break;
     case X86::MOV8rm:
     case X86::MOV16rm:
-    case X86::MOV16_rm:
     case X86::MOV32rm:
-    case X86::MOV32_rm:
     case X86::MOV64rm:
     case X86::LD_Fp64m:
     case X86::MOVSSrm:
@@ -1652,6 +1641,11 @@
   return Count;
 }
 
+/// isHReg - Test if the given register is a physical h register.
+static bool isHReg(unsigned Reg) {
+  return Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH;
+}
+
 bool X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI,
                                 unsigned DestReg, unsigned SrcReg,
@@ -1669,11 +1663,28 @@
     } else if (DestRC == &X86::GR16RegClass) {
       Opc = X86::MOV16rr;
     } else if (DestRC == &X86::GR8RegClass) {
-      Opc = X86::MOV8rr;
+      // Copying to or from a physical H register requires a NOREX move. Otherwise
+      // use a normal move.
+      if (isHReg(DestReg) || isHReg(SrcReg))
+        Opc = X86::MOV8rr_NOREX;
+      else
+        Opc = X86::MOV8rr;
+    } else if (DestRC == &X86::GR64_RegClass) {
+      Opc = X86::MOV64rr;
     } else if (DestRC == &X86::GR32_RegClass) {
-      Opc = X86::MOV32_rr;
+      Opc = X86::MOV32rr;
     } else if (DestRC == &X86::GR16_RegClass) {
-      Opc = X86::MOV16_rr;
+      Opc = X86::MOV16rr;
+    } else if (DestRC == &X86::GR8_RegClass) {
+      Opc = X86::MOV8rr;
+    } else if (DestRC == &X86::GR64_NOREXRegClass) {
+      Opc = X86::MOV64rr;
+    } else if (DestRC == &X86::GR32_NOREXRegClass) {
+      Opc = X86::MOV32rr;
+    } else if (DestRC == &X86::GR16_NOREXRegClass) {
+      Opc = X86::MOV16rr;
+    } else if (DestRC == &X86::GR8_NOREXRegClass) {
+      Opc = X86::MOV8rr;
     } else if (DestRC == &X86::RFP32RegClass) {
       Opc = X86::MOV_Fp3232;
     } else if (DestRC == &X86::RFP64RegClass || DestRC == &X86::RSTRegClass) {
@@ -1721,7 +1732,7 @@
       return true;
     }
   }
-  
+
   // Moving from ST(0) turns into FpGET_ST0_32 etc.
   if (SrcRC == &X86::RSTRegClass) {
     // Copying from ST(0)/ST(1).
@@ -1779,10 +1790,22 @@
     Opc = X86::MOV16mr;
   } else if (RC == &X86::GR8RegClass) {
     Opc = X86::MOV8mr;
+  } else if (RC == &X86::GR64_RegClass) {
+    Opc = X86::MOV64mr;
   } else if (RC == &X86::GR32_RegClass) {
-    Opc = X86::MOV32_mr;
+    Opc = X86::MOV32mr;
   } else if (RC == &X86::GR16_RegClass) {
-    Opc = X86::MOV16_mr;
+    Opc = X86::MOV16mr;
+  } else if (RC == &X86::GR8_RegClass) {
+    Opc = X86::MOV8mr;
+  } else if (RC == &X86::GR64_NOREXRegClass) {
+    Opc = X86::MOV64mr;
+  } else if (RC == &X86::GR32_NOREXRegClass) {
+    Opc = X86::MOV32mr;
+  } else if (RC == &X86::GR16_NOREXRegClass) {
+    Opc = X86::MOV16mr;
+  } else if (RC == &X86::GR8_NOREXRegClass) {
+    Opc = X86::MOV8mr;
   } else if (RC == &X86::RFP80RegClass) {
     Opc = X86::ST_FpP80m;   // pops
   } else if (RC == &X86::RFP64RegClass) {
@@ -1847,10 +1870,22 @@
     Opc = X86::MOV16rm;
   } else if (RC == &X86::GR8RegClass) {
     Opc = X86::MOV8rm;
+  } else if (RC == &X86::GR64_RegClass) {
+    Opc = X86::MOV64rm;
   } else if (RC == &X86::GR32_RegClass) {
-    Opc = X86::MOV32_rm;
+    Opc = X86::MOV32rm;
   } else if (RC == &X86::GR16_RegClass) {
-    Opc = X86::MOV16_rm;
+    Opc = X86::MOV16rm;
+  } else if (RC == &X86::GR8_RegClass) {
+    Opc = X86::MOV8rm;
+  } else if (RC == &X86::GR64_NOREXRegClass) {
+    Opc = X86::MOV64rm;
+  } else if (RC == &X86::GR32_NOREXRegClass) {
+    Opc = X86::MOV32rm;
+  } else if (RC == &X86::GR16_NOREXRegClass) {
+    Opc = X86::MOV16rm;
+  } else if (RC == &X86::GR8_NOREXRegClass) {
+    Opc = X86::MOV8rm;
   } else if (RC == &X86::RFP80RegClass) {
     Opc = X86::LD_Fp80m;
   } else if (RC == &X86::RFP64RegClass) {

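A minimal sketch (illustrative names, not the LLVM API) of the byte-copy
opcode choice copyRegToReg now makes: any copy touching AH, BH, CH, or DH must
use the _NOREX variant, because once a REX prefix is present the encodings
that would otherwise name those registers mean SPL/BPL/SIL/DIL instead, so the
pairing simply cannot be encoded:

    // Stand-ins for X86::MOV8rr and X86::MOV8rr_NOREX.
    enum ByteCopyOpcode { MOV8rr, MOV8rr_NOREX };

    // Mirrors the isHReg() test above: if either side is an h register,
    // force the no-REX form so the encoder never emits a REX prefix.
    static ByteCopyOpcode pickByteCopy(bool DestIsHReg, bool SrcIsHReg) {
      return (DestIsHReg || SrcIsHReg) ? MOV8rr_NOREX : MOV8rr;
    }
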
Modified: llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.td?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.td (original)
+++ llvm/branches/Apple/Dib/lib/Target/X86/X86InstrInfo.td Wed Apr 15 13:19:52 2009
@@ -180,6 +180,13 @@
 def f80mem  : X86MemOperand<"printf80mem">;
 def f128mem : X86MemOperand<"printf128mem">;
 
+// A version of i8mem for use on x86-64 that uses GR64_NOREX instead of
+// plain GR64, so that it doesn't potentially require a REX prefix.
+def i8mem_NOREX : Operand<i64> {
+  let PrintMethod = "printi8mem";
+  let MIOperandInfo = (ops GR64_NOREX, i8imm, GR64_NOREX, i32imm, i8imm);
+}
+
 def lea32mem : Operand<i32> {
   let PrintMethod = "printi32mem";
   let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
@@ -354,6 +361,14 @@
 def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
   return N->hasOneUse();
 }]>;
+// An 'srl' node with a single use.
+def srl_su : PatFrag<(ops node:$lhs, node:$rhs), (srl node:$lhs, node:$rhs), [{
+  return N->hasOneUse();
+}]>;
+// A 'trunc' node with a single use.
+def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
+  return N->hasOneUse();
+}]>;
 
 // 'shld' and 'shrd' instruction patterns. Note that even though these have
 // the srl and shl in their patterns, the C++ code must still check for them,
@@ -723,7 +738,17 @@
 def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
                 "mov{l}\t{$src, $dst|$dst, $src}",
                 [(store GR32:$src, addr:$dst)]>;
-                
+
+// Versions of MOV8rr and MOV8mr that use i8mem_NOREX and GR8_NOREX so that they
+// can be used for copying and storing h registers, which can't be encoded when
+// a REX prefix is present.
+let neverHasSideEffects = 1 in
+def MOV8rr_NOREX : I<0x88, MRMDestReg, (outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
+                     "mov{b}\t{$src, $dst|$dst, $src}  # NOREX", []>;
+def MOV8mr_NOREX : I<0x88, MRMDestMem,
+                     (outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
+                     "mov{b}\t{$src, $dst|$dst, $src}  # NOREX", []>;
+
 //===----------------------------------------------------------------------===//
 //  Fixed-Register Multiplication and Division Instructions...
 //
@@ -2855,6 +2880,18 @@
                    "movz{wl|x}\t{$src, $dst|$dst, $src}",
                    [(set GR32:$dst, (zextloadi32i16 addr:$src))]>, TB;
 
+// These are the same as the regular MOVZX32rr8 and MOVZX32rm8
+// except that they use GR32_NOREX for the output operand register class
+// instead of GR32. This allows them to operate on h registers on x86-64.
+def MOVZX32_NOREXrr8 : I<0xB6, MRMSrcReg,
+                         (outs GR32_NOREX:$dst), (ins GR8:$src),
+                         "movz{bl|x}\t{$src, $dst|$dst, $src}  # NOREX",
+                         []>, TB;
+def MOVZX32_NOREXrm8 : I<0xB6, MRMSrcMem,
+                         (outs GR32_NOREX:$dst), (ins i8mem:$src),
+                         "movz{bl|x}\t{$src, $dst|$dst, $src}  # NOREX",
+                         []>, TB;
+
 let neverHasSideEffects = 1 in {
   let Defs = [AX], Uses = [AL] in
   def CBW : I<0x98, RawFrm, (outs), (ins),
@@ -2891,33 +2928,6 @@
                  [(set GR32:$dst, 0)]>;
 }
 
-// Basic operations on GR16 / GR32 subclasses GR16_ and GR32_ which contains only
-// those registers that have GR8 sub-registers (i.e. AX - DX, EAX - EDX).
-let neverHasSideEffects = 1, isAsCheapAsAMove = 1 in {
-def MOV16to16_ : I<0x89, MRMDestReg, (outs GR16_:$dst), (ins GR16:$src),
-                "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
-def MOV32to32_ : I<0x89, MRMDestReg, (outs GR32_:$dst), (ins GR32:$src),
-                "mov{l}\t{$src, $dst|$dst, $src}", []>;
-                
-def MOV16_rr : I<0x89, MRMDestReg, (outs GR16_:$dst), (ins GR16_:$src),
-                "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
-def MOV32_rr : I<0x89, MRMDestReg, (outs GR32_:$dst), (ins GR32_:$src),
-                "mov{l}\t{$src, $dst|$dst, $src}", []>;
-} // neverHasSideEffects
-
-let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in {
-def MOV16_rm : I<0x8B, MRMSrcMem, (outs GR16_:$dst), (ins i16mem:$src),
-                "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
-def MOV32_rm : I<0x8B, MRMSrcMem, (outs GR32_:$dst), (ins i32mem:$src),
-                "mov{l}\t{$src, $dst|$dst, $src}", []>;
-}
-let mayStore = 1, neverHasSideEffects = 1 in {
-def MOV16_mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16_:$src),
-                "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
-def MOV32_mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32_:$src),
-                "mov{l}\t{$src, $dst|$dst, $src}", []>;
-}
-
 //===----------------------------------------------------------------------===//
 // Thread Local Storage Instructions
 //
@@ -3387,38 +3397,61 @@
 
 // r & (2^16-1) ==> movz
 def : Pat<(and GR32:$src1, 0xffff),
-          (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR32:$src1, x86_subreg_16bit)))>;
+          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, x86_subreg_16bit))>;
 // r & (2^8-1) ==> movz
 def : Pat<(and GR32:$src1, 0xff),
-          (MOVZX32rr8 (i8 (EXTRACT_SUBREG (MOV32to32_ GR32:$src1),
-                                          x86_subreg_8bit)))>,
+          (MOVZX32rr8 (EXTRACT_SUBREG (COPY_TO_REGCLASS GR32:$src1, GR32_),
+                                      x86_subreg_8bit))>,
       Requires<[In32BitMode]>;
 // r & (2^8-1) ==> movz
 def : Pat<(and GR16:$src1, 0xff),
-          (MOVZX16rr8 (i8 (EXTRACT_SUBREG (MOV16to16_ GR16:$src1),
-                                          x86_subreg_8bit)))>,
+          (MOVZX16rr8 (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src1, GR16_),
+                                      x86_subreg_8bit))>,
       Requires<[In32BitMode]>;
 
 // sext_inreg patterns
 def : Pat<(sext_inreg GR32:$src, i16),
-          (MOVSX32rr16 (i16 (EXTRACT_SUBREG GR32:$src, x86_subreg_16bit)))>;
+          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, x86_subreg_16bit))>;
 def : Pat<(sext_inreg GR32:$src, i8),
-          (MOVSX32rr8 (i8 (EXTRACT_SUBREG (MOV32to32_ GR32:$src),
-                                          x86_subreg_8bit)))>,
+          (MOVSX32rr8 (EXTRACT_SUBREG (COPY_TO_REGCLASS GR32:$src, GR32_),
+                                      x86_subreg_8bit))>,
       Requires<[In32BitMode]>;
 def : Pat<(sext_inreg GR16:$src, i8),
-          (MOVSX16rr8 (i8 (EXTRACT_SUBREG (MOV16to16_ GR16:$src),
-                                          x86_subreg_8bit)))>,
+          (MOVSX16rr8 (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_),
+                                      x86_subreg_8bit))>,
       Requires<[In32BitMode]>;
 
 // trunc patterns
 def : Pat<(i16 (trunc GR32:$src)),
-          (i16 (EXTRACT_SUBREG GR32:$src, x86_subreg_16bit))>;
+          (EXTRACT_SUBREG GR32:$src, x86_subreg_16bit)>;
 def : Pat<(i8 (trunc GR32:$src)),
-          (i8 (EXTRACT_SUBREG (MOV32to32_ GR32:$src), x86_subreg_8bit))>,
+          (EXTRACT_SUBREG (COPY_TO_REGCLASS GR32:$src, GR32_),
+                          x86_subreg_8bit)>,
       Requires<[In32BitMode]>;
 def : Pat<(i8 (trunc GR16:$src)),
-          (i8 (EXTRACT_SUBREG (MOV16to16_ GR16:$src), x86_subreg_8bit))>,
+          (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_),
+                          x86_subreg_8bit)>,
+      Requires<[In32BitMode]>;
+
+// h-register tricks
+def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
+          (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_),
+                          x86_subreg_8bit_hi)>,
+      Requires<[In32BitMode]>;
+def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
+          (EXTRACT_SUBREG (COPY_TO_REGCLASS GR32:$src, GR32_),
+                          x86_subreg_8bit_hi)>,
+      Requires<[In32BitMode]>;
+def : Pat<(srl_su GR16:$src, (i8 8)),
+          (EXTRACT_SUBREG
+            (MOVZX32rr8
+              (EXTRACT_SUBREG (COPY_TO_REGCLASS GR16:$src, GR16_),
+                              x86_subreg_8bit_hi)),
+            x86_subreg_16bit)>,
+      Requires<[In32BitMode]>;
+def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
+          (MOVZX32rr8 (EXTRACT_SUBREG (COPY_TO_REGCLASS GR32:$src, GR32_),
+                                      x86_subreg_8bit_hi))>,
       Requires<[In32BitMode]>;
 
 // (shl x, 1) ==> (add x, x)

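Again purely as an illustration (C++, not from the patch): the source-level
shapes that the new srl_su/trunc_su fragments and 32-bit-mode patterns
correspond to. The _su ("single use") predicates keep these patterns from
firing when the shifted value is needed elsewhere, in which case the explicit
shift is still emitted:

    #include <cstdint>

    // trunc(srl(x, 8)): the high byte of a 16-bit value, addressable directly
    // as AH/BH/CH/DH when x lives in an "abcd" register.
    static uint8_t highByte16(uint16_t x) { return (uint8_t)(x >> 8); }

    // and(srl(x, 8), 255): the same byte already zero-extended, which the
    // pattern turns into a single movzbl from the h register.
    static uint32_t highByte32(uint32_t x) { return (x >> 8) & 255; }

    int main() {
      return (highByte16(0x0170) == 1 && highByte32(0x0170) == 1) ? 0 : 1;
    }
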
Modified: llvm/branches/Apple/Dib/lib/Target/X86/X86RegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Target/X86/X86RegisterInfo.h?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/lib/Target/X86/X86RegisterInfo.h (original)
+++ llvm/branches/Apple/Dib/lib/Target/X86/X86RegisterInfo.h Wed Apr 15 13:19:52 2009
@@ -35,7 +35,7 @@
   /// these indices must be kept in sync with the class indices in the 
   /// X86RegisterInfo.td file.
   enum SubregIndex {
-    SUBREG_8BIT = 1, SUBREG_16BIT = 2, SUBREG_32BIT = 3
+    SUBREG_8BIT = 1, SUBREG_8BIT_HI = 2, SUBREG_16BIT = 3, SUBREG_32BIT = 4
   };
 }
 

Modified: llvm/branches/Apple/Dib/lib/Target/X86/X86RegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Target/X86/X86RegisterInfo.td?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/lib/Target/X86/X86RegisterInfo.td (original)
+++ llvm/branches/Apple/Dib/lib/Target/X86/X86RegisterInfo.td Wed Apr 15 13:19:52 2009
@@ -49,17 +49,18 @@
   def R14B : Register<"r14b">, DwarfRegNum<[14, -2, -2]>;
   def R15B : Register<"r15b">, DwarfRegNum<[15, -2, -2]>;
 
-  // High registers X86-32 only
+  // High registers. On x86-64, these cannot be used in any instruction
+  // with a REX prefix.
   def AH : Register<"ah">, DwarfRegNum<[0, 0, 0]>;
   def DH : Register<"dh">, DwarfRegNum<[1, 2, 2]>;
   def CH : Register<"ch">, DwarfRegNum<[2, 1, 1]>;
   def BH : Register<"bh">, DwarfRegNum<[3, 3, 3]>;
 
   // 16-bit registers
-  def AX : RegisterWithSubRegs<"ax", [AH,AL]>, DwarfRegNum<[0, 0, 0]>;
-  def DX : RegisterWithSubRegs<"dx", [DH,DL]>, DwarfRegNum<[1, 2, 2]>;
-  def CX : RegisterWithSubRegs<"cx", [CH,CL]>, DwarfRegNum<[2, 1, 1]>;
-  def BX : RegisterWithSubRegs<"bx", [BH,BL]>, DwarfRegNum<[3, 3, 3]>;
+  def AX : RegisterWithSubRegs<"ax", [AL,AH]>, DwarfRegNum<[0, 0, 0]>;
+  def DX : RegisterWithSubRegs<"dx", [DL,DH]>, DwarfRegNum<[1, 2, 2]>;
+  def CX : RegisterWithSubRegs<"cx", [CL,CH]>, DwarfRegNum<[2, 1, 1]>;
+  def BX : RegisterWithSubRegs<"bx", [BL,BH]>, DwarfRegNum<[3, 3, 3]>;
   def SI : RegisterWithSubRegs<"si", [SIL]>, DwarfRegNum<[4, 6, 6]>;
   def DI : RegisterWithSubRegs<"di", [DIL]>, DwarfRegNum<[5, 7, 7]>;
   def BP : RegisterWithSubRegs<"bp", [BPL]>, DwarfRegNum<[6, 4, 5]>;
@@ -177,41 +178,45 @@
 //
 
 def x86_subreg_8bit    : PatLeaf<(i32 1)>;
-def x86_subreg_16bit   : PatLeaf<(i32 2)>;
-def x86_subreg_32bit   : PatLeaf<(i32 3)>;
+def x86_subreg_8bit_hi : PatLeaf<(i32 2)>;
+def x86_subreg_16bit   : PatLeaf<(i32 3)>;
+def x86_subreg_32bit   : PatLeaf<(i32 4)>;
 
 def : SubRegSet<1, [AX, CX, DX, BX, SP,  BP,  SI,  DI,  
                     R8W, R9W, R10W, R11W, R12W, R13W, R14W, R15W],
                    [AL, CL, DL, BL, SPL, BPL, SIL, DIL, 
                     R8B, R9B, R10B, R11B, R12B, R13B, R14B, R15B]>;
 
-// It's unclear if this subreg set is safe, given that not all registers
-// in the class have an 'H' subreg.
-// def : SubRegSet<2, [AX, CX, DX, BX],
-//                    [AH, CH, DH, BH]>;
+def : SubRegSet<2, [AX, CX, DX, BX],
+                   [AH, CH, DH, BH]>;
 
 def : SubRegSet<1, [EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI,  
                     R8D, R9D, R10D, R11D, R12D, R13D, R14D, R15D],
                    [AL, CL, DL, BL, SPL, BPL, SIL, DIL, 
                     R8B, R9B, R10B, R11B, R12B, R13B, R14B, R15B]>;
 
-def : SubRegSet<2, [EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI,  
+def : SubRegSet<2, [EAX, ECX, EDX, EBX],
+                   [AH, CH, DH, BH]>;
+
+def : SubRegSet<3, [EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI,
                     R8D, R9D, R10D, R11D, R12D, R13D, R14D, R15D],
                    [AX,  CX,  DX,  BX,  SP,  BP,  SI,  DI, 
                     R8W, R9W, R10W, R11W, R12W, R13W, R14W, R15W]>;
 
-
 def : SubRegSet<1, [RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI,  
                     R8,  R9,  R10, R11, R12, R13, R14, R15],
                    [AL, CL, DL, BL, SPL, BPL, SIL, DIL, 
                     R8B, R9B, R10B, R11B, R12B, R13B, R14B, R15B]>;
 
-def : SubRegSet<2, [RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI,  
+def : SubRegSet<2, [RAX, RCX, RDX, RBX],
+                   [AH, CH, DH, BH]>;
+
+def : SubRegSet<3, [RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI,
                     R8,  R9,  R10, R11, R12, R13, R14, R15],
                    [AX,  CX,  DX,  BX,  SP,  BP,  SI,  DI, 
                     R8W, R9W, R10W, R11W, R12W, R13W, R14W, R15W]>;
-                    
-def : SubRegSet<3, [RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI,  
+
+def : SubRegSet<4, [RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI,
                     R8,  R9,  R10, R11, R12, R13, R14, R15],
                    [EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI, 
                     R8D, R9D, R10D, R11D, R12D, R13D, R14D, R15D]>;
@@ -228,7 +233,11 @@
 // R8B, ... R15B. 
 // Allocate R12 and R13 last, as these require an extra byte when
 // encoded in x86_64 instructions.
-// FIXME: Allow AH, CH, DH, BH in 64-mode for non-REX instructions,
+// FIXME: Allow AH, CH, DH, BH to be used as general-purpose registers in
+// 64-bit mode. The main complication is that they cannot be encoded in an
+// instruction requiring a REX prefix, while SIL, DIL, BPL, R8D, etc.
+// require a REX prefix. For example, "addb %ah, %dil" and "movzbl %ah, %r8d"
+// cannot be encoded.
 def GR8 : RegisterClass<"X86", [i8],  8,
                         [AL, CL, DL, BL, AH, CH, DH, BH, SIL, DIL, BPL, SPL,
                          R8B, R9B, R10B, R11B, R14B, R15B, R12B, R13B]> {
@@ -287,7 +296,7 @@
 def GR16 : RegisterClass<"X86", [i16], 16,
                          [AX, CX, DX, SI, DI, BX, BP, SP,
                           R8W, R9W, R10W, R11W, R14W, R15W, R12W, R13W]> {
-  let SubRegClassList = [GR8];
+  let SubRegClassList = [GR8, GR8];
   let MethodProtos = [{
     iterator allocation_order_begin(const MachineFunction &MF) const;
     iterator allocation_order_end(const MachineFunction &MF) const;
@@ -303,7 +312,7 @@
     static const unsigned X86_GR16_AO_32_fp[] = {
       X86::AX, X86::CX, X86::DX, X86::SI, X86::DI, X86::BX
     };
-    // If not, just don't allocate SPL.
+    // If not, just don't allocate SP.
     static const unsigned X86_GR16_AO_64[] = {
       X86::AX,  X86::CX,   X86::DX,   X86::SI,   X86::DI,
       X86::R8W, X86::R9W,  X86::R10W, X86::R11W,
@@ -355,7 +364,7 @@
 def GR32 : RegisterClass<"X86", [i32], 32, 
                          [EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP,
                           R8D, R9D, R10D, R11D, R14D, R15D, R12D, R13D]> {
-  let SubRegClassList = [GR8, GR16];
+  let SubRegClassList = [GR8, GR8, GR16];
   let MethodProtos = [{
     iterator allocation_order_begin(const MachineFunction &MF) const;
     iterator allocation_order_end(const MachineFunction &MF) const;
@@ -371,7 +380,7 @@
     static const unsigned X86_GR32_AO_32_fp[] = {
       X86::EAX, X86::ECX, X86::EDX, X86::ESI, X86::EDI, X86::EBX
     };
-    // If not, just don't allocate SPL.
+    // If not, just don't allocate ESP.
     static const unsigned X86_GR32_AO_64[] = {
       X86::EAX, X86::ECX,  X86::EDX,  X86::ESI,  X86::EDI,
       X86::R8D, X86::R9D,  X86::R10D, X86::R11D,
@@ -423,7 +432,7 @@
 def GR64 : RegisterClass<"X86", [i64], 64, 
                          [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
                           RBX, R14, R15, R12, R13, RBP, RSP]> {
-  let SubRegClassList = [GR8, GR16, GR32];
+  let SubRegClassList = [GR8, GR8, GR16, GR32];
   let MethodProtos = [{
     iterator allocation_order_end(const MachineFunction &MF) const;
   }];
@@ -444,13 +453,202 @@
 }
 
 
-// GR16, GR32 subclasses which contain registers that have GR8 sub-registers.
-// These should only be used for 32-bit mode.
+// GR8_, GR16_, GR32_, GR64_ - Subclasses of GR8, GR16, GR32, and GR64
+// which contain just the "a", "b", "c", and "d" registers. On x86-32,
+// GR16_ and GR32_ are classes for registers that support 8-bit subreg
+// operations. On x86-64, GR16_, GR32_, and GR64_ are classes for registers
+// that support 8-bit h-register operations.
+def GR8_ : RegisterClass<"X86", [i8], 8, [AL, CL, DL, BL]> {
+}
 def GR16_ : RegisterClass<"X86", [i16], 16, [AX, CX, DX, BX]> {
-  let SubRegClassList = [GR8];
+  let SubRegClassList = [GR8_, GR8_];
 }
 def GR32_ : RegisterClass<"X86", [i32], 32, [EAX, ECX, EDX, EBX]> {
-  let SubRegClassList = [GR8, GR16];
+  let SubRegClassList = [GR8_, GR8_, GR16_];
+}
+def GR64_ : RegisterClass<"X86", [i64], 64, [RAX, RCX, RDX, RBX]> {
+  let SubRegClassList = [GR8_, GR8_, GR16_, GR32_];
+}
+
+// GR8_NOREX, GR16_NOREX, GR32_NOREX, GR64_NOREX - Subclasses of
+// GR8, GR16, GR32, and GR64 which contain only the first 8 GPRs.
+// On x86-64, GR64_NOREX, GR32_NOREX and GR16_NOREX are the classes
+// of registers which do not by themselves require a REX prefix.
+def GR8_NOREX : RegisterClass<"X86", [i8], 8,
+                              [AL, CL, DL, BL, AH, CH, DH, BH,
+                               SIL, DIL, BPL, SPL]> {
+  let MethodProtos = [{
+    iterator allocation_order_begin(const MachineFunction &MF) const;
+    iterator allocation_order_end(const MachineFunction &MF) const;
+  }];
+  let MethodBodies = [{
+    // Does the function dedicate RBP / EBP to being a frame ptr?
+    // If so, don't allocate SPL or BPL.
+    static const unsigned X86_GR8_NOREX_AO_64_fp[] = {
+      X86::AL, X86::CL, X86::DL, X86::SIL, X86::DIL, X86::BL
+    };
+    // If not, just don't allocate SPL.
+    static const unsigned X86_GR8_NOREX_AO_64[] = {
+      X86::AL, X86::CL, X86::DL, X86::SIL, X86::DIL, X86::BL, X86::BPL
+    };
+    // In 32-bit mode, none of the 8-bit registers aliases EBP or ESP.
+    static const unsigned X86_GR8_NOREX_AO_32[] = {
+      X86::AL, X86::CL, X86::DL, X86::AH, X86::CH, X86::DH, X86::BL, X86::BH
+    };
+
+    GR8_NOREXClass::iterator
+    GR8_NOREXClass::allocation_order_begin(const MachineFunction &MF) const {
+      const TargetMachine &TM = MF.getTarget();
+      const TargetRegisterInfo *RI = TM.getRegisterInfo();
+      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+      if (!Subtarget.is64Bit())
+        return X86_GR8_NOREX_AO_32;
+      else if (RI->hasFP(MF))
+        return X86_GR8_NOREX_AO_64_fp;
+      else
+        return X86_GR8_NOREX_AO_64;
+    }
+
+    GR8_NOREXClass::iterator
+    GR8_NOREXClass::allocation_order_end(const MachineFunction &MF) const {
+      const TargetMachine &TM = MF.getTarget();
+      const TargetRegisterInfo *RI = TM.getRegisterInfo();
+      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+      if (!Subtarget.is64Bit())
+        return X86_GR8_NOREX_AO_32 +
+               (sizeof(X86_GR8_NOREX_AO_32) / sizeof(unsigned));
+      else if (RI->hasFP(MF))
+        return X86_GR8_NOREX_AO_64_fp +
+               (sizeof(X86_GR8_NOREX_AO_64_fp) / sizeof(unsigned));
+      else
+        return X86_GR8_NOREX_AO_64 +
+               (sizeof(X86_GR8_NOREX_AO_64) / sizeof(unsigned));
+    }
+  }];
+}
+def GR16_NOREX : RegisterClass<"X86", [i16], 16,
+                               [AX, CX, DX, SI, DI, BX, BP, SP]> {
+  let SubRegClassList = [GR8_NOREX, GR8_NOREX];
+  let MethodProtos = [{
+    iterator allocation_order_begin(const MachineFunction &MF) const;
+    iterator allocation_order_end(const MachineFunction &MF) const;
+  }];
+  let MethodBodies = [{
+    // Does the function dedicate RBP / EBP to being a frame ptr?
+    // If so, don't allocate SP or BP.
+    static const unsigned X86_GR16_AO_fp[] = {
+      X86::AX, X86::CX, X86::DX, X86::SI, X86::DI, X86::BX
+    };
+    // If not, just don't allocate SP.
+    static const unsigned X86_GR16_AO[] = {
+      X86::AX, X86::CX, X86::DX, X86::SI, X86::DI, X86::BX, X86::BP
+    };
+
+    GR16_NOREXClass::iterator
+    GR16_NOREXClass::allocation_order_begin(const MachineFunction &MF) const {
+      const TargetMachine &TM = MF.getTarget();
+      const TargetRegisterInfo *RI = TM.getRegisterInfo();
+      if (RI->hasFP(MF))
+        return X86_GR16_AO_fp;
+      else
+        return X86_GR16_AO;
+    }
+
+    GR16_NOREXClass::iterator
+    GR16_NOREXClass::allocation_order_end(const MachineFunction &MF) const {
+      const TargetMachine &TM = MF.getTarget();
+      const TargetRegisterInfo *RI = TM.getRegisterInfo();
+      if (RI->hasFP(MF))
+        return X86_GR16_AO_fp+(sizeof(X86_GR16_AO_fp)/sizeof(unsigned));
+      else
+        return X86_GR16_AO + (sizeof(X86_GR16_AO) / sizeof(unsigned));
+    }
+  }];
+}
+// GR32_NOREX - GR32 registers which do not require a REX prefix.
+def GR32_NOREX : RegisterClass<"X86", [i32], 32,
+                               [EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP]> {
+  let SubRegClassList = [GR8_NOREX, GR8_NOREX, GR16_NOREX];
+  let MethodProtos = [{
+    iterator allocation_order_begin(const MachineFunction &MF) const;
+    iterator allocation_order_end(const MachineFunction &MF) const;
+  }];
+  let MethodBodies = [{
+    // Does the function dedicate RBP / EBP to being a frame ptr?
+    // If so, don't allocate ESP or EBP.
+    static const unsigned X86_GR32_NOREX_AO_fp[] = {
+      X86::EAX, X86::ECX, X86::EDX, X86::ESI, X86::EDI, X86::EBX
+    };
+    // If not, just don't allocate ESP.
+    static const unsigned X86_GR32_NOREX_AO[] = {
+      X86::EAX, X86::ECX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP
+    };
+
+    GR32_NOREXClass::iterator
+    GR32_NOREXClass::allocation_order_begin(const MachineFunction &MF) const {
+      const TargetMachine &TM = MF.getTarget();
+      const TargetRegisterInfo *RI = TM.getRegisterInfo();
+      if (RI->hasFP(MF))
+        return X86_GR32_NOREX_AO_fp;
+      else
+        return X86_GR32_NOREX_AO;
+    }
+
+    GR32_NOREXClass::iterator
+    GR32_NOREXClass::allocation_order_end(const MachineFunction &MF) const {
+      const TargetMachine &TM = MF.getTarget();
+      const TargetRegisterInfo *RI = TM.getRegisterInfo();
+      if (RI->hasFP(MF))
+        return X86_GR32_NOREX_AO_fp +
+               (sizeof(X86_GR32_NOREX_AO_fp) / sizeof(unsigned));
+      else
+        return X86_GR32_NOREX_AO +
+               (sizeof(X86_GR32_NOREX_AO) / sizeof(unsigned));
+    }
+  }];
+}
+
+// GR64_NOREX - GR64 registers which do not require a REX prefix.
+def GR64_NOREX : RegisterClass<"X86", [i64], 64,
+                               [RAX, RCX, RDX, RSI, RDI, RBX, RBP, RSP]> {
+  let SubRegClassList = [GR8_NOREX, GR8_NOREX, GR16_NOREX, GR32_NOREX];
+  let MethodProtos = [{
+    iterator allocation_order_begin(const MachineFunction &MF) const;
+    iterator allocation_order_end(const MachineFunction &MF) const;
+  }];
+  let MethodBodies = [{
+    // Does the function dedicate RBP / EBP to being a frame ptr?
+    // If so, don't allocate RSP or RBP.
+    static const unsigned X86_GR64_NOREX_AO_fp[] = {
+      X86::RAX, X86::RCX, X86::RDX, X86::RSI, X86::RDI, X86::RBX
+    };
+    // If not, just don't allocate RSP.
+    static const unsigned X86_GR64_NOREX_AO[] = {
+      X86::RAX, X86::RCX, X86::RDX, X86::RSI, X86::RDI, X86::RBX, X86::RBP
+    };
+
+    GR64_NOREXClass::iterator
+    GR64_NOREXClass::allocation_order_begin(const MachineFunction &MF) const {
+      const TargetMachine &TM = MF.getTarget();
+      const TargetRegisterInfo *RI = TM.getRegisterInfo();
+      if (RI->hasFP(MF))
+        return X86_GR64_NOREX_AO_fp;
+      else
+        return X86_GR64_NOREX_AO;
+    }
+
+    GR64_NOREXClass::iterator
+    GR64_NOREXClass::allocation_order_end(const MachineFunction &MF) const {
+      const TargetMachine &TM = MF.getTarget();
+      const TargetRegisterInfo *RI = TM.getRegisterInfo();
+      if (RI->hasFP(MF))
+        return X86_GR64_NOREX_AO_fp +
+               (sizeof(X86_GR64_NOREX_AO_fp) / sizeof(unsigned));
+      else
+        return X86_GR64_NOREX_AO +
+               (sizeof(X86_GR64_NOREX_AO) / sizeof(unsigned));
+    }
+  }];
 }
 
 // A class to support the 'A' assembler constraint: EAX then EDX.

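The *_NOREX class bodies above all repeat one idiom, sketched here in
isolation with made-up values: allocation_order_begin picks one of several
static arrays depending on whether the function dedicates a frame pointer, and
allocation_order_end must return one past the last element of that same array,
computed with sizeof arithmetic. Note also that the doubled GR8 entries in the
SubRegClassList changes correspond to the two 8-bit sub-register indices (low
and high) that now exist.

    // Illustrative only; the numbers stand in for register enum values.
    static const unsigned OrderFP[]   = { 1, 2, 3 };     // no SP/BP with a frame pointer
    static const unsigned OrderNoFP[] = { 1, 2, 3, 4 };  // BP allocatable, SP still excluded

    static const unsigned *orderBegin(bool HasFP) {
      return HasFP ? OrderFP : OrderNoFP;
    }

    static const unsigned *orderEnd(bool HasFP) {
      // One past the end of whichever array orderBegin() chose.
      return HasFP ? OrderFP   + sizeof(OrderFP)   / sizeof(OrderFP[0])
                   : OrderNoFP + sizeof(OrderNoFP) / sizeof(OrderNoFP[0]);
    }
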
Added: llvm/branches/Apple/Dib/test/CodeGen/X86/h-register-addressing-32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/CodeGen/X86/h-register-addressing-32.ll?rev=69194&view=auto

==============================================================================
--- llvm/branches/Apple/Dib/test/CodeGen/X86/h-register-addressing-32.ll (added)
+++ llvm/branches/Apple/Dib/test/CodeGen/X86/h-register-addressing-32.ll Wed Apr 15 13:19:52 2009
@@ -0,0 +1,53 @@
+; RUN: llvm-as < %s | llc -march=x86 | grep {movzbl	%\[abcd\]h,} | count 7
+
+; Use h-register extract and zero-extend.
+
+define double @foo8(double* nocapture inreg %p, i32 inreg %x) nounwind readonly {
+  %t0 = lshr i32 %x, 8
+  %t1 = and i32 %t0, 255
+  %t2 = getelementptr double* %p, i32 %t1
+  %t3 = load double* %t2, align 8
+  ret double %t3
+}
+define float @foo4(float* nocapture inreg %p, i32 inreg %x) nounwind readonly {
+  %t0 = lshr i32 %x, 8
+  %t1 = and i32 %t0, 255
+  %t2 = getelementptr float* %p, i32 %t1
+  %t3 = load float* %t2, align 8
+  ret float %t3
+}
+define i16 @foo2(i16* nocapture inreg %p, i32 inreg %x) nounwind readonly {
+  %t0 = lshr i32 %x, 8
+  %t1 = and i32 %t0, 255
+  %t2 = getelementptr i16* %p, i32 %t1
+  %t3 = load i16* %t2, align 8
+  ret i16 %t3
+}
+define i8 @foo1(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
+  %t0 = lshr i32 %x, 8
+  %t1 = and i32 %t0, 255
+  %t2 = getelementptr i8* %p, i32 %t1
+  %t3 = load i8* %t2, align 8
+  ret i8 %t3
+}
+define i8 @bar8(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
+  %t0 = lshr i32 %x, 5
+  %t1 = and i32 %t0, 2040
+  %t2 = getelementptr i8* %p, i32 %t1
+  %t3 = load i8* %t2, align 8
+  ret i8 %t3
+}
+define i8 @bar4(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
+  %t0 = lshr i32 %x, 6
+  %t1 = and i32 %t0, 1020
+  %t2 = getelementptr i8* %p, i32 %t1
+  %t3 = load i8* %t2, align 8
+  ret i8 %t3
+}
+define i8 @bar2(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
+  %t0 = lshr i32 %x, 7
+  %t1 = and i32 %t0, 510
+  %t2 = getelementptr i8* %p, i32 %t1
+  %t3 = load i8* %t2, align 8
+  ret i8 %t3
+}

Added: llvm/branches/Apple/Dib/test/CodeGen/X86/h-register-addressing-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/CodeGen/X86/h-register-addressing-64.ll?rev=69194&view=auto

==============================================================================
--- llvm/branches/Apple/Dib/test/CodeGen/X86/h-register-addressing-64.ll (added)
+++ llvm/branches/Apple/Dib/test/CodeGen/X86/h-register-addressing-64.ll Wed Apr 15 13:19:52 2009
@@ -0,0 +1,53 @@
+; RUN: llvm-as < %s | llc -march=x86-64 | grep {movzbl	%\[abcd\]h,} | count 7
+
+; Use h-register extract and zero-extend.
+
+define double @foo8(double* nocapture inreg %p, i64 inreg %x) nounwind readonly {
+  %t0 = lshr i64 %x, 8
+  %t1 = and i64 %t0, 255
+  %t2 = getelementptr double* %p, i64 %t1
+  %t3 = load double* %t2, align 8
+  ret double %t3
+}
+define float @foo4(float* nocapture inreg %p, i64 inreg %x) nounwind readonly {
+  %t0 = lshr i64 %x, 8
+  %t1 = and i64 %t0, 255
+  %t2 = getelementptr float* %p, i64 %t1
+  %t3 = load float* %t2, align 8
+  ret float %t3
+}
+define i16 @foo2(i16* nocapture inreg %p, i64 inreg %x) nounwind readonly {
+  %t0 = lshr i64 %x, 8
+  %t1 = and i64 %t0, 255
+  %t2 = getelementptr i16* %p, i64 %t1
+  %t3 = load i16* %t2, align 8
+  ret i16 %t3
+}
+define i8 @foo1(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
+  %t0 = lshr i64 %x, 8
+  %t1 = and i64 %t0, 255
+  %t2 = getelementptr i8* %p, i64 %t1
+  %t3 = load i8* %t2, align 8
+  ret i8 %t3
+}
+define i8 @bar8(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
+  %t0 = lshr i64 %x, 5
+  %t1 = and i64 %t0, 2040
+  %t2 = getelementptr i8* %p, i64 %t1
+  %t3 = load i8* %t2, align 8
+  ret i8 %t3
+}
+define i8 @bar4(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
+  %t0 = lshr i64 %x, 6
+  %t1 = and i64 %t0, 1020
+  %t2 = getelementptr i8* %p, i64 %t1
+  %t3 = load i8* %t2, align 8
+  ret i8 %t3
+}
+define i8 @bar2(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
+  %t0 = lshr i64 %x, 7
+  %t1 = and i64 %t0, 510
+  %t2 = getelementptr i8* %p, i64 %t1
+  %t3 = load i8* %t2, align 8
+  ret i8 %t3
+}

Added: llvm/branches/Apple/Dib/test/CodeGen/X86/h-register-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/CodeGen/X86/h-register-store.ll?rev=69194&view=auto

==============================================================================
--- llvm/branches/Apple/Dib/test/CodeGen/X86/h-register-store.ll (added)
+++ llvm/branches/Apple/Dib/test/CodeGen/X86/h-register-store.ll Wed Apr 15 13:19:52 2009
@@ -0,0 +1,27 @@
+; RUN: llvm-as < %s | llc -march=x86-64 > %t
+; RUN: grep mov %t | count 6
+; RUN: grep {movb	%ah, (%rsi)} %t | count 3
+; RUN: llvm-as < %s | llc -march=x86 > %t
+; RUN: grep mov %t | count 3
+; RUN: grep {movb	%ah, (%e} %t | count 3
+
+; Use h-register extract and store.
+
+define void @foo16(i16 inreg %p, i8* inreg %z) nounwind {
+  %q = lshr i16 %p, 8
+  %t = trunc i16 %q to i8
+  store i8 %t, i8* %z
+  ret void
+}
+define void @foo32(i32 inreg %p, i8* inreg %z) nounwind {
+  %q = lshr i32 %p, 8
+  %t = trunc i32 %q to i8
+  store i8 %t, i8* %z
+  ret void
+}
+define void @foo64(i64 inreg %p, i8* inreg %z) nounwind {
+  %q = lshr i64 %p, 8
+  %t = trunc i64 %q to i8
+  store i8 %t, i8* %z
+  ret void
+}

Added: llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers-0.ll?rev=69194&view=auto

==============================================================================
--- llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers-0.ll (added)
+++ llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers-0.ll Wed Apr 15 13:19:52 2009
@@ -0,0 +1,48 @@
+; RUN: llvm-as < %s | llc -march=x86-64 | grep {movzbl	%\[abcd\]h,} | count 4
+; RUN: llvm-as < %s | llc -march=x86    > %t
+; RUN: grep {incb	%ah} %t | count 3
+; RUN: grep {movzbl	%ah,} %t | count 3
+
+; Use h registers. On x86-64, codegen doesn't support general allocation
+; of h registers yet, due to x86 encoding complications.
+
+define void @bar64(i64 inreg %x, i8* inreg %p) nounwind {
+  %t0 = lshr i64 %x, 8
+  %t1 = trunc i64 %t0 to i8
+  %t2 = add i8 %t1, 1
+  store i8 %t2, i8* %p
+  ret void
+}
+
+define void @bar32(i32 inreg %x, i8* inreg %p) nounwind {
+  %t0 = lshr i32 %x, 8
+  %t1 = trunc i32 %t0 to i8
+  %t2 = add i8 %t1, 1
+  store i8 %t2, i8* %p
+  ret void
+}
+
+define void @bar16(i16 inreg %x, i8* inreg %p) nounwind {
+  %t0 = lshr i16 %x, 8
+  %t1 = trunc i16 %t0 to i8
+  %t2 = add i8 %t1, 1
+  store i8 %t2, i8* %p
+  ret void
+}
+
+define i64 @qux64(i64 inreg %x) nounwind {
+  %t0 = lshr i64 %x, 8
+  %t1 = and i64 %t0, 255
+  ret i64 %t1
+}
+
+define i32 @qux32(i32 inreg %x) nounwind {
+  %t0 = lshr i32 %x, 8
+  %t1 = and i32 %t0, 255
+  ret i32 %t1
+}
+
+define i16 @qux16(i16 inreg %x) nounwind {
+  %t0 = lshr i16 %x, 8
+  ret i16 %t0
+}

Added: llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers-1.ll?rev=69194&view=auto

==============================================================================
--- llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers-1.ll (added)
+++ llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers-1.ll Wed Apr 15 13:19:52 2009
@@ -0,0 +1,39 @@
+; RUN: llvm-as < %s | llc -march=x86-64 > %t
+; RUN: grep {movzbl	%\[abcd\]h,} %t | count 8
+; RUN: grep {%\[abcd\]h} %t | not grep {%r\[\[:digit:\]\]*d}
+
+; LLVM creates virtual registers for values live across blocks
+; based on the type of the value. Make sure that the extracts
+; here use the GR64_NOREX register class for their result,
+; instead of plain GR64.
+
+define i64 @foo(i64 %a, i64 %b, i64 %c, i64 %d,
+                i64 %e, i64 %f, i64 %g, i64 %h) {
+  %sa = lshr i64 %a, 8
+  %A = and i64 %sa, 255
+  %sb = lshr i64 %b, 8
+  %B = and i64 %sb, 255
+  %sc = lshr i64 %c, 8
+  %C = and i64 %sc, 255
+  %sd = lshr i64 %d, 8
+  %D = and i64 %sd, 255
+  %se = lshr i64 %e, 8
+  %E = and i64 %se, 255
+  %sf = lshr i64 %f, 8
+  %F = and i64 %sf, 255
+  %sg = lshr i64 %g, 8
+  %G = and i64 %sg, 255
+  %sh = lshr i64 %h, 8
+  %H = and i64 %sh, 255
+  br label %next
+
+next:
+  %u = add i64 %A, %B
+  %v = add i64 %C, %D
+  %w = add i64 %E, %F
+  %x = add i64 %G, %H
+  %y = add i64 %u, %v
+  %z = add i64 %w, %x
+  %t = add i64 %y, %z
+  ret i64 %t
+}

Added: llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers-2.ll?rev=69194&view=auto

==============================================================================
--- llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers-2.ll (added)
+++ llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers-2.ll Wed Apr 15 13:19:52 2009
@@ -0,0 +1,15 @@
+; RUN: llvm-as < %s | llc -march=x86 > %t
+; RUN: grep {movzbl	%\[abcd\]h,} %t | count 1
+; RUN: grep {shll	\$3,} %t | count 1
+
+; Use an h register, but don't omit the explicit shift for
+; non-address use(s).
+
+define i32 @foo(i8* %x, i32 %y) nounwind {
+	%t0 = lshr i32 %y, 8		; <i32> [#uses=1]
+	%t1 = and i32 %t0, 255		; <i32> [#uses=2]
+        %t2 = shl i32 %t1, 3
+	%t3 = getelementptr i8* %x, i32 %t2		; <i8*> [#uses=1]
+	store i8 77, i8* %t3, align 4
+	ret i32 %t2
+}

Added: llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers.ll?rev=69194&view=auto

==============================================================================
--- llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers.ll (added)
+++ llvm/branches/Apple/Dib/test/CodeGen/X86/h-registers.ll Wed Apr 15 13:19:52 2009
@@ -0,0 +1,48 @@
+; RUN: llvm-as < %s | llc -march=x86-64 | grep {movzbl	%\[abcd\]h,} | count 4
+; RUN: llvm-as < %s | llc -march=x86    > %t
+; RUN: grep {incb	%ah} %t | count 3
+; RUN: grep {movzbl	%ah,} %t | count 3
+
+; Use h registers. On x86-64, codegen doesn't support general allocation
+; of h registers yet, due to x86 encoding complications.
+
+define void @bar64(i64 inreg %x, i8* inreg %p) nounwind {
+  %t0 = lshr i64 %x, 8
+  %t1 = trunc i64 %t0 to i8
+  %t2 = add i8 %t1, 1
+  store i8 %t2, i8* %p
+  ret void
+}
+
+define void @bar32(i32 inreg %x, i8* inreg %p) nounwind {
+  %t0 = lshr i32 %x, 8
+  %t1 = trunc i32 %t0 to i8
+  %t2 = add i8 %t1, 1
+  store i8 %t2, i8* %p
+  ret void
+}
+
+define void @bar16(i16 inreg %x, i8* inreg %p) nounwind {
+  %t0 = lshr i16 %x, 8
+  %t1 = trunc i16 %t0 to i8
+  %t2 = add i8 %t1, 1
+  store i8 %t2, i8* %p
+  ret void
+}
+
+define i64 @qux64(i64 inreg %x) nounwind {
+  %t0 = lshr i64 %x, 8
+  %t1 = and i64 %t0, 255
+  ret i64 %t1
+}
+
+define i32 @qux32(i32 inreg %x) nounwind {
+  %t0 = lshr i32 %x, 8
+  %t1 = and i32 %t0, 255
+  ret i32 %t1
+}
+
+define i16 @qux16(i16 inreg %x) nounwind {
+  %t0 = lshr i16 %x, 8
+  ret i16 %t0
+}

Modified: llvm/branches/Apple/Dib/test/CodeGen/X86/inline-asm-out-regs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/CodeGen/X86/inline-asm-out-regs.ll?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/test/CodeGen/X86/inline-asm-out-regs.ll (original)
+++ llvm/branches/Apple/Dib/test/CodeGen/X86/inline-asm-out-regs.ll Wed Apr 15 13:19:52 2009
@@ -1,6 +1,4 @@
 ; RUN: llvm-as < %s | llc -mtriple=i386-unknown-linux-gnu
-; XFAIL: *
-; Expected to run out of registers during allocation.
 ; PR3391
 
 @pci_indirect = external global { }             ; <{ }*> [#uses=1]

Modified: llvm/branches/Apple/Dib/utils/TableGen/CodeEmitterGen.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/utils/TableGen/CodeEmitterGen.cpp?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/utils/TableGen/CodeEmitterGen.cpp (original)
+++ llvm/branches/Apple/Dib/utils/TableGen/CodeEmitterGen.cpp Wed Apr 15 13:19:52 2009
@@ -33,8 +33,9 @@
         R->getName() == "EXTRACT_SUBREG" ||
         R->getName() == "INSERT_SUBREG" ||
         R->getName() == "IMPLICIT_DEF" ||
-        R->getName() == "SUBREG_TO_REG") continue;
-    
+        R->getName() == "SUBREG_TO_REG" ||
+        R->getName() == "COPY_TO_REGCLASS") continue;
+
     BitsInit *BI = R->getValueAsBitsInit("Inst");
 
     unsigned numBits = BI->getNumBits();
@@ -109,7 +110,8 @@
         R->getName() == "EXTRACT_SUBREG" ||
         R->getName() == "INSERT_SUBREG" ||
         R->getName() == "IMPLICIT_DEF" ||
-        R->getName() == "SUBREG_TO_REG") {
+        R->getName() == "SUBREG_TO_REG" ||
+        R->getName() == "COPY_TO_REGCLASS") {
       o << "    0U,\n";
       continue;
     }
@@ -146,8 +148,9 @@
         InstName == "EXTRACT_SUBREG" ||
         InstName == "INSERT_SUBREG" ||
         InstName == "IMPLICIT_DEF" ||
-        InstName == "SUBREG_TO_REG") continue;
-    
+        InstName == "SUBREG_TO_REG" ||
+        InstName == "COPY_TO_REGCLASS") continue;
+
     BitsInit *BI = R->getValueAsBitsInit("Inst");
     const std::vector<RecordVal> &Vals = R->getValues();
     CodeGenInstruction &CGI = Target.getInstruction(InstName);

Modified: llvm/branches/Apple/Dib/utils/TableGen/CodeGenDAGPatterns.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/utils/TableGen/CodeGenDAGPatterns.cpp?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/utils/TableGen/CodeGenDAGPatterns.cpp (original)
+++ llvm/branches/Apple/Dib/utils/TableGen/CodeGenDAGPatterns.cpp Wed Apr 15 13:19:52 2009
@@ -884,6 +884,12 @@
       MadeChange = getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
     MadeChange |= UpdateNodeType(MVT::isVoid, TP);
     return MadeChange;
+  } else if (getOperator()->getName() == "COPY_TO_REGCLASS") {
+    bool MadeChange = false;
+    MadeChange |= getChild(0)->ApplyTypeConstraints(TP, NotRegisters);
+    MadeChange |= getChild(1)->ApplyTypeConstraints(TP, NotRegisters);
+    MadeChange |= UpdateNodeType(getChild(1)->getTypeNum(0), TP);
+    return MadeChange;
   } else if (const CodeGenIntrinsic *Int = getIntrinsicInfo(CDP)) {
     bool MadeChange = false;
 

Modified: llvm/branches/Apple/Dib/utils/TableGen/CodeGenTarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/utils/TableGen/CodeGenTarget.cpp?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/utils/TableGen/CodeGenTarget.cpp (original)
+++ llvm/branches/Apple/Dib/utils/TableGen/CodeGenTarget.cpp Wed Apr 15 13:19:52 2009
@@ -344,7 +344,12 @@
   if (I == Instructions.end())
     throw "Could not find 'SUBREG_TO_REG' instruction!";
   const CodeGenInstruction *SUBREG_TO_REG = &I->second;
-  
+
+  I = getInstructions().find("COPY_TO_REGCLASS");
+  if (I == Instructions.end())
+    throw "Could not find 'COPY_TO_REGCLASS' instruction!";
+  const CodeGenInstruction *COPY_TO_REGCLASS = &I->second;
+
   // Print out the rest of the instructions now.
   NumberedInstructions.push_back(PHI);
   NumberedInstructions.push_back(INLINEASM);
@@ -356,6 +361,7 @@
   NumberedInstructions.push_back(INSERT_SUBREG);
   NumberedInstructions.push_back(IMPLICIT_DEF);
   NumberedInstructions.push_back(SUBREG_TO_REG);
+  NumberedInstructions.push_back(COPY_TO_REGCLASS);
   for (inst_iterator II = inst_begin(), E = inst_end(); II != E; ++II)
     if (&II->second != PHI &&
         &II->second != INLINEASM &&
@@ -366,7 +372,8 @@
         &II->second != EXTRACT_SUBREG &&
         &II->second != INSERT_SUBREG &&
         &II->second != IMPLICIT_DEF &&
-        &II->second != SUBREG_TO_REG)
+        &II->second != SUBREG_TO_REG &&
+        &II->second != COPY_TO_REGCLASS)
       NumberedInstructions.push_back(&II->second);
 }
 

Modified: llvm/branches/Apple/Dib/utils/TableGen/CodeGenTarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/utils/TableGen/CodeGenTarget.h?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/utils/TableGen/CodeGenTarget.h (original)
+++ llvm/branches/Apple/Dib/utils/TableGen/CodeGenTarget.h Wed Apr 15 13:19:52 2009
@@ -19,6 +19,7 @@
 
 #include "CodeGenRegisters.h"
 #include "CodeGenInstruction.h"
+#include <algorithm>
 #include <iosfwd>
 #include <map>
 
@@ -110,19 +111,54 @@
   }
   
   /// getRegisterClassForRegister - Find the register class that contains the
-  /// specified physical register.  If there register exists in multiple
-  /// register classes or is not in a register class, return null.
+  /// specified physical register.  If the register is not in a register
+  /// class, return null. If the register is in multiple classes, and the
+  /// classes have a superset-subset relationship and the same set of
+  /// types, return the superclass.  Otherwise return null.
   const CodeGenRegisterClass *getRegisterClassForRegister(Record *R) const {
     const std::vector<CodeGenRegisterClass> &RCs = getRegisterClasses();
     const CodeGenRegisterClass *FoundRC = 0;
     for (unsigned i = 0, e = RCs.size(); i != e; ++i) {
       const CodeGenRegisterClass &RC = RegisterClasses[i];
       for (unsigned ei = 0, ee = RC.Elements.size(); ei != ee; ++ei) {
-        if (R == RC.Elements[ei]) {
-          if (FoundRC) return 0;  // In multiple RC's
+        if (R != RC.Elements[ei])
+          continue;
+
+        // If a register's classes have different types, return null.
+        if (FoundRC && RC.getValueTypes() != FoundRC->getValueTypes())
+          return 0;
+
+        // If this is the first class that contains the register,
+        // make a note of it and go on to the next class.
+        if (!FoundRC) {
+          FoundRC = &RC;
+          break;
+        }
+
+        std::vector<Record *> Elements(RC.Elements);
+        std::vector<Record *> FoundElements(FoundRC->Elements);
+        std::sort(Elements.begin(), Elements.end());
+        std::sort(FoundElements.begin(), FoundElements.end());
+
+        // Check to see if the previously found class that contains
+        // the register is a subclass of the current class. If so,
+        // prefer the superclass.
+        if (std::includes(Elements.begin(), Elements.end(),
+                          FoundElements.begin(), FoundElements.end())) {
           FoundRC = &RC;
           break;
         }
+
+        // Check to see if the previously found class that contains
+        // the register is a superclass of the current class. If so,
+        // prefer the superclass.
+        if (std::includes(FoundElements.begin(), FoundElements.end(),
+                          Elements.begin(), Elements.end()))
+          break;
+
+        // Multiple classes, and neither is a superclass of the other.
+        // Return null.
+        return 0;
       }
     }
     return FoundRC;

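The superclass preference above hinges on std::includes over the sorted
element lists (hence the new <algorithm> include). A small self-contained
example of that test, with integers standing in for the Record* elements:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
      // Stand-ins for the element lists of a class and one of its subclasses.
      std::vector<int> GR8  = {0, 1, 2, 3, 4, 5, 6, 7};
      std::vector<int> GR8_ = {0, 1, 2, 3};

      // Both ranges must be sorted; std::includes returns true when the first
      // range contains every element of the second.
      assert(std::includes(GR8.begin(), GR8.end(), GR8_.begin(), GR8_.end()));
      assert(!std::includes(GR8_.begin(), GR8_.end(), GR8.begin(), GR8.end()));
      return 0;
    }
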
Modified: llvm/branches/Apple/Dib/utils/TableGen/DAGISelEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/utils/TableGen/DAGISelEmitter.cpp?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/utils/TableGen/DAGISelEmitter.cpp (original)
+++ llvm/branches/Apple/Dib/utils/TableGen/DAGISelEmitter.cpp Wed Apr 15 13:19:52 2009
@@ -918,6 +918,15 @@
                    getEnumName(N->getTypeNum(0)) + ");");
           NodeOps.push_back("Tmp" + utostr(ResNo));
           return NodeOps;
+        } else if (DI->getDef()->isSubClassOf("RegisterClass")) {
+          // Handle a reference to a register class. This is used
+          // in COPY_TO_REGCLASS instructions.
+          emitCode("SDValue Tmp" + utostr(ResNo) +
+                   " = CurDAG->getTargetConstant(" +
+                   getQualifiedName(DI->getDef()) + "RegClassID, " +
+                   "MVT::i32);");
+          NodeOps.push_back("Tmp" + utostr(ResNo));
+          return NodeOps;
         }
       } else if (IntInit *II = dynamic_cast<IntInit*>(N->getLeafValue())) {
         unsigned ResNo = TmpNo++;

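For reference, roughly the selector line this new case generates for a
RegisterClass leaf (the qualified name below is just an example); the class ID
becomes an i32 target constant that feeds the COPY_TO_REGCLASS node:

    #include <iostream>
    #include <string>

    // Builds the same string the emitter concatenates above.
    static std::string emitRegClassOperand(unsigned ResNo,
                                           const std::string &QualifiedName) {
      return "SDValue Tmp" + std::to_string(ResNo) +
             " = CurDAG->getTargetConstant(" + QualifiedName +
             "RegClassID, MVT::i32);";
    }

    int main() {
      // Prints: SDValue Tmp1 = CurDAG->getTargetConstant(X86::GR64_RegClassID, MVT::i32);
      std::cout << emitRegClassOperand(1, "X86::GR64_") << "\n";
      return 0;
    }
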
Modified: llvm/branches/Apple/Dib/utils/TableGen/InstrInfoEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/utils/TableGen/InstrInfoEmitter.cpp?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/utils/TableGen/InstrInfoEmitter.cpp (original)
+++ llvm/branches/Apple/Dib/utils/TableGen/InstrInfoEmitter.cpp Wed Apr 15 13:19:52 2009
@@ -340,7 +340,8 @@
         R->getName() != "EXTRACT_SUBREG" &&
         R->getName() != "INSERT_SUBREG" &&
         R->getName() != "IMPLICIT_DEF" &&
-        R->getName() != "SUBREG_TO_REG")
+        R->getName() != "SUBREG_TO_REG" &&
+        R->getName() != "COPY_TO_REGCLASS")
       throw R->getName() + " doesn't have a field named '" + 
             Val->getValue() + "'!";
     return;

Modified: llvm/branches/Apple/Dib/utils/TableGen/RegisterInfoEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/utils/TableGen/RegisterInfoEmitter.cpp?rev=69194&r1=69193&r2=69194&view=diff

==============================================================================
--- llvm/branches/Apple/Dib/utils/TableGen/RegisterInfoEmitter.cpp (original)
+++ llvm/branches/Apple/Dib/utils/TableGen/RegisterInfoEmitter.cpp Wed Apr 15 13:19:52 2009
@@ -240,8 +240,85 @@
          << RegisterClasses[i].getName() << "RegClass;\n";
          
     std::map<unsigned, std::set<unsigned> > SuperClassMap;
+    std::map<unsigned, std::set<unsigned> > SuperRegClassMap;
     OS << "\n";
 
+    // Emit the sub-register classes for each RegisterClass
+    for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
+      const CodeGenRegisterClass &RC = RegisterClasses[rc];
+
+      // Give the register class a legal C name if it's anonymous.
+      std::string Name = RC.TheDef->getName();
+
+      OS << "  // " << Name
+         << " Sub-register Classes...\n"
+         << "  static const TargetRegisterClass* const "
+         << Name << "SubRegClasses [] = {\n    ";
+
+      bool Empty = true;
+
+      for (unsigned subrc = 0, subrcMax = RC.SubRegClasses.size();
+            subrc != subrcMax; ++subrc) {
+        unsigned rc2 = 0, e2 = RegisterClasses.size();
+        for (; rc2 != e2; ++rc2) {
+          const CodeGenRegisterClass &RC2 =  RegisterClasses[rc2];
+          if (RC.SubRegClasses[subrc]->getName() == RC2.getName()) {
+            if (!Empty)
+              OS << ", ";
+            OS << "&" << getQualifiedName(RC2.TheDef) << "RegClass";
+            Empty = false;
+
+            std::map<unsigned, std::set<unsigned> >::iterator SCMI =
+              SuperRegClassMap.find(rc2);
+            if (SCMI == SuperRegClassMap.end()) {
+              SuperRegClassMap.insert(std::make_pair(rc2,
+                                                     std::set<unsigned>()));
+              SCMI = SuperRegClassMap.find(rc2);
+            }
+            SCMI->second.insert(rc);
+            break;
+          }
+        }
+        if (rc2 == e2)
+          throw "Register Class member '" +
+            RC.SubRegClasses[subrc]->getName() +
+            "' is not a valid RegisterClass!";
+      }
+
+      OS << (!Empty ? ", " : "") << "NULL";
+      OS << "\n  };\n\n";
+    }
+
+    // Emit the super-register classes for each RegisterClass
+    for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
+      const CodeGenRegisterClass &RC = RegisterClasses[rc];
+
+      // Give the register class a legal C name if it's anonymous.
+      std::string Name = RC.TheDef->getName();
+
+      OS << "  // " << Name
+         << " Super-register Classes...\n"
+         << "  static const TargetRegisterClass* const "
+         << Name << "SuperRegClasses [] = {\n    ";
+
+      bool Empty = true;
+      std::map<unsigned, std::set<unsigned> >::iterator I =
+        SuperRegClassMap.find(rc);
+      if (I != SuperRegClassMap.end()) {
+        for (std::set<unsigned>::iterator II = I->second.begin(),
+               EE = I->second.end(); II != EE; ++II) {
+          const CodeGenRegisterClass &RC2 = RegisterClasses[*II];
+          if (!Empty)
+            OS << ", ";
+          OS << "&" << getQualifiedName(RC2.TheDef) << "RegClass";
+          Empty = false;
+        }
+      }
+
+      OS << (!Empty ? ", " : "") << "NULL";
+      OS << "\n  };\n\n";
+    }
+
     // Emit the sub-classes array for each RegisterClass
     for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
       const CodeGenRegisterClass &RC = RegisterClasses[rc];
@@ -322,6 +399,8 @@
          << RC.getName() + "VTs" << ", "
          << RC.getName() + "Subclasses" << ", "
          << RC.getName() + "Superclasses" << ", "
+         << RC.getName() + "SubRegClasses" << ", "
+         << RC.getName() + "SuperRegClasses" << ", "
          << RC.SpillSize/8 << ", "
          << RC.SpillAlignment/8 << ", "
          << RC.CopyCost << ", "
@@ -462,6 +541,159 @@
   
   delete [] SubregHashTable;
 
+
+  // Print the SuperregHashTable, a simple quadratically probed
+  // hash table for determining if a register is a super-register
+  // of another register.
+  unsigned NumSupRegs = 0;
+  RegNo.clear();
+  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+    RegNo[Regs[i].TheDef] = i;
+    NumSupRegs += RegisterSuperRegs[Regs[i].TheDef].size();
+  }
+  
+  unsigned SuperregHashTableSize = 2 * NextPowerOf2(2 * NumSupRegs);
+  unsigned* SuperregHashTable = new unsigned[2 * SuperregHashTableSize];
+  std::fill(SuperregHashTable, SuperregHashTable + 2 * SuperregHashTableSize, ~0U);
+  
+  hashMisses = 0;
+  
+  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+    Record* R = Regs[i].TheDef;
+    for (std::set<Record*>::iterator I = RegisterSuperRegs[R].begin(),
+         E = RegisterSuperRegs[R].end(); I != E; ++I) {
+      Record* RJ = *I;
+      // We have to increase the indices of both registers by one when
+      // computing the hash because, in the generated code, there
+      // will be an extra empty slot at register 0.
+      size_t index = ((i+1) + (RegNo[RJ]+1) * 37) & (SuperregHashTableSize-1);
+      unsigned ProbeAmt = 2;
+      while (SuperregHashTable[index*2] != ~0U &&
+             SuperregHashTable[index*2+1] != ~0U) {
+        index = (index + ProbeAmt) & (SuperregHashTableSize-1);
+        ProbeAmt += 2;
+        
+        hashMisses++;
+      }
+      
+      SuperregHashTable[index*2] = i;
+      SuperregHashTable[index*2+1] = RegNo[RJ];
+    }
+  }
+  
+  OS << "\n\n  // Number of hash collisions: " << hashMisses << "\n";
+  
+  if (SuperregHashTableSize) {
+    std::string Namespace = Regs[0].TheDef->getValueAsString("Namespace");
+    
+    OS << "  const unsigned SuperregHashTable[] = { ";
+    for (unsigned i = 0; i < SuperregHashTableSize - 1; ++i) {
+      if (i != 0)
+        // Insert spaces for nice formatting.
+        OS << "                                       ";
+      
+      if (SuperregHashTable[2*i] != ~0U) {
+        OS << getQualifiedName(Regs[SuperregHashTable[2*i]].TheDef) << ", "
+           << getQualifiedName(Regs[SuperregHashTable[2*i+1]].TheDef) << ", \n";
+      } else {
+        OS << Namespace << "::NoRegister, " << Namespace << "::NoRegister, \n";
+      }
+    }
+    
+    unsigned Idx = SuperregHashTableSize*2-2;
+    if (SuperregHashTable[Idx] != ~0U) {
+      OS << "                                       "
+         << getQualifiedName(Regs[SuperregHashTable[Idx]].TheDef) << ", "
+         << getQualifiedName(Regs[SuperregHashTable[Idx+1]].TheDef) << " };\n";
+    } else {
+      OS << Namespace << "::NoRegister, " << Namespace << "::NoRegister };\n";
+    }
+    
+    OS << "  const unsigned SuperregHashTableSize = "
+       << SuperregHashTableSize << ";\n";
+  } else {
+    OS << "  const unsigned SuperregHashTable[] = { ~0U, ~0U };\n"
+       << "  const unsigned SuperregHashTableSize = 1;\n";
+  }
+  
+  delete [] SuperregHashTable;
+
+
+  // Print the AliasHashTable, a simple quadratically probed
+  // hash table for determining if a register aliases another register.
+  unsigned NumAliases = 0;
+  RegNo.clear();
+  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+    RegNo[Regs[i].TheDef] = i;
+    NumAliases += RegisterAliases[Regs[i].TheDef].size();
+  }
+  
+  unsigned AliasesHashTableSize = 2 * NextPowerOf2(2 * NumAliases);
+  unsigned* AliasesHashTable = new unsigned[2 * AliasesHashTableSize];
+  std::fill(AliasesHashTable, AliasesHashTable + 2 * AliasesHashTableSize, ~0U);
+  
+  hashMisses = 0;
+  
+  for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+    Record* R = Regs[i].TheDef;
+    for (std::set<Record*>::iterator I = RegisterAliases[R].begin(),
+         E = RegisterAliases[R].end(); I != E; ++I) {
+      Record* RJ = *I;
+      // We have to increase the indices of both registers by one when
+      // computing the hash because, in the generated code, there
+      // will be an extra empty slot at register 0.
+      size_t index = ((i+1) + (RegNo[RJ]+1) * 37) & (AliasesHashTableSize-1);
+      unsigned ProbeAmt = 2;
+      while (AliasesHashTable[index*2] != ~0U &&
+             AliasesHashTable[index*2+1] != ~0U) {
+        index = (index + ProbeAmt) & (AliasesHashTableSize-1);
+        ProbeAmt += 2;
+        
+        hashMisses++;
+      }
+      
+      AliasesHashTable[index*2] = i;
+      AliasesHashTable[index*2+1] = RegNo[RJ];
+    }
+  }
+  
+  OS << "\n\n  // Number of hash collisions: " << hashMisses << "\n";
+  
+  if (AliasesHashTableSize) {
+    std::string Namespace = Regs[0].TheDef->getValueAsString("Namespace");
+    
+    OS << "  const unsigned AliasesHashTable[] = { ";
+    for (unsigned i = 0; i < AliasesHashTableSize - 1; ++i) {
+      if (i != 0)
+        // Insert spaces for nice formatting.
+        OS << "                                       ";
+      
+      if (AliasesHashTable[2*i] != ~0U) {
+        OS << getQualifiedName(Regs[AliasesHashTable[2*i]].TheDef) << ", "
+           << getQualifiedName(Regs[AliasesHashTable[2*i+1]].TheDef) << ", \n";
+      } else {
+        OS << Namespace << "::NoRegister, " << Namespace << "::NoRegister, \n";
+      }
+    }
+    
+    unsigned Idx = AliasesHashTableSize*2-2;
+    if (AliasesHashTable[Idx] != ~0U) {
+      OS << "                                       "
+         << getQualifiedName(Regs[AliasesHashTable[Idx]].TheDef) << ", "
+         << getQualifiedName(Regs[AliasesHashTable[Idx+1]].TheDef) << " };\n";
+    } else {
+      OS << Namespace << "::NoRegister, " << Namespace << "::NoRegister };\n";
+    }
+    
+    OS << "  const unsigned AliasesHashTableSize = "
+       << AliasesHashTableSize << ";\n";
+  } else {
+    OS << "  const unsigned AliasesHashTable[] = { ~0U, ~0U };\n"
+       << "  const unsigned AliasesHashTableSize = 1;\n";
+  }
+  
+  delete [] AliasesHashTable;
+
   if (!RegisterAliases.empty())
     OS << "\n\n  // Register Alias Sets...\n";
 
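
Both new tables use the same layout as the existing SubregHashTable: pairs of
register enum values, sized to 2 * NextPowerOf2(2 * NumEntries) slots so that
the "& (Size-1)" masking works and the load factor stays below 25%. For
example, 40 super-register pairs would give 2 * NextPowerOf2(80) = 2 * 128 =
256 slots, i.e. an array of 512 unsigneds. A minimal sketch of the lookup a
client could run against such a table, mirroring the build-time hashing above
(multiplier 37, quadratic probing in steps of 2); this is illustrative only,
the real queries live in TargetRegisterInfo and may differ in detail:

  // Returns true if (RegA, RegB) was inserted into the pair table; e.g. for
  // SuperregHashTable this means RegB was recorded as a super-register of
  // RegA.  RegA/RegB are generated enum values, which already carry the +1
  // bias for the NoRegister slot at 0; empty slots are emitted as NoRegister
  // (0), which is what terminates the probe loop.
  static bool isInPairTable(const unsigned *Table, unsigned TableSize,
                            unsigned RegA, unsigned RegB) {
    size_t Index = (RegA + RegB * 37) & (TableSize - 1);
    unsigned ProbeAmt = 2;
    while (Table[Index*2] != 0 && Table[Index*2+1] != 0) {
      if (Table[Index*2] == RegA && Table[Index*2+1] == RegB)
        return true;
      Index = (Index + ProbeAmt) & (TableSize - 1);
      ProbeAmt += 2;
    }
    return false;
  }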
@@ -598,7 +830,9 @@
      << "  : TargetRegisterInfo(RegisterDescriptors, " << Registers.size()+1
      << ", RegisterClasses, RegisterClasses+" << RegisterClasses.size() <<",\n "
      << "                 CallFrameSetupOpcode, CallFrameDestroyOpcode,\n"
-     << "                 SubregHashTable, SubregHashTableSize) {\n"
+     << "                 SubregHashTable, SubregHashTableSize,\n"
+     << "                 SuperregHashTable, SuperregHashTableSize,\n"
+     << "                 AliasesHashTable, AliasesHashTableSize) {\n"
      << "}\n\n";
 
   // Collect all information about dwarf register numbers
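
The three table/size pairs appended above have to be accepted by the
TargetRegisterInfo base constructor, presumably alongside a matching change
to its declaration. A sketch of the parameter list the generated call
implies; parameter names and exact types are illustrative, only the ordering
is taken from the emitted code:

  TargetRegisterInfo(const TargetRegisterDesc *Descs, unsigned NumRegs,
                     regclass_iterator RCBegin, regclass_iterator RCEnd,
                     int CallFrameSetupOpcode, int CallFrameDestroyOpcode,
                     const unsigned *SubregHash, unsigned SubregHashSize,
                     const unsigned *SuperregHash, unsigned SuperregHashSize,
                     const unsigned *AliasesHash, unsigned AliasesHashSize);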




