[llvm-branch-commits] [llvm-branch] r108038 - in /llvm/branches/wendling/eh: ./ docs/ include/llvm/ADT/ include/llvm/Analysis/ include/llvm/CodeGen/ include/llvm/MC/ include/llvm/Support/ include/llvm/Target/ lib/Analysis/ lib/Analysis/IPA/ lib/Bitcode/Reader/ lib/CodeGen/ lib/CodeGen/AsmPrinter/ lib/CodeGen/SelectionDAG/ lib/MC/ lib/MC/MCParser/ lib/Target/ lib/Target/ARM/ lib/Target/ARM/AsmPrinter/ lib/Target/CellSPU/ lib/Target/X86/ lib/Target/X86/AsmPrinter/ lib/Transforms/IPO/ lib/Transforms/InstCombine/ lib/Transforms/S...

Bill Wendling isanbard at gmail.com
Fri Jul 9 22:06:30 PDT 2010


Author: void
Date: Sat Jul 10 00:06:30 2010
New Revision: 108038

URL: http://llvm.org/viewvc/llvm-project?rev=108038&view=rev
Log:
Merge to ToT.

Added:
    llvm/branches/wendling/eh/test/CodeGen/X86/leaf-fp-elim.ll
      - copied unchanged from r108037, llvm/trunk/test/CodeGen/X86/leaf-fp-elim.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/licm-nested.ll
      - copied unchanged from r108037, llvm/trunk/test/CodeGen/X86/licm-nested.ll
    llvm/branches/wendling/eh/test/FrontendC/2010-07-08-DeclDebugLineNo.c
      - copied unchanged from r108037, llvm/trunk/test/FrontendC/2010-07-08-DeclDebugLineNo.c
Modified:
    llvm/branches/wendling/eh/   (props changed)
    llvm/branches/wendling/eh/docs/GettingStarted.html
    llvm/branches/wendling/eh/docs/WritingAnLLVMPass.html
    llvm/branches/wendling/eh/include/llvm/ADT/PostOrderIterator.h
    llvm/branches/wendling/eh/include/llvm/Analysis/DominatorInternals.h
    llvm/branches/wendling/eh/include/llvm/Analysis/Dominators.h
    llvm/branches/wendling/eh/include/llvm/Analysis/IntervalIterator.h
    llvm/branches/wendling/eh/include/llvm/Analysis/LoopInfo.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/MachineInstr.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/RegisterScavenging.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/SlotIndexes.h
    llvm/branches/wendling/eh/include/llvm/MC/MCDirectives.h
    llvm/branches/wendling/eh/include/llvm/Support/CFG.h
    llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h
    llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h
    llvm/branches/wendling/eh/lib/Analysis/IPA/CallGraph.cpp
    llvm/branches/wendling/eh/lib/Analysis/IPA/GlobalsModRef.cpp
    llvm/branches/wendling/eh/lib/Analysis/LoopInfo.cpp
    llvm/branches/wendling/eh/lib/Analysis/PostDominators.cpp
    llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionExpander.cpp
    llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.cpp
    llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
    llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.h
    llvm/branches/wendling/eh/lib/CodeGen/InlineSpiller.cpp
    llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp
    llvm/branches/wendling/eh/lib/CodeGen/LowerSubregs.cpp
    llvm/branches/wendling/eh/lib/CodeGen/MachineCSE.cpp
    llvm/branches/wendling/eh/lib/CodeGen/MachineLICM.cpp
    llvm/branches/wendling/eh/lib/CodeGen/OptimizeExts.cpp
    llvm/branches/wendling/eh/lib/CodeGen/PreAllocSplitting.cpp
    llvm/branches/wendling/eh/lib/CodeGen/ProcessImplicitDefs.cpp
    llvm/branches/wendling/eh/lib/CodeGen/PrologEpilogInserter.cpp
    llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp
    llvm/branches/wendling/eh/lib/CodeGen/RegAllocLinearScan.cpp
    llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp
    llvm/branches/wendling/eh/lib/CodeGen/RegisterScavenging.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.cpp
    llvm/branches/wendling/eh/lib/CodeGen/StackSlotColoring.cpp
    llvm/branches/wendling/eh/lib/CodeGen/TargetInstrInfoImpl.cpp
    llvm/branches/wendling/eh/lib/CodeGen/TwoAddressInstructionPass.cpp
    llvm/branches/wendling/eh/lib/CodeGen/VirtRegRewriter.cpp
    llvm/branches/wendling/eh/lib/MC/MCAsmStreamer.cpp
    llvm/branches/wendling/eh/lib/MC/MCMachOStreamer.cpp
    llvm/branches/wendling/eh/lib/MC/MCParser/AsmParser.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseRegisterInfo.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/ARMRegisterInfo.td
    llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/NEONPreAllocPass.cpp
    llvm/branches/wendling/eh/lib/Target/CellSPU/SPUCallingConv.td
    llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp
    llvm/branches/wendling/eh/lib/Target/CellSPU/SPURegisterInfo.cpp
    llvm/branches/wendling/eh/lib/Target/CellSPU/SPURegisterInfo.h
    llvm/branches/wendling/eh/lib/Target/README.txt
    llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
    llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h
    llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp
    llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h
    llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86AsmBackend.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86CodeEmitter.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86FastISel.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86FloatingPoint.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86ISelDAGToDAG.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86Instr64bit.td
    llvm/branches/wendling/eh/lib/Target/X86/X86InstrBuilder.h
    llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.h
    llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.td
    llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td
    llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.td
    llvm/branches/wendling/eh/lib/Transforms/IPO/GlobalOpt.cpp
    llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
    llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstCombineSelect.cpp
    llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstructionCombining.cpp
    llvm/branches/wendling/eh/lib/Transforms/Scalar/GVN.cpp
    llvm/branches/wendling/eh/lib/Transforms/Scalar/IndVarSimplify.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/BreakCriticalEdges.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/LCSSA.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/LoopSimplify.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/SimplifyCFG.cpp
    llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/alloca.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/reg_sequence.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/sse-minmax.ll
    llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_32-encoding.s
    llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_64-encoding.s
    llvm/branches/wendling/eh/test/Transforms/InstCombine/select.ll
    llvm/branches/wendling/eh/utils/TableGen/EDEmitter.cpp

Propchange: llvm/branches/wendling/eh/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sat Jul 10 00:06:30 2010
@@ -1 +1 @@
-/llvm/trunk:104459-107860
+/llvm/trunk:104459-108037

Modified: llvm/branches/wendling/eh/docs/GettingStarted.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/GettingStarted.html?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/GettingStarted.html (original)
+++ llvm/branches/wendling/eh/docs/GettingStarted.html Sat Jul 10 00:06:30 2010
@@ -1137,13 +1137,13 @@
 named after the build type:</p>
 
 <dl>
-  <dt>Debug Builds
+  <dt>Debug Builds with assertions enabled (the default)
   <dd>
   <dl>
     <dt>Tools
-    <dd><tt><i>OBJ_ROOT</i>/Debug/bin</tt>
+    <dd><tt><i>OBJ_ROOT</i>/Debug+Asserts/bin</tt>
     <dt>Libraries
-    <dd><tt><i>OBJ_ROOT</i>/Debug/lib</tt>
+    <dd><tt><i>OBJ_ROOT</i>/Debug+Asserts/lib</tt>
   </dl>
   <br><br>
 

Modified: llvm/branches/wendling/eh/docs/WritingAnLLVMPass.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/WritingAnLLVMPass.html?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/WritingAnLLVMPass.html (original)
+++ llvm/branches/wendling/eh/docs/WritingAnLLVMPass.html Sat Jul 10 00:06:30 2010
@@ -195,7 +195,7 @@
 
 <p>This makefile specifies that all of the <tt>.cpp</tt> files in the current
 directory are to be compiled and linked together into a
-<tt>Debug/lib/Hello.so</tt> shared object that can be dynamically loaded by
+<tt>Debug+Asserts/lib/Hello.so</tt> shared object that can be dynamically loaded by
 the <tt>opt</tt> or <tt>bugpoint</tt> tools via their <tt>-load</tt> options.  
 If your operating system uses a suffix other than .so (such as windows or 
 Mac OS/X), the appropriate extension will be used.</p>
@@ -332,7 +332,7 @@
 
 <p>Now that it's all together, compile the file with a simple "<tt>gmake</tt>"
 command in the local directory and you should get a new
-"<tt>Debug/lib/Hello.so</tt> file.  Note that everything in this file is
+"<tt>Debug+Asserts/lib/Hello.so</tt> file.  Note that everything in this file is
 contained in an anonymous namespace: this reflects the fact that passes are self
 contained units that do not need external interfaces (although they can have
 them) to be useful.</p>
@@ -358,7 +358,7 @@
 work):</p>
 
 <div class="doc_code"><pre>
-$ opt -load ../../../Debug/lib/Hello.so -hello < hello.bc > /dev/null
+$ opt -load ../../../Debug+Asserts/lib/Hello.so -hello < hello.bc > /dev/null
 Hello: __main
 Hello: puts
 Hello: main
@@ -375,7 +375,7 @@
 <tt>opt</tt> with the <tt>-help</tt> option:</p>
 
 <div class="doc_code"><pre>
-$ opt -load ../../../Debug/lib/Hello.so -help
+$ opt -load ../../../Debug+Asserts/lib/Hello.so -help
 OVERVIEW: llvm .bc -> .bc modular optimizer
 
 USAGE: opt [options] <input bitcode>
@@ -403,7 +403,7 @@
 example:</p>
 
 <div class="doc_code"><pre>
-$ opt -load ../../../Debug/lib/Hello.so -hello -time-passes < hello.bc > /dev/null
+$ opt -load ../../../Debug+Asserts/lib/Hello.so -hello -time-passes < hello.bc > /dev/null
 Hello: __main
 Hello: puts
 Hello: main
@@ -1418,7 +1418,7 @@
 Lets try it out with the <tt>gcse</tt> and <tt>licm</tt> passes:</p>
 
 <div class="doc_code"><pre>
-$ opt -load ../../../Debug/lib/Hello.so -gcse -licm --debug-pass=Structure < hello.bc > /dev/null
+$ opt -load ../../../Debug+Asserts/lib/Hello.so -gcse -licm --debug-pass=Structure < hello.bc > /dev/null
 Module Pass Manager
   Function Pass Manager
     Dominator Set Construction
@@ -1455,7 +1455,7 @@
 World</a> pass in between the two passes:</p>
 
 <div class="doc_code"><pre>
-$ opt -load ../../../Debug/lib/Hello.so -gcse -hello -licm --debug-pass=Structure < hello.bc > /dev/null
+$ opt -load ../../../Debug+Asserts/lib/Hello.so -gcse -hello -licm --debug-pass=Structure < hello.bc > /dev/null
 Module Pass Manager
   Function Pass Manager
     Dominator Set Construction
@@ -1496,7 +1496,7 @@
 <p>Now when we run our pass, we get this output:</p>
 
 <div class="doc_code"><pre>
-$ opt -load ../../../Debug/lib/Hello.so -gcse -hello -licm --debug-pass=Structure < hello.bc > /dev/null
+$ opt -load ../../../Debug+Asserts/lib/Hello.so -gcse -hello -licm --debug-pass=Structure < hello.bc > /dev/null
 Pass Arguments:  -gcse -hello -licm
 Module Pass Manager
   Function Pass Manager
@@ -1737,8 +1737,8 @@
 <div class="doc_code"><pre>
 (gdb) <b>break llvm::PassManager::run</b>
 Breakpoint 1 at 0x2413bc: file Pass.cpp, line 70.
-(gdb) <b>run test.bc -load $(LLVMTOP)/llvm/Debug/lib/[libname].so -[passoption]</b>
-Starting program: opt test.bc -load $(LLVMTOP)/llvm/Debug/lib/[libname].so -[passoption]
+(gdb) <b>run test.bc -load $(LLVMTOP)/llvm/Debug+Asserts/lib/[libname].so -[passoption]</b>
+Starting program: opt test.bc -load $(LLVMTOP)/llvm/Debug+Asserts/lib/[libname].so -[passoption]
 Breakpoint 1, PassManager::run (this=0xffbef174, M=@0x70b298) at Pass.cpp:70
 70      bool PassManager::run(Module &M) { return PM->run(M); }
 (gdb)

Modified: llvm/branches/wendling/eh/include/llvm/ADT/PostOrderIterator.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/ADT/PostOrderIterator.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/ADT/PostOrderIterator.h (original)
+++ llvm/branches/wendling/eh/include/llvm/ADT/PostOrderIterator.h Sat Jul 10 00:06:30 2010
@@ -19,7 +19,6 @@
 #include "llvm/ADT/GraphTraits.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include <set>
-#include <stack>
 #include <vector>
 
 namespace llvm {
@@ -52,21 +51,21 @@
 
   // VisitStack - Used to maintain the ordering.  Top = current block
   // First element is basic block pointer, second is the 'next child' to visit
-  std::stack<std::pair<NodeType *, ChildItTy> > VisitStack;
+  std::vector<std::pair<NodeType *, ChildItTy> > VisitStack;
 
   void traverseChild() {
-    while (VisitStack.top().second != GT::child_end(VisitStack.top().first)) {
-      NodeType *BB = *VisitStack.top().second++;
+    while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
+      NodeType *BB = *VisitStack.back().second++;
       if (!this->Visited.count(BB)) {  // If the block is not visited...
         this->Visited.insert(BB);
-        VisitStack.push(std::make_pair(BB, GT::child_begin(BB)));
+        VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
       }
     }
   }
 
   inline po_iterator(NodeType *BB) {
     this->Visited.insert(BB);
-    VisitStack.push(std::make_pair(BB, GT::child_begin(BB)));
+    VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
     traverseChild();
   }
   inline po_iterator() {} // End is when stack is empty.
@@ -75,7 +74,7 @@
     po_iterator_storage<SetType, ExtStorage>(S) {
     if(!S.count(BB)) {
       this->Visited.insert(BB);
-      VisitStack.push(std::make_pair(BB, GT::child_begin(BB)));
+      VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
       traverseChild();
     }
   }
@@ -102,7 +101,7 @@
   inline bool operator!=(const _Self& x) const { return !operator==(x); }
 
   inline pointer operator*() const {
-    return VisitStack.top().first;
+    return VisitStack.back().first;
   }
 
   // This is a nonstandard operator-> that dereferences the pointer an extra
@@ -112,7 +111,7 @@
   inline NodeType *operator->() const { return operator*(); }
 
   inline _Self& operator++() {   // Preincrement
-    VisitStack.pop();
+    VisitStack.pop_back();
     if (!VisitStack.empty())
       traverseChild();
     return *this;

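The PostOrderIterator change above swaps std::stack for a std::vector used
directly as a stack: top() becomes back(), push() becomes push_back(), and
pop() becomes pop_back(). A minimal self-contained sketch of the same idiom
(toy payload, not the real GraphTraits machinery):

    #include <utility>
    #include <vector>

    // A std::vector used as an explicit stack, mirroring VisitStack. Unlike
    // std::stack, the vector is directly comparable and iterable, and skips
    // the container-adaptor indirection.
    int visit_stack_demo() {
      std::vector<std::pair<int, int> > VisitStack;  // was std::stack<...>
      VisitStack.push_back(std::make_pair(1, 0));    // was VisitStack.push(...)
      int Current = VisitStack.back().first;         // was VisitStack.top().first
      VisitStack.pop_back();                         // was VisitStack.pop()
      return Current;                                // returns 1
    }
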
Modified: llvm/branches/wendling/eh/include/llvm/Analysis/DominatorInternals.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Analysis/DominatorInternals.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Analysis/DominatorInternals.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Analysis/DominatorInternals.h Sat Jul 10 00:06:30 2010
@@ -265,14 +265,17 @@
 
     // initialize the semi dominator to point to the parent node
     WInfo.Semi = WInfo.Parent;
-    for (typename GraphTraits<Inverse<NodeT> >::ChildIteratorType CI =
-         GraphTraits<Inverse<NodeT> >::child_begin(W),
-         E = GraphTraits<Inverse<NodeT> >::child_end(W); CI != E; ++CI)
-      if (DT.Info.count(*CI)) {  // Only if this predecessor is reachable!
-        unsigned SemiU = DT.Info[Eval<GraphT>(DT, *CI)].Semi;
+    typedef GraphTraits<Inverse<NodeT> > InvTraits;
+    for (typename InvTraits::ChildIteratorType CI =
+         InvTraits::child_begin(W),
+         E = InvTraits::child_end(W); CI != E; ++CI) {
+      typename InvTraits::NodeType *N = *CI;
+      if (DT.Info.count(N)) {  // Only if this predecessor is reachable!
+        unsigned SemiU = DT.Info[Eval<GraphT>(DT, N)].Semi;
         if (SemiU < WInfo.Semi)
           WInfo.Semi = SemiU;
       }
+    }
 
     DT.Info[DT.Vertex[WInfo.Semi]].Bucket.push_back(W);
 

Modified: llvm/branches/wendling/eh/include/llvm/Analysis/Dominators.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Analysis/Dominators.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Analysis/Dominators.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Analysis/Dominators.h Sat Jul 10 00:06:30 2010
@@ -246,22 +246,25 @@
     typename GraphT::NodeType* NewBBSucc = *GraphT::child_begin(NewBB);
 
     std::vector<typename GraphT::NodeType*> PredBlocks;
-    for (typename GraphTraits<Inverse<N> >::ChildIteratorType PI =
-         GraphTraits<Inverse<N> >::child_begin(NewBB),
-         PE = GraphTraits<Inverse<N> >::child_end(NewBB); PI != PE; ++PI)
+    typedef GraphTraits<Inverse<N> > InvTraits;
+    for (typename InvTraits::ChildIteratorType PI =
+         InvTraits::child_begin(NewBB),
+         PE = InvTraits::child_end(NewBB); PI != PE; ++PI)
       PredBlocks.push_back(*PI);
 
-    assert(!PredBlocks.empty() && "No predblocks??");
+    assert(!PredBlocks.empty() && "No predblocks?");
 
     bool NewBBDominatesNewBBSucc = true;
-    for (typename GraphTraits<Inverse<N> >::ChildIteratorType PI =
-         GraphTraits<Inverse<N> >::child_begin(NewBBSucc),
-         E = GraphTraits<Inverse<N> >::child_end(NewBBSucc); PI != E; ++PI)
-      if (*PI != NewBB && !DT.dominates(NewBBSucc, *PI) &&
-          DT.isReachableFromEntry(*PI)) {
+    for (typename InvTraits::ChildIteratorType PI =
+         InvTraits::child_begin(NewBBSucc),
+         E = InvTraits::child_end(NewBBSucc); PI != E; ++PI) {
+      typename InvTraits::NodeType *ND = *PI;
+      if (ND != NewBB && !DT.dominates(NewBBSucc, ND) &&
+          DT.isReachableFromEntry(ND)) {
         NewBBDominatesNewBBSucc = false;
         break;
       }
+    }
 
     // Find NewBB's immediate dominator and create new dominator tree node for
     // NewBB.

Modified: llvm/branches/wendling/eh/include/llvm/Analysis/IntervalIterator.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Analysis/IntervalIterator.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Analysis/IntervalIterator.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Analysis/IntervalIterator.h Sat Jul 10 00:06:30 2010
@@ -36,9 +36,9 @@
 #include "llvm/Analysis/IntervalPartition.h"
 #include "llvm/Function.h"
 #include "llvm/Support/CFG.h"
-#include <stack>
-#include <set>
 #include <algorithm>
+#include <set>
+#include <vector>
 
 namespace llvm {
 
@@ -88,7 +88,7 @@
 template<class NodeTy, class OrigContainer_t, class GT = GraphTraits<NodeTy*>,
          class IGT = GraphTraits<Inverse<NodeTy*> > >
 class IntervalIterator {
-  std::stack<std::pair<Interval*, typename Interval::succ_iterator> > IntStack;
+  std::vector<std::pair<Interval*, typename Interval::succ_iterator> > IntStack;
   std::set<BasicBlock*> Visited;
   OrigContainer_t *OrigContainer;
   bool IOwnMem;     // If True, delete intervals when done with them
@@ -116,15 +116,15 @@
     if (IOwnMem)
       while (!IntStack.empty()) {
         delete operator*();
-        IntStack.pop();
+        IntStack.pop_back();
       }
   }
 
   inline bool operator==(const _Self& x) const { return IntStack == x.IntStack;}
   inline bool operator!=(const _Self& x) const { return !operator==(x); }
 
-  inline const Interval *operator*() const { return IntStack.top().first; }
-  inline       Interval *operator*()       { return IntStack.top().first; }
+  inline const Interval *operator*() const { return IntStack.back().first; }
+  inline       Interval *operator*()       { return IntStack.back().first; }
   inline const Interval *operator->() const { return operator*(); }
   inline       Interval *operator->()       { return operator*(); }
 
@@ -133,8 +133,8 @@
     do {
       // All of the intervals on the stack have been visited.  Try visiting
       // their successors now.
-      Interval::succ_iterator &SuccIt = IntStack.top().second,
-                                EndIt = succ_end(IntStack.top().first);
+      Interval::succ_iterator &SuccIt = IntStack.back().second,
+                                EndIt = succ_end(IntStack.back().first);
       while (SuccIt != EndIt) {                 // Loop over all interval succs
         bool Done = ProcessInterval(getSourceGraphNode(OrigContainer, *SuccIt));
         ++SuccIt;                               // Increment iterator
@@ -142,10 +142,10 @@
       }
 
       // Free interval memory... if necessary
-      if (IOwnMem) delete IntStack.top().first;
+      if (IOwnMem) delete IntStack.back().first;
 
       // We ran out of successors for this interval... pop off the stack
-      IntStack.pop();
+      IntStack.pop_back();
     } while (!IntStack.empty());
 
     return *this;
@@ -175,7 +175,7 @@
            E = GT::child_end(Node); I != E; ++I)
       ProcessNode(Int, getSourceGraphNode(OrigContainer, *I));
 
-    IntStack.push(std::make_pair(Int, succ_begin(Int)));
+    IntStack.push_back(std::make_pair(Int, succ_begin(Int)));
     return true;
   }
 

Modified: llvm/branches/wendling/eh/include/llvm/Analysis/LoopInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Analysis/LoopInfo.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Analysis/LoopInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Analysis/LoopInfo.h Sat Jul 10 00:06:30 2010
@@ -285,12 +285,14 @@
     typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
     for (typename InvBlockTraits::ChildIteratorType PI =
          InvBlockTraits::child_begin(Header),
-         PE = InvBlockTraits::child_end(Header); PI != PE; ++PI)
-      if (!contains(*PI)) {     // If the block is not in the loop...
-        if (Out && Out != *PI)
+         PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
+      typename InvBlockTraits::NodeType *N = *PI;
+      if (!contains(N)) {     // If the block is not in the loop...
+        if (Out && Out != N)
           return 0;             // Multiple predecessors outside the loop
-        Out = *PI;
+        Out = N;
       }
+    }
 
     // Make sure there is only one exit out of the preheader.
     assert(Out && "Header of loop has no predecessors from outside loop?");
@@ -307,11 +309,13 @@
     typename InvBlockTraits::ChildIteratorType PE =
                                               InvBlockTraits::child_end(Header);
     BlockT *Latch = 0;
-    for (; PI != PE; ++PI)
-      if (contains(*PI)) {
+    for (; PI != PE; ++PI) {
+      typename InvBlockTraits::NodeType *N = *PI;
+      if (contains(N)) {
         if (Latch) return 0;
-        Latch = *PI;
+        Latch = N;
       }
+    }
 
     return Latch;
   }
@@ -423,10 +427,11 @@
       for (typename InvBlockTraits::ChildIteratorType PI =
            InvBlockTraits::child_begin(BB), PE = InvBlockTraits::child_end(BB);
            PI != PE; ++PI) {
-        if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), *PI))
+        typename InvBlockTraits::NodeType *N = *PI;
+        if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), N))
           HasInsideLoopPreds = true;
         else
-          OutsideLoopPreds.push_back(*PI);
+          OutsideLoopPreds.push_back(N);
       }
 
       if (BB == getHeader()) {
@@ -757,9 +762,11 @@
     typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
     for (typename InvBlockTraits::ChildIteratorType I =
          InvBlockTraits::child_begin(BB), E = InvBlockTraits::child_end(BB);
-         I != E; ++I)
-      if (DT.dominates(BB, *I))   // If BB dominates its predecessor...
-        TodoStack.push_back(*I);
+         I != E; ++I) {
+      typename InvBlockTraits::NodeType *N = *I;
+      if (DT.dominates(BB, N))   // If BB dominates its predecessor...
+          TodoStack.push_back(N);
+    }
 
     if (TodoStack.empty()) return 0;  // No backedges to this block...
 

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/MachineInstr.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/MachineInstr.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/MachineInstr.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/MachineInstr.h Sat Jul 10 00:06:30 2010
@@ -215,9 +215,6 @@
   bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
   bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
   bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; }
-  bool isExtractSubreg() const {
-    return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
-  }
   bool isInsertSubreg() const {
     return getOpcode() == TargetOpcode::INSERT_SUBREG;
   }
@@ -234,7 +231,13 @@
   /// isCopyLike - Return true if the instruction behaves like a copy.
   /// This does not include native copy instructions.
   bool isCopyLike() const {
-    return isCopy() || isSubregToReg() || isExtractSubreg() || isInsertSubreg();
+    return isCopy() || isSubregToReg();
+  }
+
+  /// isIdentityCopy - Return true if the instruction is an identity copy.
+  bool isIdentityCopy() const {
+    return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
+      getOperand(0).getSubReg() == getOperand(1).getSubReg();
   }
 
   /// readsRegister - Return true if the MachineInstr reads the specified

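The new isIdentityCopy() predicate above flags COPY instructions whose
destination and source agree in both register and subregister index; such a
copy moves a value onto itself and can be deleted. A stand-alone sketch of
the same check (RegOperand is an illustrative stand-in, not the real
MachineOperand):

    // Hypothetical stand-in for the two MachineOperand fields consulted by
    // MachineInstr::isIdentityCopy().
    struct RegOperand { unsigned Reg; unsigned SubReg; };

    // A copy is an identity when both the register and the subregister
    // index of the destination match those of the source.
    bool isIdentityCopyLike(const RegOperand &Dst, const RegOperand &Src) {
      return Dst.Reg == Src.Reg && Dst.SubReg == Src.SubReg;
    }
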
Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/RegisterScavenging.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/RegisterScavenging.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/RegisterScavenging.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/RegisterScavenging.h Sat Jul 10 00:06:30 2010
@@ -106,16 +106,6 @@
   /// Return 0 if none is found.
   unsigned FindUnusedReg(const TargetRegisterClass *RegClass) const;
 
-  /// findSurvivorReg - Return the candidate register that is unused for the
-  /// longest after StartMI. UseMI is set to the instruction where the search
-  /// stopped.
-  ///
-  /// No more than InstrLimit instructions are inspected.
-  unsigned findSurvivorReg(MachineBasicBlock::iterator StartMI,
-                           BitVector &Candidates,
-                           unsigned InstrLimit,
-                           MachineBasicBlock::iterator &UseMI);
-
   /// setScavengingFrameIndex / getScavengingFrameIndex - accessor and setter of
   /// ScavengingFrameIndex.
   void setScavengingFrameIndex(int FI) { ScavengingFrameIndex = FI; }
@@ -161,6 +151,16 @@
   /// Add Reg and its aliases to BV.
   void addRegWithAliases(BitVector &BV, unsigned Reg);
 
+  /// findSurvivorReg - Return the candidate register that is unused for the
+  /// longest after StartMI. UseMI is set to the instruction where the search
+  /// stopped.
+  ///
+  /// No more than InstrLimit instructions are inspected.
+  unsigned findSurvivorReg(MachineBasicBlock::iterator StartMI,
+                           BitVector &Candidates,
+                           unsigned InstrLimit,
+                           MachineBasicBlock::iterator &UseMI);
+
 };
 
 } // End llvm namespace

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/SlotIndexes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/SlotIndexes.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/SlotIndexes.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/SlotIndexes.h Sat Jul 10 00:06:30 2010
@@ -23,6 +23,7 @@
 #define LLVM_CODEGEN_SLOTINDEXES_H
 
 #include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/ADT/PointerIntPair.h"
 #include "llvm/ADT/SmallVector.h"
@@ -762,6 +763,47 @@
       mi2iMap.insert(std::make_pair(newMI, replaceBaseIndex));
     }
 
+    /// Add the given MachineBasicBlock into the maps.
+    void insertMBBInMaps(MachineBasicBlock *mbb) {
+      MachineFunction::iterator nextMBB =
+        llvm::next(MachineFunction::iterator(mbb));
+      IndexListEntry *startEntry = createEntry(0, 0);
+      IndexListEntry *terminatorEntry = createEntry(0, 0); 
+      IndexListEntry *nextEntry = 0;
+
+      if (nextMBB == mbb->getParent()->end()) {
+        nextEntry = getTail();
+      } else {
+        nextEntry = &getMBBStartIdx(nextMBB).entry();
+      }
+
+      insert(nextEntry, startEntry);
+      insert(nextEntry, terminatorEntry);
+
+      SlotIndex startIdx(startEntry, SlotIndex::LOAD);
+      SlotIndex terminatorIdx(terminatorEntry, SlotIndex::PHI_BIT);
+      SlotIndex endIdx(nextEntry, SlotIndex::LOAD);
+
+      terminatorGaps.insert(
+        std::make_pair(mbb, terminatorIdx));
+
+      mbb2IdxMap.insert(
+        std::make_pair(mbb, std::make_pair(startIdx, endIdx)));
+
+      idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb));
+
+      if (MachineFunction::iterator(mbb) != mbb->getParent()->begin()) {
+        // Have to update the end index of the previous block.
+        MachineBasicBlock *priorMBB =
+          llvm::prior(MachineFunction::iterator(mbb));
+        mbb2IdxMap[priorMBB].second = startIdx;
+      }
+
+      renumberIndexes();
+      std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
+
+    }
+
   };
 
 

Modified: llvm/branches/wendling/eh/include/llvm/MC/MCDirectives.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/MC/MCDirectives.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/MC/MCDirectives.h (original)
+++ llvm/branches/wendling/eh/include/llvm/MC/MCDirectives.h Sat Jul 10 00:06:30 2010
@@ -38,7 +38,8 @@
   MCSA_Reference,           ///< .reference (MachO)
   MCSA_Weak,                ///< .weak
   MCSA_WeakDefinition,      ///< .weak_definition (MachO)
-  MCSA_WeakReference        ///< .weak_reference (MachO)
+  MCSA_WeakReference,       ///< .weak_reference (MachO)
+  MCSA_WeakDefAutoPrivate   ///< .weak_def_can_be_hidden (MachO)
 };
 
 enum MCAssemblerFlag {

Modified: llvm/branches/wendling/eh/include/llvm/Support/CFG.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Support/CFG.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Support/CFG.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Support/CFG.h Sat Jul 10 00:06:30 2010
@@ -53,7 +53,7 @@
     assert(!It.atEnd() && "pred_iterator out of range!");
     return cast<TerminatorInst>(*It)->getParent();
   }
-  inline pointer *operator->() const { return &(operator*()); }
+  inline pointer *operator->() const { return &operator*(); }
 
   inline Self& operator++() {   // Preincrement
     assert(!It.atEnd() && "pred_iterator out of range!");

Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h Sat Jul 10 00:06:30 2010
@@ -122,10 +122,6 @@
         SrcReg == DstReg)
       return true;
 
-    if (MI.getOpcode() == TargetOpcode::EXTRACT_SUBREG &&
-        MI.getOperand(0).getReg() == MI.getOperand(1).getReg())
-    return true;
-
     if ((MI.getOpcode() == TargetOpcode::INSERT_SUBREG ||
          MI.getOpcode() == TargetOpcode::SUBREG_TO_REG) &&
         MI.getOperand(0).getReg() == MI.getOperand(2).getReg())
@@ -442,19 +438,17 @@
   /// foldMemoryOperand - Attempt to fold a load or store of the specified stack
   /// slot into the specified machine instruction for the specified operand(s).
   /// If this is possible, a new instruction is returned with the specified
-  /// operand folded, otherwise NULL is returned. The client is responsible for
-  /// removing the old instruction and adding the new one in the instruction
-  /// stream.
-  MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                  MachineInstr* MI,
+  /// operand folded, otherwise NULL is returned.
+  /// The new instruction is inserted before MI, and the client is responsible
+  /// for removing the old instruction.
+  MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   int FrameIndex) const;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
-  MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                  MachineInstr* MI,
+  MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const;
 
@@ -484,9 +478,7 @@
   /// folding is possible.
   virtual
   bool canFoldMemoryOperand(const MachineInstr *MI,
-                            const SmallVectorImpl<unsigned> &Ops) const {
-    return false;
-  }
+                            const SmallVectorImpl<unsigned> &Ops) const =0;
 
   /// unfoldMemoryOperand - Separate a single instruction which folded a load or
   /// a store or a load and a store into two or more instruction. If this is
@@ -649,6 +641,8 @@
                                            bool NewMI = false) const;
   virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
                                      unsigned &SrcOpIdx2) const;
+  virtual bool canFoldMemoryOperand(const MachineInstr *MI,
+                                    const SmallVectorImpl<unsigned> &Ops) const;
   virtual bool PredicateInstruction(MachineInstr *MI,
                             const SmallVectorImpl<MachineOperand> &Pred) const;
   virtual void reMaterialize(MachineBasicBlock &MBB,

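The foldMemoryOperand() change above also changes the insertion contract:
the folded instruction is now created before MI by the callee, so callers
(see the InlineSpiller.cpp and LiveIntervalAnalysis.cpp hunks below) shrink
to "fold, then erase the old instruction". A self-contained toy of that
contract, with std::list<int> standing in for a basic block of instructions:

    #include <cstdio>
    #include <list>

    typedef std::list<int> Block;  // toy "basic block"; ints are "instructions"

    // New-style contract: insert the folded instruction before MI and
    // return it; the caller is responsible only for erasing the old one.
    Block::iterator foldMemoryOperandToy(Block &BB, Block::iterator MI) {
      return BB.insert(MI, *MI + 100);  // +100 marks "folded" in this toy
    }

    int main() {
      Block BB;
      BB.push_back(1); BB.push_back(2);
      Block::iterator MI = BB.begin();
      Block::iterator FoldMI = foldMemoryOperandToy(BB, MI);
      BB.erase(MI);                                // caller removes the old one
      std::printf("%d %d\n", *FoldMI, BB.back());  // prints "101 2"
      return 0;
    }
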
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h Sat Jul 10 00:06:30 2010
@@ -115,6 +115,11 @@
     return RegSet.count(Reg);
   }
 
+  /// contains - Return true if both registers are in this class.
+  bool contains(unsigned Reg1, unsigned Reg2) const {
+    return contains(Reg1) && contains(Reg2);
+  }
+
   /// hasType - return true if this TargetRegisterClass has the ValueType vt.
   ///
   bool hasType(EVT vt) const {

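The two-argument contains() above is shorthand for membership of both
registers, useful when, e.g., a coalescer asks whether the source and
destination of a copy live in the same class. A trivial sketch of the shape,
with a std::set standing in for the generated register table:

    #include <set>

    struct RegClassToy {
      std::set<unsigned> RegSet;
      bool contains(unsigned Reg) const { return RegSet.count(Reg) != 0; }
      // New overload: true only when *both* registers are in the class.
      bool contains(unsigned Reg1, unsigned Reg2) const {
        return contains(Reg1) && contains(Reg2);
      }
    };
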
Modified: llvm/branches/wendling/eh/lib/Analysis/IPA/CallGraph.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/IPA/CallGraph.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/IPA/CallGraph.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/IPA/CallGraph.cpp Sat Jul 10 00:06:30 2010
@@ -126,13 +126,15 @@
     }
 
     // Loop over all of the users of the function, looking for non-call uses.
-    for (Value::use_iterator I = F->use_begin(), E = F->use_end(); I != E; ++I)
-      if ((!isa<CallInst>(I) && !isa<InvokeInst>(I))
-          || !CallSite(cast<Instruction>(I)).isCallee(I)) {
+    for (Value::use_iterator I = F->use_begin(), E = F->use_end(); I != E; ++I){
+      User *U = *I;
+      if ((!isa<CallInst>(U) && !isa<InvokeInst>(U))
+          || !CallSite(cast<Instruction>(U)).isCallee(I)) {
         // Not a call, or being used as a parameter rather than as the callee.
         ExternalCallingNode->addCalledFunction(CallSite(), Node);
         break;
       }
+    }
 
     // If this function is not defined in this translation unit, it could call
     // anything.

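This hunk and several below (GlobalsModRef.cpp, LoopInfo.cpp,
PostDominators.cpp, ScalarEvolutionExpander.cpp, BitcodeReader.cpp) apply
the same mechanical refactor: dereference the use_iterator once into a named
User* and test that, instead of re-evaluating *UI in every isa<>/dyn_cast<>.
A schematic of the resulting loop shape, with toy types in place of
Value/User (the motivation is presumably upcoming use-list iterator work;
that is an inference, not stated in the log):

    #include <vector>

    struct UserToy { int Kind; };
    typedef std::vector<UserToy*>::const_iterator use_iterator;

    // After the refactor: *UI is read exactly once per iteration and the
    // result is given a name, as in the diffs above and below.
    int countKind(use_iterator UI, use_iterator E, int Kind) {
      int N = 0;
      for (; UI != E; ++UI) {
        UserToy *U = *UI;          // hoisted dereference
        if (U->Kind == Kind)
          ++N;
      }
      return N;
    }
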
Modified: llvm/branches/wendling/eh/lib/Analysis/IPA/GlobalsModRef.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/IPA/GlobalsModRef.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/IPA/GlobalsModRef.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/IPA/GlobalsModRef.cpp Sat Jul 10 00:06:30 2010
@@ -233,33 +233,34 @@
                                          GlobalValue *OkayStoreDest) {
   if (!V->getType()->isPointerTy()) return true;
 
-  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
-    if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+  for (Value::use_iterator UI = V->use_begin(), E=V->use_end(); UI != E; ++UI) {
+    User *U = *UI;
+    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
       Readers.push_back(LI->getParent()->getParent());
-    } else if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
+    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
       if (V == SI->getOperand(1)) {
         Writers.push_back(SI->getParent()->getParent());
       } else if (SI->getOperand(1) != OkayStoreDest) {
         return true;  // Storing the pointer
       }
-    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
+    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
       if (AnalyzeUsesOfPointer(GEP, Readers, Writers)) return true;
-    } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
+    } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
       if (AnalyzeUsesOfPointer(BCI, Readers, Writers, OkayStoreDest))
         return true;
-    } else if (isFreeCall(*UI)) {
-      Writers.push_back(cast<Instruction>(*UI)->getParent()->getParent());
-    } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
+    } else if (isFreeCall(U)) {
+      Writers.push_back(cast<Instruction>(U)->getParent()->getParent());
+    } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
       // Make sure that this is just the function being called, not that it is
       // passing into the function.
       for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
         if (CI->getArgOperand(i) == V) return true;
-    } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
+    } else if (InvokeInst *II = dyn_cast<InvokeInst>(U)) {
       // Make sure that this is just the function being called, not that it is
       // passing into the function.
       for (unsigned i = 0, e = II->getNumArgOperands(); i != e; ++i)
         if (II->getArgOperand(i) == V) return true;
-    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(*UI)) {
+    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
       if (CE->getOpcode() == Instruction::GetElementPtr ||
           CE->getOpcode() == Instruction::BitCast) {
         if (AnalyzeUsesOfPointer(CE, Readers, Writers))
@@ -267,12 +268,14 @@
       } else {
         return true;
       }
-    } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(*UI)) {
+    } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(U)) {
       if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
         return true;  // Allow comparison against null.
     } else {
       return true;
     }
+  }
+
   return false;
 }
 
@@ -291,7 +294,8 @@
   // Walk the user list of the global.  If we find anything other than a direct
   // load or store, bail out.
   for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){
-    if (LoadInst *LI = dyn_cast<LoadInst>(*I)) {
+    User *U = *I;
+    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
       // The pointer loaded from the global can only be used in simple ways:
       // we allow addressing of it and loading storing to it.  We do *not* allow
       // storing the loaded pointer somewhere else or passing to a function.
@@ -299,7 +303,7 @@
       if (AnalyzeUsesOfPointer(LI, ReadersWriters, ReadersWriters))
         return false;  // Loaded pointer escapes.
       // TODO: Could try some IP mod/ref of the loaded pointer.
-    } else if (StoreInst *SI = dyn_cast<StoreInst>(*I)) {
+    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
       // Storing the global itself.
       if (SI->getOperand(0) == GV) return false;
 

Modified: llvm/branches/wendling/eh/lib/Analysis/LoopInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/LoopInfo.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/LoopInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/LoopInfo.cpp Sat Jul 10 00:06:30 2010
@@ -266,15 +266,16 @@
 bool Loop::isLCSSAForm(DominatorTree &DT) const {
   // Sort the blocks vector so that we can use binary search to do quick
   // lookups.
-  SmallPtrSet<BasicBlock *, 16> LoopBBs(block_begin(), block_end());
+  SmallPtrSet<BasicBlock*, 16> LoopBBs(block_begin(), block_end());
 
   for (block_iterator BI = block_begin(), E = block_end(); BI != E; ++BI) {
     BasicBlock *BB = *BI;
     for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;++I)
       for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
            ++UI) {
-        BasicBlock *UserBB = cast<Instruction>(*UI)->getParent();
-        if (PHINode *P = dyn_cast<PHINode>(*UI))
+        User *U = *UI;
+        BasicBlock *UserBB = cast<Instruction>(U)->getParent();
+        if (PHINode *P = dyn_cast<PHINode>(U))
           UserBB = P->getIncomingBlock(UI);
 
         // Check the current block, as a fast-path, before checking whether

Modified: llvm/branches/wendling/eh/lib/Analysis/PostDominators.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/PostDominators.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/PostDominators.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/PostDominators.cpp Sat Jul 10 00:06:30 2010
@@ -67,10 +67,11 @@
   if (BB)
     for (pred_iterator SI = pred_begin(BB), SE = pred_end(BB);
          SI != SE; ++SI) {
+      BasicBlock *P = *SI;
       // Does Node immediately dominate this predecessor?
-      DomTreeNode *SINode = DT[*SI];
+      DomTreeNode *SINode = DT[P];
       if (SINode && SINode->getIDom() != Node)
-        S.insert(*SI);
+        S.insert(P);
     }
 
   // At this point, S is DFlocal.  Now we union in DFup's of our children...

Modified: llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionExpander.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionExpander.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionExpander.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionExpander.cpp Sat Jul 10 00:06:30 2010
@@ -21,18 +21,19 @@
 #include "llvm/ADT/STLExtras.h"
 using namespace llvm;
 
-/// ReuseOrCreateCast - Arange for there to be a cast of V to Ty at IP,
+/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
 /// reusing an existing cast if a suitable one exists, moving an existing
 /// cast if a suitable one exists but isn't in the right place, or
-/// or creating a new one.
+/// creating a new one.
 Value *SCEVExpander::ReuseOrCreateCast(Value *V, const Type *Ty,
                                        Instruction::CastOps Op,
                                        BasicBlock::iterator IP) {
   // Check to see if there is already a cast!
   for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
-       UI != E; ++UI)
-    if ((*UI)->getType() == Ty)
-      if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
+       UI != E; ++UI) {
+    User *U = *UI;
+    if (U->getType() == Ty)
+      if (CastInst *CI = dyn_cast<CastInst>(U))
         if (CI->getOpcode() == Op) {
           // If the cast isn't where we want it, fix it.
           if (BasicBlock::iterator(CI) != IP) {
@@ -49,6 +50,7 @@
           rememberInstruction(CI);
           return CI;
         }
+  }
 
   // Create a new cast.
   Instruction *I = CastInst::Create(Op, V, Ty, V->getName(), IP);
@@ -1118,17 +1120,19 @@
 
     Constant *One = ConstantInt::get(Ty, 1);
     for (pred_iterator HPI = pred_begin(Header), HPE = pred_end(Header);
-         HPI != HPE; ++HPI)
-      if (L->contains(*HPI)) {
+         HPI != HPE; ++HPI) {
+      BasicBlock *HP = *HPI;
+      if (L->contains(HP)) {
         // Insert a unit add instruction right before the terminator
         // corresponding to the back-edge.
         Instruction *Add = BinaryOperator::CreateAdd(PN, One, "indvar.next",
-                                                     (*HPI)->getTerminator());
+                                                           HP->getTerminator());
         rememberInstruction(Add);
-        PN->addIncoming(Add, *HPI);
+        PN->addIncoming(Add, HP);
       } else {
-        PN->addIncoming(Constant::getNullValue(Ty), *HPI);
+        PN->addIncoming(Constant::getNullValue(Ty), HP);
       }
+    }
   }
 
   // {0,+,F} --> {0,+,1} * F

Modified: llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.cpp (original)
+++ llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.cpp Sat Jul 10 00:06:30 2010
@@ -253,17 +253,18 @@
     // at once.
     while (!Placeholder->use_empty()) {
       Value::use_iterator UI = Placeholder->use_begin();
+      User *U = *UI;
 
       // If the using object isn't uniqued, just update the operands.  This
       // handles instructions and initializers for global variables.
-      if (!isa<Constant>(*UI) || isa<GlobalValue>(*UI)) {
+      if (!isa<Constant>(U) || isa<GlobalValue>(U)) {
         UI.getUse().set(RealVal);
         continue;
       }
 
       // Otherwise, we have a constant that uses the placeholder.  Replace that
       // constant with a new constant that has *all* placeholder uses updated.
-      Constant *UserC = cast<Constant>(*UI);
+      Constant *UserC = cast<Constant>(U);
       for (User::op_iterator I = UserC->op_begin(), E = UserC->op_end();
            I != E; ++I) {
         Value *NewOp;

Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp Sat Jul 10 00:06:30 2010
@@ -322,6 +322,7 @@
   DwarfDebugRangeSectionSym = DwarfDebugLocSectionSym = 0; 
   DwarfDebugLineSectionSym = CurrentLineSectionSym = 0;
   FunctionBeginSym = FunctionEndSym = 0;
+  DIEIntegerOne = new (DIEValueAllocator) DIEInteger(1);
   {
     NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
     beginModule(M);
@@ -376,7 +377,8 @@
 void DwarfDebug::addUInt(DIE *Die, unsigned Attribute,
                          unsigned Form, uint64_t Integer) {
   if (!Form) Form = DIEInteger::BestForm(false, Integer);
-  DIEValue *Value = new (DIEValueAllocator) DIEInteger(Integer);
+  DIEValue *Value = Integer == 1 ? 
+    DIEIntegerOne : new (DIEValueAllocator) DIEInteger(Integer);
   Die->addValue(Attribute, Form, Value);
 }
 
@@ -1502,7 +1504,7 @@
   const MCSymbol *StartLabel = getLabelBeforeInsn(RI->first);
   const MCSymbol *EndLabel = getLabelAfterInsn(RI->second);
 
-  if (StartLabel == FunctionBeginSym || EndLabel == 0) {
+  if (StartLabel == 0 || EndLabel == 0) {
     assert (0 && "Unexpected Start and End  labels for a inlined scope!");
     return 0;
   }

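The DwarfDebug change above preallocates a single DIEInteger node for the
value 1 and has addUInt() hand that shared node back instead of allocating a
fresh one each time; 1 is presumably the most common attribute value, and
sharing is safe so long as the node is never mutated. A sketch of the
pattern with plain new in place of the BumpPtrAllocator the real code uses:

    // Flyweight-style reuse of one immutable value, mirroring DIEIntegerOne.
    struct DIEIntegerToy {
      unsigned long long Value;
      explicit DIEIntegerToy(unsigned long long V) : Value(V) {}
    };

    static DIEIntegerToy *DIEIntegerOneToy = new DIEIntegerToy(1);

    DIEIntegerToy *makeInteger(unsigned long long Integer) {
      // Share the node for 1; allocate fresh nodes for everything else.
      return Integer == 1 ? DIEIntegerOneToy : new DIEIntegerToy(Integer);
    }
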
Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.h Sat Jul 10 00:06:30 2010
@@ -263,6 +263,8 @@
   MCSymbol *DwarfDebugLocSectionSym;
   MCSymbol *DwarfDebugLineSectionSym, *CurrentLineSectionSym;
   MCSymbol *FunctionBeginSym, *FunctionEndSym;
+
+  DIEInteger *DIEIntegerOne;
 private:
   
   /// getSourceDirectoryAndFileIds - Return the directory and file ids that

Modified: llvm/branches/wendling/eh/lib/CodeGen/InlineSpiller.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/InlineSpiller.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/InlineSpiller.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/InlineSpiller.cpp Sat Jul 10 00:06:30 2010
@@ -282,13 +282,12 @@
       FoldOps.push_back(Idx);
   }
 
-  MachineInstr *FoldMI = tii_.foldMemoryOperand(mf_, MI, FoldOps, stackSlot_);
+  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
   if (!FoldMI)
     return false;
-  MachineBasicBlock &MBB = *MI->getParent();
   lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
   vrm_.addSpillSlotUse(stackSlot_, FoldMI);
-  MBB.insert(MBB.erase(MI), FoldMI);
+  MI->eraseFromParent();
   DEBUG(dbgs() << "\tfolded: " << *FoldMI);
   return true;
 }

Modified: llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp Sat Jul 10 00:06:30 2010
@@ -192,6 +192,10 @@
     if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
       if (SrcReg == li.reg || DstReg == li.reg)
         continue;
+    if (MI.isCopy())
+      if (MI.getOperand(0).getReg() == li.reg ||
+          MI.getOperand(1).getReg() == li.reg)
+        continue;
 
     // Check for operands using reg
     for (unsigned i = 0, e = MI.getNumOperands(); i != e;  ++i) {
@@ -324,12 +328,6 @@
     if (mi->isCopyLike() ||
         tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
       CopyMI = mi;
-
-      // Some of the REG_SEQUENCE lowering in TwoAddressInstrPass creates
-      // implicit defs without really knowing. It shows up as INSERT_SUBREG
-      // using an undefined register.
-      if (mi->isInsertSubreg())
-        mi->getOperand(1).setIsUndef();
     }
 
     VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
@@ -949,22 +947,22 @@
   if (DefMI && (MRInfo & VirtRegMap::isMod))
     return false;
 
-  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
-                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
+  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
+                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
   if (fmi) {
     // Remember this instruction uses the spill slot.
     if (isSS) vrm.addSpillSlotUse(Slot, fmi);
 
     // Attempt to fold the memory reference into the instruction. If
     // we can do this, we don't need to insert spill code.
-    MachineBasicBlock &MBB = *MI->getParent();
     if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
       vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
     vrm.transferSpillPts(MI, fmi);
     vrm.transferRestorePts(MI, fmi);
     vrm.transferEmergencySpills(MI, fmi);
     ReplaceMachineInstrInMaps(MI, fmi);
-    MI = MBB.insert(MBB.erase(MI), fmi);
+    MI->eraseFromParent();
+    MI = fmi;
     ++numFolds;
     return true;
   }

Modified: llvm/branches/wendling/eh/lib/CodeGen/LowerSubregs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/LowerSubregs.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/LowerSubregs.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/LowerSubregs.cpp Sat Jul 10 00:06:30 2010
@@ -53,8 +53,6 @@
     bool runOnMachineFunction(MachineFunction&);
 
   private:
-    bool LowerExtract(MachineInstr *MI);
-    bool LowerInsert(MachineInstr *MI);
     bool LowerSubregToReg(MachineInstr *MI);
     bool LowerCopy(MachineInstr *MI);
 
@@ -122,57 +120,6 @@
   }
 }
 
-bool LowerSubregsInstructionPass::LowerExtract(MachineInstr *MI) {
-  MachineBasicBlock *MBB = MI->getParent();
-
-  assert(MI->getOperand(0).isReg() && MI->getOperand(0).isDef() &&
-         MI->getOperand(1).isReg() && MI->getOperand(1).isUse() &&
-         MI->getOperand(2).isImm() && "Malformed extract_subreg");
-
-  unsigned DstReg   = MI->getOperand(0).getReg();
-  unsigned SuperReg = MI->getOperand(1).getReg();
-  unsigned SubIdx   = MI->getOperand(2).getImm();
-  unsigned SrcReg   = TRI->getSubReg(SuperReg, SubIdx);
-
-  assert(TargetRegisterInfo::isPhysicalRegister(SuperReg) &&
-         "Extract supperg source must be a physical register");
-  assert(TargetRegisterInfo::isPhysicalRegister(DstReg) &&
-         "Extract destination must be in a physical register");
-  assert(SrcReg && "invalid subregister index for register");
-
-  DEBUG(dbgs() << "subreg: CONVERTING: " << *MI);
-
-  if (SrcReg == DstReg) {
-    // No need to insert an identity copy instruction.
-    if (MI->getOperand(1).isKill()) {
-      // We must make sure the super-register gets killed. Replace the
-      // instruction with KILL.
-      MI->setDesc(TII->get(TargetOpcode::KILL));
-      MI->RemoveOperand(2);     // SubIdx
-      DEBUG(dbgs() << "subreg: replace by: " << *MI);
-      return true;
-    }
-
-    DEBUG(dbgs() << "subreg: eliminated!");
-  } else {
-    TII->copyPhysReg(*MBB, MI, MI->getDebugLoc(), DstReg, SrcReg, false);
-    // Transfer the kill/dead flags, if needed.
-    if (MI->getOperand(0).isDead())
-      TransferDeadFlag(MI, DstReg, TRI);
-    if (MI->getOperand(1).isKill())
-      TransferKillFlag(MI, SuperReg, TRI, true);
-    TransferImplicitDefs(MI);
-    DEBUG({
-        MachineBasicBlock::iterator dMI = MI;
-        dbgs() << "subreg: " << *(--dMI);
-      });
-  }
-
-  DEBUG(dbgs() << '\n');
-  MBB->erase(MI);
-  return true;
-}
-
 bool LowerSubregsInstructionPass::LowerSubregToReg(MachineInstr *MI) {
   MachineBasicBlock *MBB = MI->getParent();
   assert((MI->getOperand(0).isReg() && MI->getOperand(0).isDef()) &&
@@ -225,85 +172,6 @@
   return true;
 }
 
-bool LowerSubregsInstructionPass::LowerInsert(MachineInstr *MI) {
-  MachineBasicBlock *MBB = MI->getParent();
-  assert((MI->getOperand(0).isReg() && MI->getOperand(0).isDef()) &&
-         (MI->getOperand(1).isReg() && MI->getOperand(1).isUse()) &&
-         (MI->getOperand(2).isReg() && MI->getOperand(2).isUse()) &&
-          MI->getOperand(3).isImm() && "Invalid insert_subreg");
-          
-  unsigned DstReg = MI->getOperand(0).getReg();
-#ifndef NDEBUG
-  unsigned SrcReg = MI->getOperand(1).getReg();
-#endif
-  unsigned InsReg = MI->getOperand(2).getReg();
-  unsigned SubIdx = MI->getOperand(3).getImm();     
-
-  assert(DstReg == SrcReg && "insert_subreg not a two-address instruction?");
-  assert(SubIdx != 0 && "Invalid index for insert_subreg");
-  unsigned DstSubReg = TRI->getSubReg(DstReg, SubIdx);
-  assert(DstSubReg && "invalid subregister index for register");
-  assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
-         "Insert superreg source must be in a physical register");
-  assert(TargetRegisterInfo::isPhysicalRegister(InsReg) &&
-         "Inserted value must be in a physical register");
-
-  DEBUG(dbgs() << "subreg: CONVERTING: " << *MI);
-
-  if (DstSubReg == InsReg) {
-    // No need to insert an identity copy instruction. If the SrcReg was
-    // <undef>, we need to make sure it is alive by inserting a KILL
-    if (MI->getOperand(1).isUndef() && !MI->getOperand(0).isDead()) {
-      MachineInstrBuilder MIB = BuildMI(*MBB, MI, MI->getDebugLoc(),
-                                TII->get(TargetOpcode::KILL), DstReg);
-      if (MI->getOperand(2).isUndef())
-        MIB.addReg(InsReg, RegState::Undef);
-      else
-        MIB.addReg(InsReg, RegState::Kill);
-    } else {
-      DEBUG(dbgs() << "subreg: eliminated!\n");
-      MBB->erase(MI);
-      return true;
-    }
-  } else {
-    // Insert sub-register copy
-    if (MI->getOperand(2).isUndef())
-      // If the source register being inserted is undef, then this becomes a
-      // KILL.
-      BuildMI(*MBB, MI, MI->getDebugLoc(),
-              TII->get(TargetOpcode::KILL), DstSubReg);
-    else {
-      TII->copyPhysReg(*MBB, MI, MI->getDebugLoc(), DstSubReg, InsReg, false);
-    }
-    MachineBasicBlock::iterator CopyMI = MI;
-    --CopyMI;
-
-    // INSERT_SUBREG is a two-address instruction so it implicitly kills SrcReg.
-    if (!MI->getOperand(1).isUndef())
-      CopyMI->addOperand(MachineOperand::CreateReg(DstReg, false, true, true));
-
-    // Transfer the kill/dead flags, if needed.
-    if (MI->getOperand(0).isDead()) {
-      TransferDeadFlag(MI, DstSubReg, TRI);
-    } else {
-      // Make sure the full DstReg is live after this replacement.
-      CopyMI->addOperand(MachineOperand::CreateReg(DstReg, true, true));
-    }
-
-    // Make sure the inserted register gets killed
-    if (MI->getOperand(2).isKill() && !MI->getOperand(2).isUndef())
-      TransferKillFlag(MI, InsReg, TRI);
-  }
-
-  DEBUG({
-      MachineBasicBlock::iterator dMI = MI;
-      dbgs() << "subreg: " << *(--dMI) << "\n";
-    });
-
-  MBB->erase(MI);
-  return true;
-}
-
 bool LowerSubregsInstructionPass::LowerCopy(MachineInstr *MI) {
   MachineOperand &DstMO = MI->getOperand(0);
   MachineOperand &SrcMO = MI->getOperand(1);
@@ -359,11 +227,10 @@
          mi != me;) {
       MachineBasicBlock::iterator nmi = llvm::next(mi);
       MachineInstr *MI = mi;
-      if (MI->isExtractSubreg()) {
-        MadeChange |= LowerExtract(MI);
-      } else if (MI->isInsertSubreg()) {
-        MadeChange |= LowerInsert(MI);
-      } else if (MI->isSubregToReg()) {
+      assert(!MI->isInsertSubreg() && "INSERT_SUBREG should no longer appear");
+      assert(MI->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
+             "EXTRACT_SUBREG should no longer appear");
+      if (MI->isSubregToReg()) {
         MadeChange |= LowerSubregToReg(MI);
       } else if (MI->isCopy()) {
         MadeChange |= LowerCopy(MI);
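
LowerExtract and LowerInsert can be deleted because EXTRACT_SUBREG and INSERT_SUBREG no longer reach this pass: both are now expressed as COPY instructions that carry sub-register indices on their operands, which the new asserts enforce. A sketch of the equivalent forms, using placeholder register names rather than anything from this patch:

    // %dst = EXTRACT_SUBREG %src, idx     ==>   %dst = COPY %src:idx
    // %dst = INSERT_SUBREG %dst, %v, idx  ==>   %dst:idx = COPY %v
    BuildMI(*MBB, InsertPos, DL, TII->get(TargetOpcode::COPY))
        .addReg(DstReg, RegState::Define, DstSubIdx) // sub-index on the def
        .addReg(SrcReg, 0, SrcSubIdx);               // sub-index on the use

The TwoAddressInstructionPass hunk below builds exactly this form when it replaces the old extract instructions.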

Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineCSE.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineCSE.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineCSE.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineCSE.cpp Sat Jul 10 00:06:30 2010
@@ -126,6 +126,28 @@
       ++NumCoalesces;
       Changed = true;
     }
+
+    if (!DefMI->isCopy())
+      continue;
+    SrcReg = DefMI->getOperand(1).getReg();
+    if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
+      continue;
+    if (DefMI->getOperand(0).getSubReg() || DefMI->getOperand(1).getSubReg())
+      continue;
+    const TargetRegisterClass *SRC   = MRI->getRegClass(SrcReg);
+    const TargetRegisterClass *RC    = MRI->getRegClass(Reg);
+    const TargetRegisterClass *NewRC = getCommonSubClass(RC, SRC);
+    if (!NewRC)
+      continue;
+    DEBUG(dbgs() << "Coalescing: " << *DefMI);
+    DEBUG(dbgs() << "*** to: " << *MI);
+    MO.setReg(SrcReg);
+    MRI->clearKillFlags(SrcReg);
+    if (NewRC != SRC)
+      MRI->setRegClass(SrcReg, NewRC);
+    DefMI->eraseFromParent();
+    ++NumCoalesces;
+    Changed = true;
   }
 
   return Changed;
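
The added block coalesces a full-register COPY between virtual registers while scanning for CSE candidates: the use is rewritten to the copy's source, the source's class is constrained to the common subclass of both registers, and the copy itself is deleted. Clearing the kill flags on SrcReg is the subtle part, since rewriting the use extends SrcReg's live range past any kill recorded on it. Condensed, the rewrite is:

    // %a = COPY %b;  ... use %a ...   ==>   ... use %b ...
    MO.setReg(SrcReg);
    MRI->clearKillFlags(SrcReg);        // SrcReg now lives longer
    if (NewRC != SRC)
      MRI->setRegClass(SrcReg, NewRC);  // tighten to the common subclass
    DefMI->eraseFromParent();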

Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineLICM.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineLICM.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineLICM.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineLICM.cpp Sat Jul 10 00:06:30 2010
@@ -199,9 +199,14 @@
 /// LoopIsOuterMostWithPredecessor - Test if the given loop is the outer-most
 /// loop that has a unique predecessor.
 static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
+  // Check whether this loop even has a unique predecessor.
+  if (!CurLoop->getLoopPredecessor())
+    return false;
+  // Ok, now check to see if any of its outer loops do.
   for (MachineLoop *L = CurLoop->getParentLoop(); L; L = L->getParentLoop())
     if (L->getLoopPredecessor())
       return false;
+  // None of them did, so this is the outermost with a unique predecessor.
   return true;
 }
 
@@ -224,14 +229,17 @@
   DT  = &getAnalysis<MachineDominatorTree>();
   AA  = &getAnalysis<AliasAnalysis>();
 
-  for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end(); I != E; ++I){
-    CurLoop = *I;
+  SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end());
+  while (!Worklist.empty()) {
+    CurLoop = Worklist.pop_back_val();
     CurPreheader = 0;
 
     // If this is done before regalloc, only visit outer-most preheader-sporting
     // loops.
-    if (PreRegAlloc && !LoopIsOuterMostWithPredecessor(CurLoop))
+    if (PreRegAlloc && !LoopIsOuterMostWithPredecessor(CurLoop)) {
+      Worklist.append(CurLoop->begin(), CurLoop->end());
       continue;
+    }
 
     if (!PreRegAlloc)
       HoistRegionPostRA();
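
Switching from a plain walk over the top-level loops to a worklist lets the pass recover when an outer loop is rejected: its subloops get their own chance instead of being skipped along with the parent. The generic shape of the traversal, with Suitable and Process as hypothetical stand-ins for the predecessor check and the hoisting done here:

    SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end());
    while (!Worklist.empty()) {
      MachineLoop *L = Worklist.pop_back_val();
      if (!Suitable(L)) {                      // e.g. no unique predecessor
        Worklist.append(L->begin(), L->end()); // retry with the subloops
        continue;
      }
      Process(L);
    }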

Modified: llvm/branches/wendling/eh/lib/CodeGen/OptimizeExts.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/OptimizeExts.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/OptimizeExts.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/OptimizeExts.cpp Sat Jul 10 00:06:30 2010
@@ -128,12 +128,12 @@
       //
       //    %reg1025 = <sext> %reg1024
       //     ...
-      //    %reg1027 = EXTRACT_SUBREG %reg1025, 4
+      //    %reg1027 = COPY %reg1025:4
       //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
       //
       // The problem here is that SUBREG_TO_REG is there to assert that an
       // implicit zext occurs. It doesn't insert a zext instruction. If we allow
-      // the EXTRACT_SUBREG here, it will give us the value after the <sext>,
+      // the COPY here, it will give us the value after the <sext>,
       // not the original value of %reg1024 before <sext>.
       if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
         continue;
@@ -185,8 +185,8 @@
           continue;
         unsigned NewVR = MRI->createVirtualRegister(RC);
         BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
-                TII->get(TargetOpcode::EXTRACT_SUBREG), NewVR)
-          .addReg(DstReg).addImm(SubIdx);
+                TII->get(TargetOpcode::COPY), NewVR)
+          .addReg(DstReg, 0, SubIdx);
         UseMO->setReg(NewVR);
         ++NumReuse;
         Changed = true;
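
The rewritten BuildMI call relies on the three-argument form of addReg, whose last parameter is a sub-register index, so the index that EXTRACT_SUBREG carried as a separate immediate operand now rides on the COPY's source operand:

    // old: %NewVR = EXTRACT_SUBREG %DstReg, SubIdx  (index as an immediate)
    // new: %NewVR = COPY %DstReg:SubIdx             (index on the operand)
    BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
            TII->get(TargetOpcode::COPY), NewVR)
        .addReg(DstReg, 0, SubIdx);  // addReg(Reg, Flags, SubRegIndex)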

Modified: llvm/branches/wendling/eh/lib/CodeGen/PreAllocSplitting.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/PreAllocSplitting.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/PreAllocSplitting.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/PreAllocSplitting.cpp Sat Jul 10 00:06:30 2010
@@ -863,12 +863,11 @@
     SS = MFI->CreateSpillStackObject(RC->getSize(), RC->getAlignment());
   }
   
-  MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
-                                             FoldPt, Ops, SS);
+  MachineInstr* FMI = TII->foldMemoryOperand(FoldPt, Ops, SS);
   
   if (FMI) {
     LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
-    FMI = MBB->insert(MBB->erase(FoldPt), FMI);
+    FoldPt->eraseFromParent();
     ++NumFolds;
     
     IntervalSSMap[vreg] = SS;
@@ -944,12 +943,11 @@
   if (!TII->canFoldMemoryOperand(FoldPt, Ops))
     return 0;
   
-  MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
-                                             FoldPt, Ops, SS);
+  MachineInstr* FMI = TII->foldMemoryOperand(FoldPt, Ops, SS);
   
   if (FMI) {
     LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
-    FMI = MBB->insert(MBB->erase(FoldPt), FMI);
+    FoldPt->eraseFromParent();
     ++NumRestoreFolds;
   }
   
@@ -1255,9 +1253,7 @@
         Ops.push_back(OpIdx);
         if (!TII->canFoldMemoryOperand(use, Ops)) continue;
 
-        MachineInstr* NewMI =
-                          TII->foldMemoryOperand(*use->getParent()->getParent(),  
-                                                 use, Ops, FrameIndex);
+        MachineInstr* NewMI = TII->foldMemoryOperand(use, Ops, FrameIndex);
 
         if (!NewMI) continue;
 
@@ -1267,10 +1263,9 @@
         (*LI)->removeValNo(CurrVN);
 
         DefMI->eraseFromParent();
-        MachineBasicBlock* MBB = use->getParent();
-        NewMI = MBB->insert(MBB->erase(use), NewMI);
+        use->eraseFromParent();
         VNUseCount[CurrVN].erase(use);
-        
+
         // Remove deleted instructions.  Note that we need to remove them from 
         // the VNInfo->use map as well, just to be safe.
         for (SmallPtrSet<MachineInstr*, 4>::iterator II = 

Modified: llvm/branches/wendling/eh/lib/CodeGen/ProcessImplicitDefs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/ProcessImplicitDefs.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/ProcessImplicitDefs.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/ProcessImplicitDefs.cpp Sat Jul 10 00:06:30 2010
@@ -50,8 +50,7 @@
     return true;
 
   switch(OpIdx) {
-    case 1: return (MI->isExtractSubreg() || MI->isCopy()) &&
-                   MI->getOperand(0).getSubReg() == 0;
+    case 1: return MI->isCopy() && MI->getOperand(0).getSubReg() == 0;
     case 2: return MI->isSubregToReg() && MI->getOperand(0).getSubReg() == 0;
     default: return false;
   }
@@ -102,21 +101,6 @@
         continue;
       }
 
-      if (MI->isInsertSubreg()) {
-        MachineOperand &MO = MI->getOperand(2);
-        if (ImpDefRegs.count(MO.getReg())) {
-          // %reg1032<def> = INSERT_SUBREG %reg1032, undef, 2
-          // This is an identity copy, eliminate it now.
-          if (MO.isKill()) {
-            LiveVariables::VarInfo& vi = lv_->getVarInfo(MO.getReg());
-            vi.removeKill(MI);
-          }
-          MI->eraseFromParent();
-          Changed = true;
-          continue;
-        }
-      }
-
       // Eliminate %reg1032:sub<def> = COPY undef.
       if (MI->isCopy() && MI->getOperand(0).getSubReg()) {
         MachineOperand &MO = MI->getOperand(1);

Modified: llvm/branches/wendling/eh/lib/CodeGen/PrologEpilogInserter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/PrologEpilogInserter.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/PrologEpilogInserter.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/PrologEpilogInserter.cpp Sat Jul 10 00:06:30 2010
@@ -885,21 +885,7 @@
             // Scavenge a new scratch register
             CurrentVirtReg = Reg;
             const TargetRegisterClass *RC = Fn.getRegInfo().getRegClass(Reg);
-            const TargetRegisterInfo *TRI = Fn.getTarget().getRegisterInfo();
-            BitVector Candidates(TRI->getNumRegs());
-            RS->getRegsAvailable(RC, Candidates);
-
-            // If there are any registers available, use the one that's
-            // unused for the longest after this instruction. That increases
-            // the ability to reuse the value.
-            if (Candidates.any()) {
-              MachineBasicBlock::iterator UMI;
-              CurrentScratchReg = RS->findSurvivorReg(I, Candidates, 25, UMI);
-            } else {
-              // No register is "free". Scavenge a register.
-              CurrentScratchReg = RS->scavengeRegister(RC, I, SPAdj);
-            }
-
+            CurrentScratchReg = RS->scavengeRegister(RC, I, SPAdj);
             PrevValue = Value;
           }
           // replace this reference to the virtual register with the

Modified: llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp Sat Jul 10 00:06:30 2010
@@ -764,8 +764,25 @@
         LiveRegMap::iterator LRI = LiveVirtRegs.find(Reg);
         if (LRI != LiveVirtRegs.end())
           setPhysReg(MI, i, LRI->second.PhysReg);
-        else
-          MO.setReg(0); // We can't allocate a physreg for a DebugValue, sorry!
+        else {
+          int SS = StackSlotForVirtReg[Reg];
+          if (SS == -1)
+            MO.setReg(0); // We can't allocate a physreg for a DebugValue, sorry!
+          else {
+            // Modify DBG_VALUE now that the value is in a spill slot.
+            uint64_t Offset = MI->getOperand(1).getImm();
+            const MDNode *MDPtr = 
+              MI->getOperand(MI->getNumOperands()-1).getMetadata();
+            DebugLoc DL = MI->getDebugLoc();
+            if (MachineInstr *NewDV = 
+                TII->emitFrameIndexDebugValue(*MF, SS, Offset, MDPtr, DL)) {
+              DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
+              MachineBasicBlock *MBB = MI->getParent();
+              MBB->insert(MBB->erase(MI), NewDV);
+            } else
+              MO.setReg(0); // We can't allocate a physreg for a DebugValue, sorry!
+          }
+        }
       }
       // Next instruction.
       continue;
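
Rather than dropping the location of a spilled variable, the allocator now asks the target to re-express the DBG_VALUE against the stack slot. emitFrameIndexDebugValue is a target hook and may return null when the target has no frame-index DBG_VALUE encoding, in which case the old reg-0 fallback still applies. The shape of the rewrite, with names as above:

    if (MachineInstr *NewDV =
            TII->emitFrameIndexDebugValue(*MF, SS, Offset, MDPtr, DL))
      MBB->insert(MBB->erase(MI), NewDV);  // replace the DBG_VALUE in place
    else
      MO.setReg(0);                        // location unknown; variable kept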

Modified: llvm/branches/wendling/eh/lib/CodeGen/RegAllocLinearScan.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegAllocLinearScan.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegAllocLinearScan.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegAllocLinearScan.cpp Sat Jul 10 00:06:30 2010
@@ -422,9 +422,10 @@
     unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
     if (vni->def != SlotIndex() && vni->isDefAccurate() &&
         (CopyMI = li_->getInstructionFromIndex(vni->def)) &&
-        tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubReg, DstSubReg))
+        (CopyMI->isCopy() ||
+         tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubReg, DstSubReg)))
       // Defined by a copy, try to extend SrcReg forward
-      CandReg = SrcReg;
+      CandReg = CopyMI->isCopy() ? CopyMI->getOperand(1).getReg() : SrcReg;
     else if (TrivCoalesceEnds &&
              (CopyMI =
               li_->getInstructionFromIndex(range.end.getBaseIndex())) &&
@@ -993,6 +994,24 @@
           if (Reg && allocatableRegs_[Reg] && RC->contains(Reg))
             mri_->setRegAllocationHint(cur->reg, 0, Reg);
         }
+      } else if (CopyMI && CopyMI->isCopy()) {
+        DstReg = CopyMI->getOperand(0).getReg();
+        DstSubReg = CopyMI->getOperand(0).getSubReg();
+        SrcReg = CopyMI->getOperand(1).getReg();
+        SrcSubReg = CopyMI->getOperand(1).getSubReg();
+        unsigned Reg = 0;
+        if (TargetRegisterInfo::isPhysicalRegister(SrcReg))
+          Reg = SrcReg;
+        else if (vrm_->isAssignedReg(SrcReg))
+          Reg = vrm_->getPhys(SrcReg);
+        if (Reg) {
+          if (SrcSubReg)
+            Reg = tri_->getSubReg(Reg, SrcSubReg);
+          if (DstSubReg)
+            Reg = tri_->getMatchingSuperReg(Reg, DstSubReg, RC);
+          if (Reg && allocatableRegs_[Reg] && RC->contains(Reg))
+            mri_->setRegAllocationHint(cur->reg, 0, Reg);
+        }
       }
     }
   }

Modified: llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp Sat Jul 10 00:06:30 2010
@@ -49,12 +49,7 @@
     DstSub = MI->getOperand(0).getSubReg();
     Src = MI->getOperand(1).getReg();
     SrcSub = MI->getOperand(1).getSubReg();
-  } else if (MI->isExtractSubreg()) {
-    Dst = MI->getOperand(0).getReg();
-    DstSub = MI->getOperand(0).getSubReg();
-    Src = MI->getOperand(1).getReg();
-    SrcSub = compose(MI->getOperand(1).getSubReg(), MI->getOperand(2).getImm());
-  } else if (MI->isInsertSubreg() || MI->isSubregToReg()) {
+  } else if (MI->isSubregToReg()) {
     Dst = MI->getOperand(0).getReg();
     DstSub = compose(MI->getOperand(0).getSubReg(), MI->getOperand(3).getImm());
     Src = MI->getOperand(2).getReg();

Modified: llvm/branches/wendling/eh/lib/CodeGen/RegisterScavenging.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegisterScavenging.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegisterScavenging.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegisterScavenging.cpp Sat Jul 10 00:06:30 2010
@@ -339,13 +339,16 @@
       Candidates.reset(MO.getReg());
   }
 
+  // Try to find a register that's unused if there is one, since then we
+  // won't have to spill.
+  if ((Candidates & RegsAvailable).any())
+    Candidates &= RegsAvailable;
+
   // Find the register whose use is furthest away.
   MachineBasicBlock::iterator UseMI;
   unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI);
 
-  // If we found an unused register there is no reason to spill it. We have
-  // probably found a callee-saved register that has been saved in the
-  // prologue, but happens to be unused at this point.
+  // If we found an unused register there is no reason to spill it.
   if (!isAliasUsed(SReg))
     return SReg;
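
This intersection is what makes the hand-rolled candidate search deleted from PrologEpilogInserter.cpp above redundant: scavengeRegister itself now prefers a register that is currently unused, since handing one out costs no spill. The preference is a plain bit-vector narrowing:

    // Keep only currently-available candidates when any exist; otherwise
    // fall through and spill the register whose next use is furthest away.
    if ((Candidates & RegsAvailable).any())
      Candidates &= RegsAvailable;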
 

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Sat Jul 10 00:06:30 2010
@@ -6309,8 +6309,6 @@
 }
 
 SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
-  return SDValue();
-  
   EVT VT = N->getValueType(0);
   unsigned NumElts = VT.getVectorNumElements();
 
@@ -6892,38 +6890,34 @@
     }
   }
 
-  // Check to see if this is an integer abs. select_cc setl[te] X, 0, -X, X ->
+  // Check to see if this is an integer abs.
+  // select_cc setg[te] X,  0,  X, -X ->
+  // select_cc setgt    X, -1,  X, -X ->
+  // select_cc setl[te] X,  0, -X,  X ->
+  // select_cc setlt    X,  1, -X,  X ->
   // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
-  if (N1C && N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE) &&
-      N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1) &&
-      N2.getOperand(0) == N1 && N0.getValueType().isInteger()) {
+  if (N1C) {
+    ConstantSDNode *SubC = NULL;
+    if (((N1C->isNullValue() && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
+         (N1C->isAllOnesValue() && CC == ISD::SETGT)) &&
+        N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1))
+      SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0));
+    else if (((N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE)) ||
+              (N1C->isOne() && CC == ISD::SETLT)) &&
+             N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1))
+      SubC = dyn_cast<ConstantSDNode>(N2.getOperand(0));
+
     EVT XType = N0.getValueType();
-    SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType, N0,
-                                DAG.getConstant(XType.getSizeInBits()-1,
-                                                getShiftAmountTy()));
-    SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(), XType,
-                              N0, Shift);
-    AddToWorkList(Shift.getNode());
-    AddToWorkList(Add.getNode());
-    return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
-  }
-  // Check to see if this is an integer abs. select_cc setgt X, -1, X, -X ->
-  // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
-  if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT &&
-      N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1)) {
-    if (ConstantSDNode *SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0))) {
-      EVT XType = N0.getValueType();
-      if (SubC->isNullValue() && XType.isInteger()) {
-        SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType,
-                                    N0,
-                                    DAG.getConstant(XType.getSizeInBits()-1,
-                                                    getShiftAmountTy()));
-        SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(),
-                                  XType, N0, Shift);
-        AddToWorkList(Shift.getNode());
-        AddToWorkList(Add.getNode());
-        return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
-      }
+    if (SubC && SubC->isNullValue() && XType.isInteger()) {
+      SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType,
+                                  N0,
+                                  DAG.getConstant(XType.getSizeInBits()-1,
+                                                  getShiftAmountTy()));
+      SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(),
+                                XType, N0, Shift);
+      AddToWorkList(Shift.getNode());
+      AddToWorkList(Add.getNode());
+      return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
     }
   }
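
All four select_cc patterns listed in the new comment funnel into the same branchless sequence. Written out for 32-bit integers as a standalone illustration (assuming an arithmetic right shift of negative values, as LLVM's targets provide):

    int32_t abs32(int32_t X) {
      int32_t Y = X >> 31;  // sra(X, size(X)-1): all ones if X < 0, else 0
      return (X + Y) ^ Y;   // xor(add(X, Y), Y)
    }

For negative X, Y is -1, so X + Y is X - 1 and xoring with all ones bitwise-negates it, giving ~(X - 1) = -X in two's complement; for non-negative X both steps are no-ops.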
 

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp Sat Jul 10 00:06:30 2010
@@ -1141,24 +1141,11 @@
 unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                               unsigned Op0, bool Op0IsKill,
                                               uint32_t Idx) {
-  const TargetRegisterClass* RC = MRI.getRegClass(Op0);
-  
   unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
-  const TargetInstrDesc &II = TII.get(TargetOpcode::EXTRACT_SUBREG);
-  
-  if (II.getNumDefs() >= 1)
-    BuildMI(MBB, DL, II, ResultReg)
-      .addReg(Op0, Op0IsKill * RegState::Kill)
-      .addImm(Idx);
-  else {
-    BuildMI(MBB, DL, II)
-      .addReg(Op0, Op0IsKill * RegState::Kill)
-      .addImm(Idx);
-    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
-                                         II.ImplicitDefs[0], RC, RC, DL);
-    if (!InsertedCopy)
-      ResultReg = 0;
-  }
+  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
+         "Cannot yet extract from physregs");
+  BuildMI(MBB, DL, TII.get(TargetOpcode::COPY), ResultReg)
+    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
   return ResultReg;
 }
 

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp Sat Jul 10 00:06:30 2010
@@ -46,9 +46,11 @@
   if (isa<PHINode>(I)) return true;
   const BasicBlock *BB = I->getParent();
   for (Value::const_use_iterator UI = I->use_begin(), E = I->use_end();
-        UI != E; ++UI)
-    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
+        UI != E; ++UI) {
+    const User *U = *UI;
+    if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
       return true;
+  }
   return false;
 }
 
@@ -63,9 +65,11 @@
 
   const BasicBlock *Entry = A->getParent()->begin();
   for (Value::const_use_iterator UI = A->use_begin(), E = A->use_end();
-       UI != E; ++UI)
-    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
+       UI != E; ++UI) {
+    const User *U = *UI;
+    if (cast<Instruction>(U)->getParent() != Entry || isa<SwitchInst>(U))
       return false;  // Use not in entry block.
+  }
   return true;
 }
 

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/InstrEmitter.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/InstrEmitter.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/InstrEmitter.cpp Sat Jul 10 00:06:30 2010
@@ -428,12 +428,9 @@
   }
   
   if (Opc == TargetOpcode::EXTRACT_SUBREG) {
+    // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub
     unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
 
-    // Create the extract_subreg machine instruction.
-    MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(),
-                               TII->get(TargetOpcode::EXTRACT_SUBREG));
-
     // Figure out the register class to create for the destreg.
     unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
     const TargetRegisterClass *TRC = MRI->getRegClass(VReg);
@@ -450,11 +447,16 @@
       VRBase = MRI->createVirtualRegister(SRC);
     }
 
-    // Add def, source, and subreg index
-    MI->addOperand(MachineOperand::CreateReg(VRBase, true));
+    // Create the extract_subreg machine instruction.
+    MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(),
+                               TII->get(TargetOpcode::COPY), VRBase);
+
+    // Add source, and subreg index
     AddOperand(MI, Node->getOperand(0), 0, 0, VRBaseMap, /*IsDebug=*/false,
                IsClone, IsCloned);
-    MI->addOperand(MachineOperand::CreateImm(SubIdx));
+    assert(TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg()) &&
+           "Cannot yet extract from physregs");
+    MI->getOperand(1).setSubReg(SubIdx);
     MBB->insert(InsertPos, MI);
   } else if (Opc == TargetOpcode::INSERT_SUBREG ||
              Opc == TargetOpcode::SUBREG_TO_REG) {

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp Sat Jul 10 00:06:30 2010
@@ -300,7 +300,11 @@
       for (MachineBasicBlock::const_iterator
              II = MBB->begin(), IE = MBB->end(); II != IE; ++II) {
         const TargetInstrDesc &TID = TM.getInstrInfo()->get(II->getOpcode());
-        if (II->isInlineAsm() || (TID.isCall() && !TID.isReturn())) {
+
+        // Operand 1 of an inline asm instruction indicates whether the asm
+        // needs the stack or not.
+        if ((II->isInlineAsm() && II->getOperand(1).getImm()) ||
+            (TID.isCall() && !TID.isReturn())) {
           MFI->setHasCalls(true);
           goto done;
         }
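
Previously every inline asm instruction forced MFI->setHasCalls(true); now only asm whose operand-1 immediate is nonzero does, per the comment above, alongside calls that are not also returns. Condensed:

    // Stack-using inline asm and genuine calls both mark the function as
    // having calls for the purposes of frame lowering.
    if ((II->isInlineAsm() && II->getOperand(1).getImm()) ||
        (TID.isCall() && !TID.isReturn()))
      MFI->setHasCalls(true);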

Modified: llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.cpp Sat Jul 10 00:06:30 2010
@@ -460,7 +460,10 @@
     LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
     if (ULR == IntA.end() || ULR->valno != AValNo)
       continue;
-    UseMO.setReg(NewReg);
+    if (TargetRegisterInfo::isPhysicalRegister(NewReg))
+      UseMO.substPhysReg(NewReg, *tri_);
+    else
+      UseMO.setReg(NewReg);
     if (UseMI == CopyMI)
       continue;
     if (UseMO.isKill()) {
@@ -482,6 +485,8 @@
     // extended to the end of the existing live range defined by the copy.
     SlotIndex DefIdx = UseIdx.getDefIndex();
     const LiveRange *DLR = IntB.getLiveRangeContaining(DefIdx);
+    if (!DLR)
+      continue;
     BHasPHIKill |= DLR->valno->hasPHIKill();
     assert(DLR->valno->def == DefIdx);
     BDeadValNos.push_back(DLR->valno);
@@ -618,10 +623,14 @@
     // of last use.
     LastUse->setIsKill();
     removeRange(li, LastUseIdx.getDefIndex(), LR->end, li_, tri_);
+    if (LastUseMI->isCopy()) {
+      MachineOperand &DefMO = LastUseMI->getOperand(0);
+      if (DefMO.getReg() == li.reg && !DefMO.getSubReg())
+        DefMO.setIsDead();
+    }
     unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
-    if ((LastUseMI->isCopy() && !LastUseMI->getOperand(0).getSubReg()) ||
-        (tii_->isMoveInstr(*LastUseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
-         DstReg == li.reg && DstSubIdx == 0)) {
+    if (tii_->isMoveInstr(*LastUseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
+        DstReg == li.reg && DstSubIdx == 0) {
       // Last use is itself an identity copy.
       int DeadIdx = LastUseMI->findRegisterDefOperandIdx(li.reg,
                                                          false, false, tri_);
@@ -772,6 +781,17 @@
           ReMaterializeTrivialDef(li_->getInterval(SrcReg), CopyDstReg, 0,
                                   UseMI))
         continue;
+
+      if (UseMI->isCopy() &&
+          !UseMI->getOperand(1).getSubReg() &&
+          !UseMI->getOperand(0).getSubReg() &&
+          UseMI->getOperand(1).getReg() == SrcReg &&
+          UseMI->getOperand(0).getReg() != SrcReg &&
+          UseMI->getOperand(0).getReg() != DstReg &&
+          !JoinedCopies.count(UseMI) &&
+          ReMaterializeTrivialDef(li_->getInterval(SrcReg),
+                                  UseMI->getOperand(0).getReg(), 0, UseMI))
+        continue;
     }
 
     SmallVector<unsigned,8> Ops;
@@ -945,8 +965,8 @@
     // Live-in to the function but dead. Remove it from entry live-in set.
     if (mf_->begin()->isLiveIn(li.reg))
       mf_->begin()->removeLiveIn(li.reg);
-    const LiveRange *LR = li.getLiveRangeContaining(CopyIdx);
-    removeRange(li, LR->start, LR->end, li_, tri_);
+    if (const LiveRange *LR = li.getLiveRangeContaining(CopyIdx))
+      removeRange(li, LR->start, LR->end, li_, tri_);
     return removeIntervalIfEmpty(li, li_, tri_);
   }
 
@@ -1525,15 +1545,10 @@
     // If this isn't a copy or a subreg_to_reg, we can't join intervals.
     unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
     bool isInsUndef = false;
-    if (Inst->isCopy() || Inst->isExtractSubreg()) {
+    if (Inst->isCopy()) {
       DstReg = Inst->getOperand(0).getReg();
       SrcReg = Inst->getOperand(1).getReg();
-    } else if (Inst->isInsertSubreg()) {
-      DstReg = Inst->getOperand(0).getReg();
-      SrcReg = Inst->getOperand(2).getReg();
-      if (Inst->getOperand(1).isUndef())
-        isInsUndef = true;
-    } else if (Inst->isInsertSubreg() || Inst->isSubregToReg()) {
+    } else if (Inst->isSubregToReg()) {
       DstReg = Inst->getOperand(0).getReg();
       SrcReg = Inst->getOperand(2).getReg();
     } else if (!tii_->isMoveInstr(*Inst, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
@@ -1662,6 +1677,8 @@
            E = mri_->use_nodbg_end(); I != E; ++I) {
       MachineOperand &Use = I.getOperand();
       MachineInstr *UseMI = Use.getParent();
+      if (UseMI->isIdentityCopy())
+        continue;
       unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
       if (tii_->isMoveInstr(*UseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
           SrcReg == DstReg && SrcSubIdx == DstSubIdx)
@@ -1692,7 +1709,8 @@
 
     // Ignore identity copies.
     unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
-    if (!(tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
+    if (!MI->isIdentityCopy() &&
+        !(tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
           SrcReg == DstReg && SrcSubIdx == DstSubIdx))
       for (unsigned i = 0, NumOps = MI->getNumOperands(); i != NumOps; ++i) {
         MachineOperand &Use = MI->getOperand(i);
@@ -1823,7 +1841,8 @@
 
       // If the move will be an identity move delete it
       bool isMove= tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx);
-      if (isMove && SrcReg == DstReg && SrcSubIdx == DstSubIdx) {
+      if (MI->isIdentityCopy() ||
+          (isMove && SrcReg == DstReg && SrcSubIdx == DstSubIdx)) {
         if (li_->hasInterval(SrcReg)) {
           LiveInterval &RegInt = li_->getInterval(SrcReg);
           // If def of this move instruction is dead, remove its live range

Modified: llvm/branches/wendling/eh/lib/CodeGen/StackSlotColoring.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/StackSlotColoring.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/StackSlotColoring.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/StackSlotColoring.cpp Sat Jul 10 00:06:30 2010
@@ -508,8 +508,7 @@
 
         // Abort if the use is actually a sub-register def. We don't have
         // enough information to figure out if it is really legal.
-        if (MO.getSubReg() || MII->isExtractSubreg() ||
-            MII->isInsertSubreg() || MII->isSubregToReg())
+        if (MO.getSubReg() || MII->isSubregToReg())
           return false;
 
         const TargetRegisterClass *RC = TID.OpInfo[i].getRegClass(TRI);
@@ -571,7 +570,7 @@
 
         // Abort if the use is actually a sub-register use. We don't have
         // enough information to figure out if it is really legal.
-        if (MO.getSubReg() || MII->isExtractSubreg())
+        if (MO.getSubReg())
           return false;
 
         const TargetRegisterClass *RC = TID.OpInfo[i].getRegClass(TRI);

Modified: llvm/branches/wendling/eh/lib/CodeGen/TargetInstrInfoImpl.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/TargetInstrInfoImpl.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/TargetInstrInfoImpl.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/TargetInstrInfoImpl.cpp Sat Jul 10 00:06:30 2010
@@ -190,6 +190,47 @@
   return FnSize;
 }
 
+// If the COPY instruction in MI can be folded to a stack operation, return
+// the register class to use.
+static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
+                                              unsigned FoldIdx) {
+  assert(MI->isCopy() && "MI must be a COPY instruction");
+  if (MI->getNumOperands() != 2)
+    return 0;
+  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");
+
+  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
+  const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);
+
+  if (FoldOp.getSubReg() || LiveOp.getSubReg())
+    return 0;
+
+  unsigned FoldReg = FoldOp.getReg();
+  unsigned LiveReg = LiveOp.getReg();
+
+  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
+         "Cannot fold physregs");
+
+  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
+
+  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
+    return RC->contains(LiveOp.getReg()) ? RC : 0;
+
+  const TargetRegisterClass *LiveRC = MRI.getRegClass(LiveReg);
+  if (RC == LiveRC || RC->hasSubClass(LiveRC))
+    return RC;
+
+  // FIXME: Allow folding when register classes are memory compatible.
+  return 0;
+}
+
+bool TargetInstrInfoImpl::
+canFoldMemoryOperand(const MachineInstr *MI,
+                     const SmallVectorImpl<unsigned> &Ops) const {
+  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
+}
+
 /// foldMemoryOperand - Attempt to fold a load or store of the specified stack
 /// slot into the specified machine instruction for the specified operand(s).
 /// If this is possible, a new instruction is returned with the specified
@@ -197,10 +238,9 @@
 /// removing the old instruction and adding the new one in the instruction
 /// stream.
 MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                   MachineInstr* MI,
+TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                    const SmallVectorImpl<unsigned> &Ops,
-                                   int FrameIndex) const {
+                                   int FI) const {
   unsigned Flags = 0;
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
     if (MI->getOperand(Ops[i]).isDef())
@@ -208,10 +248,40 @@
     else
       Flags |= MachineMemOperand::MOLoad;
 
+  MachineBasicBlock *MBB = MI->getParent();
+  assert(MBB && "foldMemoryOperand needs an inserted instruction");
+  MachineFunction &MF = *MBB->getParent();
+
   // Ask the target to do the actual folding.
-  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
+  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI);
+
+  // Straight COPY may fold as load/store.
+  if (!NewMI) {
+    if (!MI->isCopy() || Ops.size() != 1)
+      return 0;
+
+    const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
+    if (!RC)
+      return 0;
+
+    const MachineOperand &MO = MI->getOperand(1-Ops[0]);
+    MachineBasicBlock::iterator Pos = MI;
+    const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+
+    if (Flags == MachineMemOperand::MOStore)
+      storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
+    else
+      loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
+
+    NewMI = --Pos;
+  } else {
+    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
+    NewMI = MBB->insert(MI, NewMI);
+  }
+
   if (!NewMI) return 0;
 
   assert((!(Flags & MachineMemOperand::MOStore) ||
           NewMI->getDesc().mayStore()) &&
          "Folded a def to a non-store!");
@@ -219,12 +289,12 @@
           NewMI->getDesc().mayLoad()) &&
          "Folded a use to a non-load!");
   const MachineFrameInfo &MFI = *MF.getFrameInfo();
-  assert(MFI.getObjectOffset(FrameIndex) != -1);
+  assert(MFI.getObjectOffset(FI) != -1);
   MachineMemOperand *MMO =
-    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIndex),
+    MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
                             Flags, /*Offset=*/0,
-                            MFI.getObjectSize(FrameIndex),
-                            MFI.getObjectAlignment(FrameIndex));
+                            MFI.getObjectSize(FI),
+                            MFI.getObjectAlignment(FI));
   NewMI->addMemOperand(MF, MMO);
 
   return NewMI;
@@ -234,8 +304,7 @@
 /// of any load and store from / to any address, not just from a specific
 /// stack slot.
 MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                   MachineInstr* MI,
+TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                    const SmallVectorImpl<unsigned> &Ops,
                                    MachineInstr* LoadMI) const {
   assert(LoadMI->getDesc().canFoldAsLoad() && "LoadMI isn't foldable!");
@@ -243,11 +312,15 @@
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
     assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
 #endif
+  MachineBasicBlock &MBB = *MI->getParent();
+  MachineFunction &MF = *MBB.getParent();
 
   // Ask the target to do the actual folding.
   MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
   if (!NewMI) return 0;
 
+  NewMI = MBB.insert(MI, NewMI);
+
   // Copy the memoperands from the load to the folded instruction.
   NewMI->setMemRefs(LoadMI->memoperands_begin(),
                     LoadMI->memoperands_end());
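
The generic COPY folding above gives every target spill and reload folding for free. Folding the COPY's def operand sends the result to memory, so the copy becomes a store of its source; folding the use operand makes the source come from memory, so it becomes a load into its destination:

    // %dst = COPY %src,  Ops = {0} (fold the def)  ==>  store %src -> [FI]
    // %dst = COPY %src,  Ops = {1} (fold the use)  ==>  load  %dst <- [FI]

This is also why ARMBaseInstrInfo::canFoldMemoryOperand below now falls back to TargetInstrInfoImpl::canFoldMemoryOperand instead of returning false.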

Modified: llvm/branches/wendling/eh/lib/CodeGen/TwoAddressInstructionPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/TwoAddressInstructionPass.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/TwoAddressInstructionPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/TwoAddressInstructionPass.cpp Sat Jul 10 00:06:30 2010
@@ -382,7 +382,7 @@
   DstReg = 0;
   unsigned SrcSubIdx, DstSubIdx;
   if (!TII->isMoveInstr(MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
-    if (MI.isCopy() || MI.isExtractSubreg()) {
+    if (MI.isCopy()) {
       DstReg = MI.getOperand(0).getReg();
       SrcReg = MI.getOperand(1).getReg();
     } else if (MI.isInsertSubreg()) {
@@ -1291,7 +1291,7 @@
     if (SrcDefMI->getParent() != DstDefMI->getParent())
       continue;
 
-    // If there are no other uses than extract_subreg which feed into
+    // If there are no uses other than copies which feed into
     // the reg_sequence, then we might be able to coalesce them.
     bool CanCoalesce = true;
     SmallVector<unsigned, 4> SrcSubIndices, DstSubIndices;
@@ -1299,13 +1299,11 @@
            UI = MRI->use_nodbg_begin(SrcReg),
            UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
       MachineInstr *UseMI = &*UI;
-      if (!UseMI->isExtractSubreg() ||
-          UseMI->getOperand(0).getReg() != DstReg ||
-          UseMI->getOperand(1).getSubReg() != 0) {
+      if (!UseMI->isCopy() || UseMI->getOperand(0).getReg() != DstReg) {
         CanCoalesce = false;
         break;
       }
-      SrcSubIndices.push_back(UseMI->getOperand(2).getImm());
+      SrcSubIndices.push_back(UseMI->getOperand(1).getSubReg());
       DstSubIndices.push_back(UseMI->getOperand(0).getSubReg());
     }
 
@@ -1340,9 +1338,9 @@
            UI = MRI->use_nodbg_begin(SrcReg),
            UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
       MachineInstr *UseMI = &*UI;
-      assert(UseMI->isExtractSubreg());
+      assert(UseMI->isCopy());
       unsigned DstSubIdx = UseMI->getOperand(0).getSubReg();
-      unsigned SrcSubIdx = UseMI->getOperand(2).getImm();
+      unsigned SrcSubIdx = UseMI->getOperand(1).getSubReg();
       assert(DstSubIdx != 0 && "missing subreg from RegSequence elimination");
       if ((NewDstSubIdx == 0 &&
            TRI->composeSubRegIndices(NewSrcSubIdx, DstSubIdx) != SrcSubIdx) ||
@@ -1357,27 +1355,13 @@
     if (!CanCoalesce)
       continue;
 
-    // Insert a copy or an extract to replace the original extracts.
+    // Insert a copy to replace the original.
     MachineBasicBlock::iterator InsertLoc = SomeMI;
-    if (NewSrcSubIdx) {
-      // Insert an extract subreg.
-      BuildMI(*SomeMI->getParent(), InsertLoc, SomeMI->getDebugLoc(),
-              TII->get(TargetOpcode::EXTRACT_SUBREG), DstReg)
-        .addReg(SrcReg).addImm(NewSrcSubIdx);
-    } else if (NewDstSubIdx) {
-      // Do a subreg insertion.
-      BuildMI(*SomeMI->getParent(), InsertLoc, SomeMI->getDebugLoc(),
-              TII->get(TargetOpcode::INSERT_SUBREG), DstReg)
-        .addReg(DstReg).addReg(SrcReg).addImm(NewDstSubIdx);
-    } else {
-      // Insert a copy.
-      bool Emitted =
-        TII->copyRegToReg(*SomeMI->getParent(), InsertLoc, DstReg, SrcReg,
-                          MRI->getRegClass(DstReg), MRI->getRegClass(SrcReg),
-                          SomeMI->getDebugLoc());
-      (void)Emitted;
-    }
-    MachineBasicBlock::iterator CopyMI = prior(InsertLoc);
+    MachineInstr *CopyMI = BuildMI(*SomeMI->getParent(), SomeMI,
+                                   SomeMI->getDebugLoc(),
+                                   TII->get(TargetOpcode::COPY))
+      .addReg(DstReg, RegState::Define, NewDstSubIdx)
+      .addReg(SrcReg, 0, NewSrcSubIdx);
 
     // Remove all the old extract instructions.
     for (MachineRegisterInfo::use_nodbg_iterator
@@ -1387,11 +1371,10 @@
       ++UI;
       if (UseMI == CopyMI)
         continue;
-      assert(UseMI->isExtractSubreg());
+      assert(UseMI->isCopy());
       // Move any kills to the new copy or extract instruction.
       if (UseMI->getOperand(1).isKill()) {
-        MachineOperand *KillMO = CopyMI->findRegisterUseOperand(SrcReg);
-        KillMO->setIsKill();
+        CopyMI->getOperand(1).setIsKill();
         if (LV)
           // Update live variables
           LV->replaceKillInstruction(SrcReg, UseMI, &*CopyMI);
@@ -1452,9 +1435,8 @@
       }
       IsImpDef = false;
 
-      // Remember EXTRACT_SUBREG sources. These might be candidate for
-      // coalescing.
-      if (DefMI->isExtractSubreg())
+      // Remember COPY sources. These might be candidates for coalescing.
+      if (DefMI->isCopy())
         RealSrcs.push_back(DefMI->getOperand(1).getReg());
 
       if (!Seen.insert(SrcReg) ||

Modified: llvm/branches/wendling/eh/lib/CodeGen/VirtRegRewriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/VirtRegRewriter.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/VirtRegRewriter.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/VirtRegRewriter.cpp Sat Jul 10 00:06:30 2010
@@ -1409,25 +1409,25 @@
     if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
       assert(NewMIs.size() == 1);
       MachineInstr *NewMI = NewMIs.back();
+      MBB->insert(MII, NewMI);
       NewMIs.clear();
       int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
       assert(Idx != -1);
       SmallVector<unsigned, 1> Ops;
       Ops.push_back(Idx);
-      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
+      MachineInstr *FoldedMI = TII->foldMemoryOperand(NewMI, Ops, SS);
+      NewMI->eraseFromParent();
       if (FoldedMI) {
         VRM->addSpillSlotUse(SS, FoldedMI);
         if (!VRM->hasPhys(UnfoldVR))
           VRM->assignVirt2Phys(UnfoldVR, UnfoldPR);
         VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
-        MII = MBB->insert(MII, FoldedMI);
+        MII = FoldedMI;
         InvalidateKills(MI, TRI, RegKills, KillOps);
         VRM->RemoveMachineInstrFromMaps(&MI);
         MBB->erase(&MI);
-        MF.DeleteMachineInstr(NewMI);
         return true;
       }
-      MF.DeleteMachineInstr(NewMI);
     }
   }
 
@@ -1479,7 +1479,6 @@
   if (MII == MBB->begin() || !MII->killsRegister(SrcReg))
     return false;
 
-  MachineFunction &MF = *MBB->getParent();
   MachineInstr &MI = *MII;
   MachineBasicBlock::iterator DefMII = prior(MII);
   MachineInstr *DefMI = DefMII;
@@ -1510,11 +1509,12 @@
     MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
     if (!CommutedMI)
       return false;
+    MBB->insert(MII, CommutedMI);
     SmallVector<unsigned, 1> Ops;
     Ops.push_back(NewDstIdx);
-    MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
+    MachineInstr *FoldedMI = TII->foldMemoryOperand(CommutedMI, Ops, SS);
     // Not needed since foldMemoryOperand returns a new MI.
-    MF.DeleteMachineInstr(CommutedMI);
+    CommutedMI->eraseFromParent();
     if (!FoldedMI)
       return false;
 
@@ -1527,7 +1527,7 @@
     MachineInstr *StoreMI = MII;
     VRM->addSpillSlotUse(SS, StoreMI);
     VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
-    MII = MBB->insert(MII, FoldedMI);  // Update MII to backtrack.
+    MII = FoldedMI;  // Update MII to backtrack.
 
     // Delete all 3 old instructions.
     InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
@@ -2012,7 +2012,7 @@
       //       = EXTRACT_SUBREG fi#1
       // fi#1 is available in EDI, but it cannot be reused because it's not in
       // the right register file.
-      if (PhysReg && !AvoidReload && (SubIdx || MI.isExtractSubreg())) {
+      if (PhysReg && !AvoidReload && SubIdx) {
         const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
         if (!RC->contains(PhysReg))
           PhysReg = 0;
@@ -2443,6 +2443,24 @@
         // Also check if it's copying from an "undef", if so, we can't
         // eliminate this or else the undef marker is lost and it will
         // confuse the scavenger. This is extremely rare.
+        if (MI.isIdentityCopy() && !MI.getOperand(1).isUndef() &&
+            MI.getNumOperands() == 2) {
+          ++NumDCE;
+          DEBUG(dbgs() << "Removing now-noop copy: " << MI);
+          SmallVector<unsigned, 2> KillRegs;
+          InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
+          if (MO.isDead() && !KillRegs.empty()) {
+            // Source register or an implicit super/sub-register use is killed.
+            assert(TRI->regsOverlap(KillRegs[0], MI.getOperand(0).getReg()));
+            // Last def is now dead.
+            TransferDeadness(MI.getOperand(1).getReg(), RegKills, KillOps);
+          }
+          VRM->RemoveMachineInstrFromMaps(&MI);
+          MBB->erase(&MI);
+          Erased = true;
+          Spills.disallowClobberPhysReg(VirtReg);
+          goto ProcessNextInst;
+        }
         unsigned Src, Dst, SrcSR, DstSR;
         if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) &&
             Src == Dst && SrcSR == DstSR &&
@@ -2532,6 +2550,16 @@
 
         // Check to see if this is a noop copy.  If so, eliminate the
         // instruction before considering the dest reg to be changed.
+        if (MI.isIdentityCopy()) {
+          ++NumDCE;
+          DEBUG(dbgs() << "Removing now-noop copy: " << MI);
+          InvalidateKills(MI, TRI, RegKills, KillOps);
+          VRM->RemoveMachineInstrFromMaps(&MI);
+          MBB->erase(&MI);
+          Erased = true;
+          UpdateKills(*LastStore, TRI, RegKills, KillOps);
+          goto ProcessNextInst;
+        }
         {
           unsigned Src, Dst, SrcSR, DstSR;
           if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) &&

Modified: llvm/branches/wendling/eh/lib/MC/MCAsmStreamer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/MC/MCAsmStreamer.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/MC/MCAsmStreamer.cpp (original)
+++ llvm/branches/wendling/eh/lib/MC/MCAsmStreamer.cpp Sat Jul 10 00:06:30 2010
@@ -288,6 +288,7 @@
   case MCSA_WeakDefinition: OS << "\t.weak_definition\t"; break;
       // .weak_reference
   case MCSA_WeakReference:  OS << MAI.getWeakRefDirective(); break;
+  case MCSA_WeakDefAutoPrivate: OS << "\t.weak_def_can_be_hidden\t"; break;
   }
 
   OS << *Symbol;
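
MCSA_WeakDefAutoPrivate is the streamer-level spelling of Darwin's .weak_def_can_be_hidden; the MCMachOStreamer hunk below maps it to the weak-definition plus weak-reference symbol flags, and the AsmParser hunk accepts the directive on input. Emitting it through the streamer interface looks roughly like this (Streamer and Sym stand for an MCStreamer and an MCSymbol obtained elsewhere):

    Streamer.EmitSymbolAttribute(Sym, MCSA_WeakDefAutoPrivate);
    // asm streamer output:
    //         .weak_def_can_be_hidden Sym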

Modified: llvm/branches/wendling/eh/lib/MC/MCMachOStreamer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/MC/MCMachOStreamer.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/MC/MCMachOStreamer.cpp (original)
+++ llvm/branches/wendling/eh/lib/MC/MCMachOStreamer.cpp Sat Jul 10 00:06:30 2010
@@ -273,6 +273,10 @@
     // it has to be in a coalesced section, but this isn't enforced.
     SD.setFlags(SD.getFlags() | SF_WeakDefinition);
     break;
+
+  case MCSA_WeakDefAutoPrivate:
+    SD.setFlags(SD.getFlags() | SF_WeakDefinition | SF_WeakReference);
+    break;
   }
 }
 

Modified: llvm/branches/wendling/eh/lib/MC/MCParser/AsmParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/MC/MCParser/AsmParser.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/MC/MCParser/AsmParser.cpp (original)
+++ llvm/branches/wendling/eh/lib/MC/MCParser/AsmParser.cpp Sat Jul 10 00:06:30 2010
@@ -755,6 +755,8 @@
       return ParseDirectiveSymbolAttribute(MCSA_WeakDefinition);
     if (IDVal == ".weak_reference")
       return ParseDirectiveSymbolAttribute(MCSA_WeakReference);
+    if (IDVal == ".weak_def_can_be_hidden")
+      return ParseDirectiveSymbolAttribute(MCSA_WeakDefAutoPrivate);
 
     if (IDVal == ".comm")
       return ParseDirectiveComm(/*IsLocal=*/false);

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.cpp Sat Jul 10 00:06:30 2010
@@ -848,7 +848,7 @@
     if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
       // FIXME: It's possible to only store part of the QQ register if the
       // spilled def has a sub-register index.
-      MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VST2q32))
+      MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VST1d64Q))
         .addFrameIndex(FI).addImm(16);
       MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
       MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
@@ -941,7 +941,7 @@
   case ARM::QQPRRegClassID:
   case ARM::QQPR_VFP2RegClassID:
     if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
-      MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLD2q32));
+      MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLD1d64Q));
       MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
       MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
       MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
@@ -1204,7 +1204,7 @@
 
   // FIXME: VMOVQQ and VMOVQQQQ?
 
-  return false;
+  return TargetInstrInfoImpl::canFoldMemoryOperand(MI, Ops);
 }
 
 /// Create a copy of a const pool value. Update CPI to the new index and return

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseRegisterInfo.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseRegisterInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseRegisterInfo.cpp Sat Jul 10 00:06:30 2010
@@ -724,6 +724,7 @@
   SmallVector<unsigned, 4> UnspilledCS1GPRs;
   SmallVector<unsigned, 4> UnspilledCS2GPRs;
   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+  MachineFrameInfo *MFI = MF.getFrameInfo();
 
   // Spill R4 if Thumb2 function requires stack realignment - it will be used as
   // scratch register.
@@ -820,9 +821,16 @@
   // offset, make sure a register (or a spill slot) is available for the
   // register scavenger. Note that if we're indexing off the frame pointer, the
   // effective stack size is 4 bytes larger since the FP points to the stack
-  // slot of the previous FP.
+  // slot of the previous FP. Also, if we have variable sized objects in the
+  // function, stack slot references will often be negative, and some of
+  // our instructions are positive-offset only, so conservatively consider
+  // that case to want a spill slot (or register) as well.
+  // FIXME: We could add logic to be more precise about negative offsets
+  //        and which instructions will need a scratch register for them. Is it
+  //        worth the effort and added fragility?
   bool BigStack = RS &&
-    estimateStackSize(MF) + (hasFP(MF) ? 4 : 0) >= estimateRSStackSizeLimit(MF);
+    (estimateStackSize(MF) + (hasFP(MF) ? 4:0) >= estimateRSStackSizeLimit(MF))
+    || MFI->hasVarSizedObjects(); // binds as (RS && ...) || hasVarSizedObjects()
 
   bool ExtraCSSpill = false;
   if (BigStack || !CanEliminateFrame || cannotEliminateFrame(MF)) {
@@ -915,7 +923,6 @@
         // note: Thumb1 functions spill to R12, not the stack.  Reserve a slot
         // closest to SP or frame pointer.
         const TargetRegisterClass *RC = ARM::GPRRegisterClass;
-        MachineFrameInfo *MFI = MF.getFrameInfo();
         RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
                                                            RC->getAlignment(),
                                                            false));

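Since the && / || grouping in that initializer is easy to misread, here is the predicate restated with explicit parentheses (a sketch, same names as above):

  // True in exactly two cases: a scavenger exists and the estimated frame
  // exceeds what positive-offset addressing can reach, or the function has
  // variable-sized objects, which force negative offsets.
  bool BigStack =
      (RS && estimateStackSize(MF) + (hasFP(MF) ? 4 : 0) >=
                 estimateRSStackSizeLimit(MF)) ||
      MFI->hasVarSizedObjects();
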
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.cpp Sat Jul 10 00:06:30 2010
@@ -2295,7 +2295,7 @@
 ARMTargetLowering::getVFPCmp(SDValue &LHS, SDValue &RHS, ISD::CondCode CC,
                              SDValue &ARMCC, SelectionDAG &DAG,
                              DebugLoc dl) const {
-  if (UnsafeFPMath &&
+  if (UnsafeFPMath && FiniteOnlyFPMath() &&
       (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
        CC == ISD::SETNE || CC == ISD::SETUNE) &&
       canBitcastToInt(LHS.getNode()) && canBitcastToInt(RHS.getNode())) {

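A note on why the extra guard is needed: comparing FP values by their bit patterns is only equivalent to the FP compare when the edge cases cannot occur. UnsafeFPMath licenses ignoring the signed-zero mismatch, and FiniteOnlyFPMath() rules out NaNs. A standalone illustration (ordinary C++, not LLVM code):

  #include <cstdio>
  #include <cstring>
  #include <limits>

  int main() {
    float pz = 0.0f, nz = -0.0f;
    float qnan = std::numeric_limits<float>::quiet_NaN();
    unsigned a, b, c;
    std::memcpy(&a, &pz, sizeof a);
    std::memcpy(&b, &nz, sizeof b);
    std::memcpy(&c, &qnan, sizeof c);
    std::printf("%d %d\n", pz == nz, a == b);     // "1 0": equal floats, unequal bits
    std::printf("%d %d\n", qnan == qnan, c == c); // "0 1": unequal floats, equal bits
    return 0;
  }
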
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMRegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMRegisterInfo.td?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMRegisterInfo.td (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMRegisterInfo.td Sat Jul 10 00:06:30 2010
@@ -153,11 +153,11 @@
 
 // Pseudo 256-bit registers to represent pairs of Q registers. These should
 // never be present in the emitted code.
-// These are used for NEON load / store instructions, e.g. vld4, vst3.
-// NOTE: It's possible to define more QQ registers since technical the
-// starting D register number doesn't have to be multiple of 4. e.g. 
-// D1, D2, D3, D4 would be a legal quad. But that would make the sub-register
-// stuffs very messy.
+// These are used for NEON load / store instructions, e.g., vld4, vst3.
+// NOTE: It's possible to define more QQ registers since technically the
+// starting D register number doesn't have to be multiple of 4, e.g.,
+// D1, D2, D3, D4 would be a legal quad, but that would make the subregister
+// stuff very messy.
 let SubRegIndices = [qsub_0, qsub_1] in {
 let CompositeIndices = [(dsub_2 qsub_1, dsub_0), (dsub_3 qsub_1, dsub_1),
                         (ssub_4 qsub_1, ssub_0), (ssub_5 qsub_1, ssub_1),

Modified: llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp Sat Jul 10 00:06:30 2010
@@ -307,7 +307,7 @@
       unsigned DRegLo = TM.getRegisterInfo()->getSubReg(Reg, ARM::dsub_0);
       unsigned DRegHi = TM.getRegisterInfo()->getSubReg(Reg, ARM::dsub_1);
       O << '{'
-        << getRegisterName(DRegLo) << ',' << getRegisterName(DRegHi)
+        << getRegisterName(DRegLo) << ", " << getRegisterName(DRegHi)
         << '}';
     } else if (Modifier && strcmp(Modifier, "lane") == 0) {
       unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(Reg);

Modified: llvm/branches/wendling/eh/lib/Target/ARM/NEONPreAllocPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/NEONPreAllocPass.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/NEONPreAllocPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/NEONPreAllocPass.cpp Sat Jul 10 00:06:30 2010
@@ -407,7 +407,7 @@
            "expected a virtual register");
     // Extracting from a Q or QQ register.
     MachineInstr *DefMI = MRI->getVRegDef(VirtReg);
-    if (!DefMI || !DefMI->isExtractSubreg())
+    if (!DefMI || !DefMI->isCopy() || !DefMI->getOperand(1).getSubReg())
       return false;
     VirtReg = DefMI->getOperand(1).getReg();
     if (LastSrcReg && LastSrcReg != VirtReg)
@@ -418,7 +418,7 @@
         RC != ARM::QQPRRegisterClass &&
         RC != ARM::QQQQPRRegisterClass)
       return false;
-    unsigned SubIdx = DefMI->getOperand(2).getImm();
+    unsigned SubIdx = DefMI->getOperand(1).getSubReg();
     if (LastSubIdx) {
       if (LastSubIdx != SubIdx-Stride)
         return false;
@@ -445,7 +445,7 @@
     MachineOperand &MO = MI->getOperand(FirstOpnd + R);
     unsigned OldReg = MO.getReg();
     MachineInstr *DefMI = MRI->getVRegDef(OldReg);
-    assert(DefMI->isExtractSubreg());
+    assert(DefMI->isCopy());
     MO.setReg(LastSrcReg);
     MO.setSubReg(SubIds[R]);
     MO.setIsKill(false);
@@ -468,40 +468,7 @@
       continue;
     if (FormsRegSequence(MI, FirstOpnd, NumRegs, Offset, Stride))
       continue;
-
-    MachineBasicBlock::iterator NextI = llvm::next(MBBI);
-    for (unsigned R = 0; R < NumRegs; ++R) {
-      MachineOperand &MO = MI->getOperand(FirstOpnd + R);
-      assert(MO.isReg() && MO.getSubReg() == 0 && "unexpected operand");
-      unsigned VirtReg = MO.getReg();
-      assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
-             "expected a virtual register");
-
-      // For now, just assign a fixed set of adjacent registers.
-      // This leaves plenty of room for future improvements.
-      static const unsigned NEONDRegs[] = {
-        ARM::D0, ARM::D1, ARM::D2, ARM::D3,
-        ARM::D4, ARM::D5, ARM::D6, ARM::D7
-      };
-      MO.setReg(NEONDRegs[Offset + R * Stride]);
-
-      if (MO.isUse()) {
-        // Insert a copy from VirtReg.
-        TII->copyRegToReg(MBB, MBBI, MO.getReg(), VirtReg,
-                          ARM::DPRRegisterClass, ARM::DPRRegisterClass,
-                          DebugLoc());
-        if (MO.isKill()) {
-          MachineInstr *CopyMI = prior(MBBI);
-          CopyMI->findRegisterUseOperand(VirtReg)->setIsKill();
-        }
-        MO.setIsKill();
-      } else if (MO.isDef() && !MO.isDead()) {
-        // Add a copy to VirtReg.
-        TII->copyRegToReg(MBB, NextI, VirtReg, MO.getReg(),
-                          ARM::DPRRegisterClass, ARM::DPRRegisterClass,
-                          DebugLoc());
-      }
-    }
+    llvm_unreachable("expected a REG_SEQUENCE");
   }
 
   return Modified;

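For reference, after the EXTRACT_SUBREG-to-COPY migration this hunk tracks, a subregister extract is just a COPY whose source operand carries a sub-register index, so the checks above amount to the following sketch:

  // "%D1<def> = COPY %Q0:dsub_1" is the new spelling of an extract;
  // a plain full-register copy has getOperand(1).getSubReg() == 0.
  static bool isSubregExtract(const MachineInstr *MI) {
    return MI && MI->isCopy() && MI->getOperand(1).getSubReg() != 0;
  }
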
Modified: llvm/branches/wendling/eh/lib/Target/CellSPU/SPUCallingConv.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CellSPU/SPUCallingConv.td?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CellSPU/SPUCallingConv.td (original)
+++ llvm/branches/wendling/eh/lib/Target/CellSPU/SPUCallingConv.td Sat Jul 10 00:06:30 2010
@@ -34,76 +34,19 @@
 
 //===----------------------------------------------------------------------===//
 // CellSPU Argument Calling Conventions
-// (note: this isn't used, but presumably should be at some point when other
-//  targets do.)
 //===----------------------------------------------------------------------===//
-/*
-def CC_SPU : CallingConv<[
-  CCIfType<[i8],  CCAssignToReg<[R3,   R4,  R5,  R6,  R7,  R8,  R9, R10, R11,
-                                 R12, R13, R14, R15, R16, R17, R18, R19, R20,
-                                 R21, R22, R23, R24, R25, R26, R27, R28, R29,
-                                 R30, R31, R32, R33, R34, R35, R36, R37, R38,
-                                 R39, R40, R41, R42, R43, R44, R45, R46, R47,
-                                 R48, R49, R50, R51, R52, R53, R54, R55, R56,
-                                 R57, R58, R59, R60, R61, R62, R63, R64, R65,
-                                 R66, R67, R68, R69, R70, R71, R72, R73, R74,
-                                 R75, R76, R77, R78, R79]>>,
-  CCIfType<[i16], CCAssignToReg<[R3,   R4,  R5,  R6,  R7,  R8,  R9, R10, R11,
-                                 R12, R13, R14, R15, R16, R17, R18, R19, R20,
-                                 R21, R22, R23, R24, R25, R26, R27, R28, R29,
-                                 R30, R31, R32, R33, R34, R35, R36, R37, R38,
-                                 R39, R40, R41, R42, R43, R44, R45, R46, R47,
-                                 R48, R49, R50, R51, R52, R53, R54, R55, R56,
-                                 R57, R58, R59, R60, R61, R62, R63, R64, R65,
-                                 R66, R67, R68, R69, R70, R71, R72, R73, R74,
-                                 R75, R76, R77, R78, R79]>>,
-  CCIfType<[i32], CCAssignToReg<[R3,   R4,  R5,  R6,  R7,  R8,  R9, R10, R11,
-                                 R12, R13, R14, R15, R16, R17, R18, R19, R20,
-                                 R21, R22, R23, R24, R25, R26, R27, R28, R29,
-                                 R30, R31, R32, R33, R34, R35, R36, R37, R38,
-                                 R39, R40, R41, R42, R43, R44, R45, R46, R47,
-                                 R48, R49, R50, R51, R52, R53, R54, R55, R56,
-                                 R57, R58, R59, R60, R61, R62, R63, R64, R65,
-                                 R66, R67, R68, R69, R70, R71, R72, R73, R74,
-                                 R75, R76, R77, R78, R79]>>,
-  CCIfType<[f32], CCAssignToReg<[R3,   R4,  R5,  R6,  R7,  R8,  R9, R10, R11,
-                                 R12, R13, R14, R15, R16, R17, R18, R19, R20,
-                                 R21, R22, R23, R24, R25, R26, R27, R28, R29,
-                                 R30, R31, R32, R33, R34, R35, R36, R37, R38,
-                                 R39, R40, R41, R42, R43, R44, R45, R46, R47,
-                                 R48, R49, R50, R51, R52, R53, R54, R55, R56,
-                                 R57, R58, R59, R60, R61, R62, R63, R64, R65,
-                                 R66, R67, R68, R69, R70, R71, R72, R73, R74,
-                                 R75, R76, R77, R78, R79]>>,
-  CCIfType<[i64], CCAssignToReg<[R3,   R4,  R5,  R6,  R7,  R8,  R9, R10, R11,
-                                 R12, R13, R14, R15, R16, R17, R18, R19, R20,
-                                 R21, R22, R23, R24, R25, R26, R27, R28, R29,
-                                 R30, R31, R32, R33, R34, R35, R36, R37, R38,
-                                 R39, R40, R41, R42, R43, R44, R45, R46, R47,
-                                 R48, R49, R50, R51, R52, R53, R54, R55, R56,
-                                 R57, R58, R59, R60, R61, R62, R63, R64, R65,
-                                 R66, R67, R68, R69, R70, R71, R72, R73, R74,
-                                 R75, R76, R77, R78, R79]>>,
-  CCIfType<[f64], CCAssignToReg<[R3,   R4,  R5,  R6,  R7,  R8,  R9, R10, R11,
-                                 R12, R13, R14, R15, R16, R17, R18, R19, R20,
-                                 R21, R22, R23, R24, R25, R26, R27, R28, R29,
-                                 R30, R31, R32, R33, R34, R35, R36, R37, R38,
-                                 R39, R40, R41, R42, R43, R44, R45, R46, R47,
-                                 R48, R49, R50, R51, R52, R53, R54, R55, R56,
-                                 R57, R58, R59, R60, R61, R62, R63, R64, R65,
-                                 R66, R67, R68, R69, R70, R71, R72, R73, R74,
-                                 R75, R76, R77, R78, R79]>>,
-  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2i64, v2f64],
-                  CCAssignToReg<[R3,   R4,  R5,  R6,  R7,  R8,  R9, R10, R11,
-                                 R12, R13, R14, R15, R16, R17, R18, R19, R20,
-                                 R21, R22, R23, R24, R25, R26, R27, R28, R29,
-                                 R30, R31, R32, R33, R34, R35, R36, R37, R38,
-                                 R39, R40, R41, R42, R43, R44, R45, R46, R47,
-                                 R48, R49, R50, R51, R52, R53, R54, R55, R56,
-                                 R57, R58, R59, R60, R61, R62, R63, R64, R65,
-                                 R66, R67, R68, R69, R70, R71, R72, R73, R74,
-                                 R75, R76, R77, R78, R79]>>,
-  
+def CCC_SPU : CallingConv<[
+  CCIfType<[i8, i16, i32, i64, i128, f32, f64, 
+            v16i8, v8i16, v4i32, v4f32, v2i64, v2f64],
+            CCAssignToReg<[R3,   R4,  R5,  R6,  R7,  R8,  R9, R10, R11,
+                           R12, R13, R14, R15, R16, R17, R18, R19, R20,
+                           R21, R22, R23, R24, R25, R26, R27, R28, R29,
+                           R30, R31, R32, R33, R34, R35, R36, R37, R38,
+                           R39, R40, R41, R42, R43, R44, R45, R46, R47,
+                           R48, R49, R50, R51, R52, R53, R54, R55, R56,
+                           R57, R58, R59, R60, R61, R62, R63, R64, R65,
+                           R66, R67, R68, R69, R70, R71, R72, R73, R74,
+                           R75, R76, R77, R78, R79]>>,
   // Integer/FP values get stored in stack slots that are 8 bytes in size and
   // 8-byte aligned if there are no more registers to hold them.
   CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,
@@ -112,4 +55,3 @@
   CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
               CCAssignToStack<16, 16>>
 ]>;
-*/

Modified: llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp Sat Jul 10 00:06:30 2010
@@ -1014,22 +1014,26 @@
   MachineRegisterInfo &RegInfo = MF.getRegInfo();
   SPUFunctionInfo *FuncInfo = MF.getInfo<SPUFunctionInfo>();
 
-  const unsigned *ArgRegs = SPURegisterInfo::getArgRegs();
-  const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();
-
   unsigned ArgOffset = SPUFrameInfo::minStackSize();
   unsigned ArgRegIdx = 0;
   unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
 
   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
 
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
+                 *DAG.getContext());
+  // FIXME: allow for other calling conventions
+  CCInfo.AnalyzeFormalArguments(Ins, CCC_SPU);
+
   // Add DAG nodes to load the arguments or copy them out of registers.
   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
     EVT ObjectVT = Ins[ArgNo].VT;
     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
     SDValue ArgVal;
+    CCValAssign &VA = ArgLocs[ArgNo];
 
-    if (ArgRegIdx < NumArgRegs) {
+    if (VA.isRegLoc()) {
       const TargetRegisterClass *ArgRegClass;
 
       switch (ObjectVT.getSimpleVT().SimpleTy) {
@@ -1068,7 +1072,7 @@
       }
 
       unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass);
-      RegInfo.addLiveIn(ArgRegs[ArgRegIdx], VReg);
+      RegInfo.addLiveIn(VA.getLocReg(), VReg);
       ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
       ++ArgRegIdx;
     } else {
@@ -1088,12 +1092,28 @@
 
   // vararg handling:
   if (isVarArg) {
-    // unsigned int ptr_size = PtrVT.getSizeInBits() / 8;
+    // FIXME: we should be able to query the argument registers from 
+    //        tablegen generated code. 
+    static const unsigned ArgRegs[] = {
+      SPU::R3,  SPU::R4,  SPU::R5,  SPU::R6,  SPU::R7,  SPU::R8,  SPU::R9,
+      SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
+      SPU::R17, SPU::R18, SPU::R19, SPU::R20, SPU::R21, SPU::R22, SPU::R23,
+      SPU::R24, SPU::R25, SPU::R26, SPU::R27, SPU::R28, SPU::R29, SPU::R30,
+      SPU::R31, SPU::R32, SPU::R33, SPU::R34, SPU::R35, SPU::R36, SPU::R37,
+      SPU::R38, SPU::R39, SPU::R40, SPU::R41, SPU::R42, SPU::R43, SPU::R44,
+      SPU::R45, SPU::R46, SPU::R47, SPU::R48, SPU::R49, SPU::R50, SPU::R51,
+      SPU::R52, SPU::R53, SPU::R54, SPU::R55, SPU::R56, SPU::R57, SPU::R58,
+      SPU::R59, SPU::R60, SPU::R61, SPU::R62, SPU::R63, SPU::R64, SPU::R65,
+      SPU::R66, SPU::R67, SPU::R68, SPU::R69, SPU::R70, SPU::R71, SPU::R72,
+      SPU::R73, SPU::R74, SPU::R75, SPU::R76, SPU::R77, SPU::R78, SPU::R79
+    };
+    // Number of entries in the ArgRegs array above (R3..R79).
+    unsigned NumArgRegs = sizeof(ArgRegs) / sizeof(ArgRegs[0]);
+
     // We will spill (79-3)+1 registers to the stack
     SmallVector<SDValue, 79-3+1> MemOps;
 
     // Create the frame slot
-
     for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) {
       FuncInfo->setVarArgsFrameIndex(
         MFI->CreateFixedObject(StackSlotSize, ArgOffset, true));
@@ -1145,8 +1165,15 @@
   const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
   unsigned NumOps     = Outs.size();
   unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
-  const unsigned *ArgRegs = SPURegisterInfo::getArgRegs();
-  const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();
+
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
+                 *DAG.getContext()); 
+  // FIXME: allow for other calling conventions
+  CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);
+  
+  const unsigned NumArgRegs = ArgLocs.size();
+
 
   // Handy pointer type
   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
@@ -1166,8 +1193,9 @@
   // And the arguments passed on the stack
   SmallVector<SDValue, 8> MemOpChains;
 
-  for (unsigned i = 0; i != NumOps; ++i) {
-    SDValue Arg = OutVals[i];
+  for (; ArgRegIdx != NumOps; ++ArgRegIdx) {
+    SDValue Arg = OutVals[ArgRegIdx];
+    CCValAssign &VA = ArgLocs[ArgRegIdx];
 
     // PtrOff will be used to store the current argument to the stack if a
     // register cannot be found for it.
@@ -1190,7 +1218,7 @@
     case MVT::v8i16:
     case MVT::v16i8:
       if (ArgRegIdx != NumArgRegs) {
-        RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
+        RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
       } else {
         MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
                                            false, false, 0));

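The rewritten argument lowering follows the standard CCState pattern used by other targets; in outline (a sketch, same names as the hunks above):

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CCC_SPU); // fills ArgLocs per the .td rules

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (VA.isRegLoc()) {
      // Argument arrives in VA.getLocReg(): add a live-in, CopyFromReg it.
    } else {
      // Argument was given a stack slot: create a fixed object and load.
    }
  }
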
Modified: llvm/branches/wendling/eh/lib/Target/CellSPU/SPURegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CellSPU/SPURegisterInfo.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CellSPU/SPURegisterInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/CellSPU/SPURegisterInfo.cpp Sat Jul 10 00:06:30 2010
@@ -191,33 +191,6 @@
 {
 }
 
-// SPU's 128-bit registers used for argument passing:
-static const unsigned SPU_ArgRegs[] = {
-  SPU::R3,  SPU::R4,  SPU::R5,  SPU::R6,  SPU::R7,  SPU::R8,  SPU::R9,
-  SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
-  SPU::R17, SPU::R18, SPU::R19, SPU::R20, SPU::R21, SPU::R22, SPU::R23,
-  SPU::R24, SPU::R25, SPU::R26, SPU::R27, SPU::R28, SPU::R29, SPU::R30,
-  SPU::R31, SPU::R32, SPU::R33, SPU::R34, SPU::R35, SPU::R36, SPU::R37,
-  SPU::R38, SPU::R39, SPU::R40, SPU::R41, SPU::R42, SPU::R43, SPU::R44,
-  SPU::R45, SPU::R46, SPU::R47, SPU::R48, SPU::R49, SPU::R50, SPU::R51,
-  SPU::R52, SPU::R53, SPU::R54, SPU::R55, SPU::R56, SPU::R57, SPU::R58,
-  SPU::R59, SPU::R60, SPU::R61, SPU::R62, SPU::R63, SPU::R64, SPU::R65,
-  SPU::R66, SPU::R67, SPU::R68, SPU::R69, SPU::R70, SPU::R71, SPU::R72,
-  SPU::R73, SPU::R74, SPU::R75, SPU::R76, SPU::R77, SPU::R78, SPU::R79
-};
-
-const unsigned *
-SPURegisterInfo::getArgRegs()
-{
-  return SPU_ArgRegs;
-}
-
-unsigned
-SPURegisterInfo::getNumArgRegs()
-{
-  return sizeof(SPU_ArgRegs) / sizeof(SPU_ArgRegs[0]);
-}
-
 /// getPointerRegClass - Return the register class to use to hold pointers.
 /// This is used for addressing modes.
 const TargetRegisterClass *

Modified: llvm/branches/wendling/eh/lib/Target/CellSPU/SPURegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CellSPU/SPURegisterInfo.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CellSPU/SPURegisterInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/CellSPU/SPURegisterInfo.h Sat Jul 10 00:06:30 2010
@@ -86,15 +86,6 @@
     // New methods added:
     //------------------------------------------------------------------------
 
-    //! Return the array of argument passing registers
-    /*!
-      \note The size of this array is returned by getArgRegsSize().
-     */
-    static const unsigned *getArgRegs();
-
-    //! Return the size of the argument passing register array
-    static unsigned getNumArgRegs();
-
     //! Get DWARF debugging register number
     int getDwarfRegNum(unsigned RegNum, bool isEH) const;
 

Modified: llvm/branches/wendling/eh/lib/Target/README.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/README.txt?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/README.txt (original)
+++ llvm/branches/wendling/eh/lib/Target/README.txt Sat Jul 10 00:06:30 2010
@@ -906,17 +906,6 @@
 
 //===---------------------------------------------------------------------===//
 
-From GCC Bug 3756:
-int
-pn (int n)
-{
- return (n >= 0 ? 1 : -1);
-}
-Should combine to (n >> 31) | 1.  Currently not optimized with "clang
--emit-llvm-bc | opt -std-compile-opts | llc".
-
-//===---------------------------------------------------------------------===//
-
 void a(int variable)
 {
  if (variable == 4 || variable == 6)

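The entry removed above is easy to sanity-check. For a 32-bit int, n >> 31 is all ones when n is negative and zero otherwise (assuming an arithmetic shift for signed right shifts, which is implementation-defined in C but what LLVM's targets provide), so OR-ing in 1 yields -1 or 1:

  #include <cassert>

  int pn_ref(int n) { return n >= 0 ? 1 : -1; }
  int pn_opt(int n) { return (n >> 31) | 1; } // sign mask, then force bit 0

  int main() {
    const int tests[] = { -2147483647 - 1, -7, -1, 0, 1, 42, 2147483647 };
    for (unsigned i = 0; i != sizeof(tests) / sizeof(tests[0]); ++i)
      assert(pn_ref(tests[i]) == pn_opt(tests[i]));
    return 0;
  }
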
Modified: llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp Sat Jul 10 00:06:30 2010
@@ -85,11 +85,18 @@
   }
 }
 
-void X86ATTInstPrinter::printLeaMemReference(const MCInst *MI, unsigned Op,
-                                             raw_ostream &O) {
+void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
+                                          raw_ostream &O) {
   const MCOperand &BaseReg  = MI->getOperand(Op);
   const MCOperand &IndexReg = MI->getOperand(Op+2);
   const MCOperand &DispSpec = MI->getOperand(Op+3);
+  const MCOperand &SegReg = MI->getOperand(Op+4);
+  
+  // If this has a segment register, print it.
+  if (SegReg.getReg()) {
+    printOperand(MI, Op+4, O);
+    O << ':';
+  }
   
   if (DispSpec.isImm()) {
     int64_t DispVal = DispSpec.getImm();
@@ -115,13 +122,3 @@
     O << ')';
   }
 }
-
-void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
-                                          raw_ostream &O) {
-  // If this has a segment register, print it.
-  if (MI->getOperand(Op+4).getReg()) {
-    printOperand(MI, Op+4, O);
-    O << ':';
-  }
-  printLeaMemReference(MI, Op, O);
-}

Modified: llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h Sat Jul 10 00:06:30 2010
@@ -34,7 +34,6 @@
 
   void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
   void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &OS);
-  void printLeaMemReference(const MCInst *MI, unsigned Op, raw_ostream &OS);
   void printSSECC(const MCInst *MI, unsigned Op, raw_ostream &OS);
   void print_pcrel_imm(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
   
@@ -69,14 +68,8 @@
   void printf128mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
     printMemReference(MI, OpNo, O);
   }
-  void printlea32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
-    printLeaMemReference(MI, OpNo, O);
-  }
-  void printlea64mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
-    printLeaMemReference(MI, OpNo, O);
-  }
-  void printlea64_32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
-    printLeaMemReference(MI, OpNo, O);
+  void printf256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+    printMemReference(MI, OpNo, O);
   }
 };
   

Modified: llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp Sat Jul 10 00:06:30 2010
@@ -81,12 +81,19 @@
   }
 }
 
-void X86IntelInstPrinter::printLeaMemReference(const MCInst *MI, unsigned Op,
-                                               raw_ostream &O) {
+void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
+                                            raw_ostream &O) {
   const MCOperand &BaseReg  = MI->getOperand(Op);
   unsigned ScaleVal         = MI->getOperand(Op+1).getImm();
   const MCOperand &IndexReg = MI->getOperand(Op+2);
   const MCOperand &DispSpec = MI->getOperand(Op+3);
+  const MCOperand &SegReg   = MI->getOperand(Op+4);
+  
+  // If this has a segment register, print it.
+  if (SegReg.getReg()) {
+    printOperand(MI, Op+4, O);
+    O << ':';
+  }
   
   O << '[';
   
@@ -104,7 +111,7 @@
     NeedPlus = true;
   }
   
- 
+  
   if (!DispSpec.isImm()) {
     if (NeedPlus) O << " + ";
     assert(DispSpec.isExpr() && "non-immediate displacement for LEA?");
@@ -126,13 +133,3 @@
   
   O << ']';
 }
-
-void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
-                                            raw_ostream &O) {
-  // If this has a segment register, print it.
-  if (MI->getOperand(Op+4).getReg()) {
-    printOperand(MI, Op+4, O);
-    O << ':';
-  }
-  printLeaMemReference(MI, Op, O);
-}

Modified: llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h Sat Jul 10 00:06:30 2010
@@ -36,7 +36,6 @@
 
   void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
   void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &O);
-  void printLeaMemReference(const MCInst *MI, unsigned Op, raw_ostream &O);
   void printSSECC(const MCInst *MI, unsigned Op, raw_ostream &O);
   void print_pcrel_imm(const MCInst *MI, unsigned OpNo, raw_ostream &O);
   
@@ -81,17 +80,9 @@
     O << "XMMWORD PTR ";
     printMemReference(MI, OpNo, O);
   }
-  void printlea32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
-    O << "DWORD PTR ";
-    printLeaMemReference(MI, OpNo, O);
-  }
-  void printlea64mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
-    O << "QWORD PTR ";
-    printLeaMemReference(MI, OpNo, O);
-  }
-  void printlea64_32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
-    O << "QWORD PTR ";
-    printLeaMemReference(MI, OpNo, O);
+  void printf256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+    O << "YMMWORD PTR ";
+    printMemReference(MI, OpNo, O);
   }
 };
   

Modified: llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp Sat Jul 10 00:06:30 2010
@@ -349,6 +349,15 @@
   switch (OutMI.getOpcode()) {
   case X86::LEA64_32r: // Handle 'subreg rewriting' for the lea64_32mem operand.
     lower_lea64_32mem(&OutMI, 1);
+    // FALL THROUGH.
+  case X86::LEA64r:
+  case X86::LEA16r:
+  case X86::LEA32r:
+    // LEA should have a segment register, but it must be empty.
+    assert(OutMI.getNumOperands() == 1+X86::AddrNumOperands &&
+           "Unexpected # of LEA operands");
+    assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&
+           "LEA has segment specified!");
     break;
   case X86::MOVZX16rr8:   LowerSubReg32_Op0(OutMI, X86::MOVZX32rr8); break;
   case X86::MOVZX16rm8:   LowerSubReg32_Op0(OutMI, X86::MOVZX32rm8); break;
@@ -386,10 +395,9 @@
     LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
     break;
 
-  // TAILJMPr, TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have
+  // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have
   // register inputs modeled as normal uses instead of implicit uses.  As such,
   // truncate off all but the first operand (the callee).  FIXME: Change isel.
-  case X86::TAILJMPr:
   case X86::TAILJMPr64:
   case X86::CALL64r:
   case X86::CALL64pcrel32: {
@@ -402,11 +410,20 @@
   }
 
   // TAILJMPd, TAILJMPd64 - Lower to the correct jump instructions.
+  case X86::TAILJMPr:
   case X86::TAILJMPd:
   case X86::TAILJMPd64: {
+    unsigned Opcode;
+    switch (OutMI.getOpcode()) {
+    default: assert(0 && "Invalid opcode");
+    case X86::TAILJMPr: Opcode = X86::JMP32r; break;
+    case X86::TAILJMPd:
+    case X86::TAILJMPd64: Opcode = X86::JMP_1; break;
+    }
+    
     MCOperand Saved = OutMI.getOperand(0);
     OutMI = MCInst();
-    OutMI.setOpcode(X86::TAILJMP_1);
+    OutMI.setOpcode(Opcode);
     OutMI.addOperand(Saved);
     break;
   }
@@ -540,6 +557,13 @@
     }
     return;
 
+  case X86::TAILJMPr:
+  case X86::TAILJMPd:
+  case X86::TAILJMPd64:
+    // Lower these as normal, but add some comments.
+    OutStreamer.AddComment("TAILCALL");
+    break;
+      
   case X86::MOVPC32r: {
     MCInst TmpInst;
     // This is a pseudo op for a two instruction sequence with a label, which
@@ -605,7 +629,6 @@
   
   MCInst TmpInst;
   MCInstLowering.Lower(MI, TmpInst);
-  
   OutStreamer.EmitInstruction(TmpInst);
 }
 

Modified: llvm/branches/wendling/eh/lib/Target/X86/X86AsmBackend.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86AsmBackend.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86AsmBackend.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86AsmBackend.cpp Sat Jul 10 00:06:30 2010
@@ -77,7 +77,6 @@
   case X86::JG_1:  return X86::JG_4;
   case X86::JLE_1: return X86::JLE_4;
   case X86::JL_1:  return X86::JL_4;
-  case X86::TAILJMP_1:
   case X86::JMP_1: return X86::JMP_4;
   case X86::JNE_1: return X86::JNE_4;
   case X86::JNO_1: return X86::JNO_4;

Modified: llvm/branches/wendling/eh/lib/Target/X86/X86CodeEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86CodeEmitter.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86CodeEmitter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86CodeEmitter.cpp Sat Jul 10 00:06:30 2010
@@ -730,9 +730,9 @@
   case X86II::MRMDestMem: {
     MCE.emitByte(BaseOpcode);
     emitMemModRMByte(MI, CurOp,
-                     getX86RegNum(MI.getOperand(CurOp + X86AddrNumOperands)
+                     getX86RegNum(MI.getOperand(CurOp + X86::AddrNumOperands)
                                   .getReg()));
-    CurOp +=  X86AddrNumOperands + 1;
+    CurOp +=  X86::AddrNumOperands + 1;
     if (CurOp != NumOps)
       emitConstant(MI.getOperand(CurOp++).getImm(),
                    X86II::getSizeOfImm(Desc->TSFlags));
@@ -750,13 +750,7 @@
     break;
 
   case X86II::MRMSrcMem: {
-    // FIXME: Maybe lea should have its own form?
-    int AddrOperands;
-    if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
-        Opcode == X86::LEA16r || Opcode == X86::LEA32r)
-      AddrOperands = X86AddrNumOperands - 1; // No segment register
-    else
-      AddrOperands = X86AddrNumOperands;
+    int AddrOperands = X86::AddrNumOperands;
 
     intptr_t PCAdj = (CurOp + AddrOperands + 1 != NumOps) ?
       X86II::getSizeOfImm(Desc->TSFlags) : 0;
@@ -810,14 +804,14 @@
   case X86II::MRM2m: case X86II::MRM3m:
   case X86II::MRM4m: case X86II::MRM5m:
   case X86II::MRM6m: case X86II::MRM7m: {
-    intptr_t PCAdj = (CurOp + X86AddrNumOperands != NumOps) ?
-      (MI.getOperand(CurOp+X86AddrNumOperands).isImm() ? 
+    intptr_t PCAdj = (CurOp + X86::AddrNumOperands != NumOps) ?
+      (MI.getOperand(CurOp+X86::AddrNumOperands).isImm() ? 
           X86II::getSizeOfImm(Desc->TSFlags) : 4) : 0;
 
     MCE.emitByte(BaseOpcode);
     emitMemModRMByte(MI, CurOp, (Desc->TSFlags & X86II::FormMask)-X86II::MRM0m,
                      PCAdj);
-    CurOp += X86AddrNumOperands;
+    CurOp += X86::AddrNumOperands;
 
     if (CurOp == NumOps)
       break;

Modified: llvm/branches/wendling/eh/lib/Target/X86/X86FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86FastISel.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86FastISel.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86FastISel.cpp Sat Jul 10 00:06:30 2010
@@ -1039,11 +1039,10 @@
   TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC, DL);
 
   // The shift instruction uses X86::CL. If we defined a super-register
-  // of X86::CL, emit an EXTRACT_SUBREG to precisely describe what
-  // we're doing here.
+  // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
   if (CReg != X86::CL)
-    BuildMI(MBB, DL, TII.get(TargetOpcode::EXTRACT_SUBREG), X86::CL)
-      .addReg(CReg).addImm(X86::sub_8bit);
+    BuildMI(MBB, DL, TII.get(TargetOpcode::KILL), X86::CL)
+      .addReg(CReg, RegState::Kill);
 
   unsigned ResultReg = createResultReg(RC);
   BuildMI(MBB, DL, TII.get(OpReg), ResultReg).addReg(Op0Reg);
@@ -1729,7 +1728,7 @@
       else
         Opc = X86::LEA64r;
       unsigned ResultReg = createResultReg(RC);
-      addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
+      addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
       return ResultReg;
     }
     return 0;
@@ -1782,7 +1781,7 @@
   unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
   TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
   unsigned ResultReg = createResultReg(RC);
-  addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
+  addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
   return ResultReg;
 }
 

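One note on the CL rewrite above: KILL is a no-op pseudo, so the change is pure liveness bookkeeping; it ends the live range of the wider copy register while leaving its CL sub-register live for the shift that follows. In sketch form (register names illustrative):

  // copyRegToReg placed the shift amount in, say, ECX; this marker
  //   %CL = KILL %ECX<kill>
  // records that ECX dies here and CL carries the value onward.
  if (CReg != X86::CL)
    BuildMI(MBB, DL, TII.get(TargetOpcode::KILL), X86::CL)
        .addReg(CReg, RegState::Kill);
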
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86FloatingPoint.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86FloatingPoint.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86FloatingPoint.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86FloatingPoint.cpp Sat Jul 10 00:06:30 2010
@@ -164,6 +164,8 @@
     void handleCompareFP(MachineBasicBlock::iterator &I);
     void handleCondMovFP(MachineBasicBlock::iterator &I);
     void handleSpecialFP(MachineBasicBlock::iterator &I);
+
+    bool translateCopy(MachineInstr*);
   };
   char FPS::ID = 0;
 }
@@ -237,7 +239,10 @@
     unsigned FPInstClass = Flags & X86II::FPTypeMask;
     if (MI->isInlineAsm())
       FPInstClass = X86II::SpecialFP;
-    
+
+    if (MI->isCopy() && translateCopy(MI))
+      FPInstClass = X86II::SpecialFP;
+
     if (FPInstClass == X86II::NotFP)
       continue;  // Efficiently ignore non-fp insts!
 
@@ -628,7 +633,7 @@
 void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
   MachineInstr *MI = I;
   unsigned NumOps = MI->getDesc().getNumOperands();
-  assert((NumOps == X86AddrNumOperands + 1 || NumOps == 1) &&
+  assert((NumOps == X86::AddrNumOperands + 1 || NumOps == 1) &&
          "Can only handle fst* & ftst instructions!");
 
   // Is this the last use of the source register?
@@ -1206,3 +1211,33 @@
   I = MBB->erase(I);  // Remove the pseudo instruction
   --I;
 }
+
+// Translate a COPY instruction to a pseudo-op that handleSpecialFP understands.
+bool FPS::translateCopy(MachineInstr *MI) {
+  unsigned DstReg = MI->getOperand(0).getReg();
+  unsigned SrcReg = MI->getOperand(1).getReg();
+
+  if (DstReg == X86::ST0) {
+    MI->setDesc(TII->get(X86::FpSET_ST0_80));
+    MI->RemoveOperand(0);
+    return true;
+  }
+  if (DstReg == X86::ST1) {
+    MI->setDesc(TII->get(X86::FpSET_ST1_80));
+    MI->RemoveOperand(0);
+    return true;
+  }
+  if (SrcReg == X86::ST0) {
+    MI->setDesc(TII->get(X86::FpGET_ST0_80));
+    return true;
+  }
+  if (SrcReg == X86::ST1) {
+    MI->setDesc(TII->get(X86::FpGET_ST1_80));
+    return true;
+  }
+  if (X86::RFP80RegClass.contains(DstReg, SrcReg)) {
+    MI->setDesc(TII->get(X86::MOV_Fp8080));
+    return true;
+  }
+  return false;
+}

Modified: llvm/branches/wendling/eh/lib/Target/X86/X86ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86ISelDAGToDAG.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86ISelDAGToDAG.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86ISelDAGToDAG.cpp Sat Jul 10 00:06:30 2010
@@ -190,9 +190,11 @@
                     SDValue &Scale, SDValue &Index, SDValue &Disp,
                     SDValue &Segment);
     bool SelectLEAAddr(SDNode *Op, SDValue N, SDValue &Base,
-                       SDValue &Scale, SDValue &Index, SDValue &Disp);
+                       SDValue &Scale, SDValue &Index, SDValue &Disp,
+                       SDValue &Segment);
     bool SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
-                       SDValue &Scale, SDValue &Index, SDValue &Disp);
+                           SDValue &Scale, SDValue &Index, SDValue &Disp,
+                           SDValue &Segment);
     bool SelectScalarSSELoad(SDNode *Root, SDValue N,
                              SDValue &Base, SDValue &Scale,
                              SDValue &Index, SDValue &Disp,
@@ -1205,7 +1207,8 @@
 /// mode it matches can be cost effectively emitted as an LEA instruction.
 bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
                                     SDValue &Base, SDValue &Scale,
-                                    SDValue &Index, SDValue &Disp) {
+                                    SDValue &Index, SDValue &Disp,
+                                    SDValue &Segment) {
   X86ISelAddressMode AM;
 
   // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
@@ -1259,7 +1262,6 @@
   if (Complexity <= 2)
     return false;
   
-  SDValue Segment;
   getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
   return true;
 }
@@ -1267,7 +1269,7 @@
 /// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
 bool X86DAGToDAGISel::SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
                                         SDValue &Scale, SDValue &Index,
-                                        SDValue &Disp) {
+                                        SDValue &Disp, SDValue &Segment) {
   assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
   const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
     
@@ -1284,7 +1286,6 @@
     AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
   }
   
-  SDValue Segment;
   getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
   return true;
 }

Modified: llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp Sat Jul 10 00:06:30 2010
@@ -1347,17 +1347,34 @@
       report_fatal_error("SSE register return with SSE disabled");
     }
 
+    SDValue Val;
+
     // If this is a call to a function that returns an fp value on the floating
-    // point stack, but where we prefer to use the value in xmm registers, copy
-    // it out as F80 and use a truncate to move it from fp stack reg to xmm reg.
-    if ((VA.getLocReg() == X86::ST0 ||
-         VA.getLocReg() == X86::ST1) &&
-        isScalarFPTypeInSSEReg(VA.getValVT())) {
-      CopyVT = MVT::f80;
-    }
+    // point stack, we must guarantee that the value is popped from the stack, so
+    // a CopyFromReg is not good enough - the copy instruction may be eliminated
+    // if the return value is not used. We use the FpGET_ST0 instructions
+    // instead.
+    if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) {
+      // If we prefer to use the value in xmm registers, copy it out as f80 and
+      // use a truncate to move it from fp stack reg to xmm reg.
+      if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80;
+      bool isST0 = VA.getLocReg() == X86::ST0;
+      unsigned Opc = 0;
+      if (CopyVT == MVT::f32) Opc = isST0 ? X86::FpGET_ST0_32:X86::FpGET_ST1_32;
+      if (CopyVT == MVT::f64) Opc = isST0 ? X86::FpGET_ST0_64:X86::FpGET_ST1_64;
+      if (CopyVT == MVT::f80) Opc = isST0 ? X86::FpGET_ST0_80:X86::FpGET_ST1_80;
+      SDValue Ops[] = { Chain, InFlag };
+      Chain = SDValue(DAG.getMachineNode(Opc, dl, CopyVT, MVT::Other, MVT::Flag,
+                                         Ops, 2), 1);
+      Val = Chain.getValue(0);
 
-    SDValue Val;
-    if (Is64Bit && CopyVT.isVector() && CopyVT.getSizeInBits() == 64) {
+      // Round the f80 to the right size, which also moves it to the appropriate
+      // xmm register.
+      if (CopyVT != VA.getValVT())
+        Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
+                          // This truncation won't change the value.
+                          DAG.getIntPtrConstant(1));
+    } else if (Is64Bit && CopyVT.isVector() && CopyVT.getSizeInBits() == 64) {
       // For x86-64, MMX values are returned in XMM0 / XMM1 except for v1i64.
       if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
         Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
@@ -1377,15 +1394,6 @@
       Val = Chain.getValue(0);
     }
     InFlag = Chain.getValue(2);
-
-    if (CopyVT != VA.getValVT()) {
-      // Round the F80 the right size, which also moves to the appropriate xmm
-      // register.
-      Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
-                        // This truncation won't change the value.
-                        DAG.getIntPtrConstant(1));
-    }
-
     InVals.push_back(Val);
   }
 
@@ -6756,7 +6764,7 @@
   Store = DAG.getStore(Op.getOperand(0), dl,
                        DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
                                        MVT::i32),
-                       FIN, SV, 0, false, false, 0);
+                       FIN, SV, 4, false, false, 0);
   MemOps.push_back(Store);
 
   // Store ptr to overflow_arg_area
@@ -6764,7 +6772,7 @@
                     FIN, DAG.getIntPtrConstant(4));
   SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                     getPointerTy());
-  Store = DAG.getStore(Op.getOperand(0), dl, OVFIN, FIN, SV, 0,
+  Store = DAG.getStore(Op.getOperand(0), dl, OVFIN, FIN, SV, 8,
                        false, false, 0);
   MemOps.push_back(Store);
 
@@ -6773,7 +6781,7 @@
                     FIN, DAG.getIntPtrConstant(8));
   SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
                                     getPointerTy());
-  Store = DAG.getStore(Op.getOperand(0), dl, RSFIN, FIN, SV, 0,
+  Store = DAG.getStore(Op.getOperand(0), dl, RSFIN, FIN, SV, 16,
                        false, false, 0);
   MemOps.push_back(Store);
   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
@@ -8027,17 +8035,17 @@
   newMBB->addSuccessor(newMBB);
 
   // Insert instructions into newMBB based on incoming instruction
-  assert(bInstr->getNumOperands() < X86AddrNumOperands + 4 &&
+  assert(bInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
          "unexpected number of operands");
   DebugLoc dl = bInstr->getDebugLoc();
   MachineOperand& destOper = bInstr->getOperand(0);
-  MachineOperand* argOpers[2 + X86AddrNumOperands];
+  MachineOperand* argOpers[2 + X86::AddrNumOperands];
   int numArgs = bInstr->getNumOperands() - 1;
   for (int i=0; i < numArgs; ++i)
     argOpers[i] = &bInstr->getOperand(i+1);
 
   // x86 address has 4 operands: base, index, scale, and displacement
-  int lastAddrIndx = X86AddrNumOperands - 1; // [0,3]
+  int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
   int valArgIndx = lastAddrIndx + 1;
 
   unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
@@ -8141,12 +8149,12 @@
   DebugLoc dl = bInstr->getDebugLoc();
   // Insert instructions into newMBB based on incoming instruction
   // There are 8 "real" operands plus 9 implicit def/uses, ignored here.
-  assert(bInstr->getNumOperands() < X86AddrNumOperands + 14 &&
+  assert(bInstr->getNumOperands() < X86::AddrNumOperands + 14 &&
          "unexpected number of operands");
   MachineOperand& dest1Oper = bInstr->getOperand(0);
   MachineOperand& dest2Oper = bInstr->getOperand(1);
-  MachineOperand* argOpers[2 + X86AddrNumOperands];
-  for (int i=0; i < 2 + X86AddrNumOperands; ++i) {
+  MachineOperand* argOpers[2 + X86::AddrNumOperands];
+  for (int i=0; i < 2 + X86::AddrNumOperands; ++i) {
     argOpers[i] = &bInstr->getOperand(i+2);
 
     // We use some of the operands multiple times, so conservatively just
@@ -8156,7 +8164,7 @@
   }
 
   // x86 address has 5 operands: base, index, scale, displacement, and segment.
-  int lastAddrIndx = X86AddrNumOperands - 1; // [0,3]
+  int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
 
   unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
   MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1);
@@ -8295,16 +8303,16 @@
 
   DebugLoc dl = mInstr->getDebugLoc();
   // Insert instructions into newMBB based on incoming instruction
-  assert(mInstr->getNumOperands() < X86AddrNumOperands + 4 &&
+  assert(mInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
          "unexpected number of operands");
   MachineOperand& destOper = mInstr->getOperand(0);
-  MachineOperand* argOpers[2 + X86AddrNumOperands];
+  MachineOperand* argOpers[2 + X86::AddrNumOperands];
   int numArgs = mInstr->getNumOperands() - 1;
   for (int i=0; i < numArgs; ++i)
     argOpers[i] = &mInstr->getOperand(i+1);
 
   // x86 address has 4 operands: base, index, scale, and displacement
-  int lastAddrIndx = X86AddrNumOperands - 1; // [0,3]
+  int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
   int valArgIndx = lastAddrIndx + 1;
 
   unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
@@ -8580,7 +8588,7 @@
                       MI->getOperand(3).getTargetFlags())
     .addReg(0);
     MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
-    addDirectMem(MIB, X86::RDI).addReg(0);
+    addDirectMem(MIB, X86::RDI);
   } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
     MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                       TII->get(X86::MOV32rm), X86::EAX)
@@ -8590,7 +8598,7 @@
                       MI->getOperand(3).getTargetFlags())
     .addReg(0);
     MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
-    addDirectMem(MIB, X86::EAX).addReg(0);
+    addDirectMem(MIB, X86::EAX);
   } else {
     MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                       TII->get(X86::MOV32rm), X86::EAX)
@@ -8600,7 +8608,7 @@
                       MI->getOperand(3).getTargetFlags())
     .addReg(0);
     MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
-    addDirectMem(MIB, X86::EAX).addReg(0);
+    addDirectMem(MIB, X86::EAX);
   }
   
   MI->eraseFromParent(); // The pseudo instruction is gone now.
@@ -8705,7 +8713,7 @@
       AM.Disp = Op.getImm();
     }
     addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
-                      .addReg(MI->getOperand(X86AddrNumOperands).getReg());
+                      .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
 
     // Reload the original control word now.
     addFrameReference(BuildMI(*BB, MI, DL,

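The SrcValue offsets added to the VASTART stores (4, 8, 16) line up with the fields of the System V x86-64 va_list record, which for reference looks like this (ABI layout, not LLVM code):

  // System V AMD64 ABI va_list element; offsets match the stores above.
  struct va_list_tag {
    unsigned gp_offset;       // byte 0:  next general-purpose register slot
    unsigned fp_offset;       // byte 4:  next floating-point register slot
    void *overflow_arg_area;  // byte 8:  next stack-passed argument
    void *reg_save_area;      // byte 16: block of spilled argument registers
  };
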
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86Instr64bit.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86Instr64bit.td?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86Instr64bit.td (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86Instr64bit.td Sat Jul 10 00:06:30 2010
@@ -35,6 +35,14 @@
   let ParserMatchClass = ImmSExti64i8AsmOperand;
 }
 
+def lea64_32mem : Operand<i32> {
+  let PrintMethod = "printi32mem";
+  let AsmOperandLowerMethod = "lower_lea64_32mem";
+  let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm, i8imm);
+  let ParserMatchClass = X86MemAsmOperand;
+}
+
+
 // Special i64mem for addresses of load folding tail calls. These are not
 // allowed to use callee-saved registers since they must be scheduled
 // after callee-saved register are popped.
@@ -44,27 +52,14 @@
   let ParserMatchClass = X86MemAsmOperand;
 }
 
-def lea64mem : Operand<i64> {
-  let PrintMethod = "printlea64mem";
-  let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm);
-  let ParserMatchClass = X86NoSegMemAsmOperand;
-}
-
-def lea64_32mem : Operand<i32> {
-  let PrintMethod = "printlea64_32mem";
-  let AsmOperandLowerMethod = "lower_lea64_32mem";
-  let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm);
-  let ParserMatchClass = X86NoSegMemAsmOperand;
-}
-
 //===----------------------------------------------------------------------===//
 // Complex Pattern Definitions.
 //
-def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
+def lea64addr : ComplexPattern<i64, 5, "SelectLEAAddr",
                         [add, sub, mul, X86mul_imm, shl, or, frameindex,
                          X86WrapperRIP], []>;
 
-def tls64addr : ComplexPattern<i64, 4, "SelectTLSADDRAddr",
+def tls64addr : ComplexPattern<i64, 5, "SelectTLSADDRAddr",
                                [tglobaltlsaddr], []>;
                                
 //===----------------------------------------------------------------------===//
@@ -289,7 +284,7 @@
                   [(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;
 
 let isReMaterializable = 1 in
-def LEA64r   : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
+def LEA64r   : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                   "lea{q}\t{$src|$dst}, {$dst|$src}",
                   [(set GR64:$dst, lea64addr:$src)]>;
 
@@ -1697,7 +1692,7 @@
             XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
             XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
     Uses = [RSP] in
-def TLS_addr64 : I<0, Pseudo, (outs), (ins lea64mem:$sym),
+def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                    ".byte\t0x66; "
                    "leaq\t$sym(%rip), %rdi; "
                    ".word\t0x6666; "

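The arity bump in lea64addr and tls64addr (4 to 5) has to stay in step with the matching selector signatures in X86ISelDAGToDAG.cpp above: a ComplexPattern with five values binds one SDValue out-parameter per operand slot. Sketch of the correspondence:

  // ComplexPattern<i64, 5, "SelectLEAAddr", ...> binds, in order:
  //   Base, Scale, Index, Disp, Segment
  bool SelectLEAAddr(SDNode *Op, SDValue N,
                     SDValue &Base, SDValue &Scale, SDValue &Index,
                     SDValue &Disp, SDValue &Segment); // five results
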
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86InstrBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86InstrBuilder.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86InstrBuilder.h (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86InstrBuilder.h Sat Jul 10 00:06:30 2010
@@ -64,19 +64,15 @@
 ///
 static inline const MachineInstrBuilder &
 addDirectMem(const MachineInstrBuilder &MIB, unsigned Reg) {
-  // Because memory references are always represented with four
-  // values, this adds: Reg, [1, NoReg, 0] to the instruction.
-  return MIB.addReg(Reg).addImm(1).addReg(0).addImm(0);
+  // Because memory references are always represented with five
+  // values, this adds: Reg, 1, NoReg, 0, NoReg to the instruction.
+  return MIB.addReg(Reg).addImm(1).addReg(0).addImm(0).addReg(0);
 }
 
-static inline const MachineInstrBuilder &
-addLeaOffset(const MachineInstrBuilder &MIB, int Offset) {
-  return MIB.addImm(1).addReg(0).addImm(Offset);
-}
 
 static inline const MachineInstrBuilder &
 addOffset(const MachineInstrBuilder &MIB, int Offset) {
-  return addLeaOffset(MIB, Offset).addReg(0);
+  return MIB.addImm(1).addReg(0).addImm(Offset).addReg(0);
 }
 
 /// addRegOffset - This function is used to add a memory reference of the form
@@ -89,25 +85,20 @@
   return addOffset(MIB.addReg(Reg, getKillRegState(isKill)), Offset);
 }
 
-static inline const MachineInstrBuilder &
-addLeaRegOffset(const MachineInstrBuilder &MIB,
-                unsigned Reg, bool isKill, int Offset) {
-  return addLeaOffset(MIB.addReg(Reg, getKillRegState(isKill)), Offset);
-}
-
 /// addRegReg - This function is used to add a memory reference of the form:
 /// [Reg + Reg].
 static inline const MachineInstrBuilder &addRegReg(const MachineInstrBuilder &MIB,
                                             unsigned Reg1, bool isKill1,
                                             unsigned Reg2, bool isKill2) {
   return MIB.addReg(Reg1, getKillRegState(isKill1)).addImm(1)
-    .addReg(Reg2, getKillRegState(isKill2)).addImm(0);
+    .addReg(Reg2, getKillRegState(isKill2)).addImm(0).addReg(0);
 }
 
 static inline const MachineInstrBuilder &
-addLeaAddress(const MachineInstrBuilder &MIB, const X86AddressMode &AM) {
-  assert (AM.Scale == 1 || AM.Scale == 2 || AM.Scale == 4 || AM.Scale == 8);
-
+addFullAddress(const MachineInstrBuilder &MIB,
+               const X86AddressMode &AM) {
+  assert(AM.Scale == 1 || AM.Scale == 2 || AM.Scale == 4 || AM.Scale == 8);
+  
   if (AM.BaseType == X86AddressMode::RegBase)
     MIB.addReg(AM.Base.Reg);
   else if (AM.BaseType == X86AddressMode::FrameIndexBase)
@@ -116,15 +107,11 @@
     assert (0);
   MIB.addImm(AM.Scale).addReg(AM.IndexReg);
   if (AM.GV)
-    return MIB.addGlobalAddress(AM.GV, AM.Disp, AM.GVOpFlags);
+    MIB.addGlobalAddress(AM.GV, AM.Disp, AM.GVOpFlags);
   else
-    return MIB.addImm(AM.Disp);
-}
-
-static inline const MachineInstrBuilder &
-addFullAddress(const MachineInstrBuilder &MIB,
-               const X86AddressMode &AM) {
-  return addLeaAddress(MIB, AM).addReg(0);
+    MIB.addImm(AM.Disp);
+    
+  return MIB.addReg(0);
 }
 
 /// addFrameReference - This function is used to add a reference to the base of

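With the segment register folded into the common form, the addLea* helpers are gone and addOffset/addRegOffset/addRegReg/addFullAddress append the trailing zero segment themselves. A usage sketch (the caller and register choices are hypothetical, not from this patch):

    // addRegOffset now emits the full five-operand sequence:
    //   RSP, 1, %noreg, 16, %noreg
    MachineInstrBuilder MIB =
        BuildMI(MF, DL, TII.get(X86::MOV64rm), DestReg);
    addRegOffset(MIB, X86::RSP, /*isKill=*/false, /*Offset=*/16);
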
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.cpp Sat Jul 10 00:06:30 2010
@@ -784,7 +784,9 @@
   case X86::MOV8rm:
   case X86::MOV16rm:
   case X86::MOV32rm:
+  case X86::MOV32rm_TC:
   case X86::MOV64rm:
+  case X86::MOV64rm_TC:
   case X86::LD_Fp64m:
   case X86::MOVSSrm:
   case X86::MOVSDrm:
@@ -805,7 +807,9 @@
   case X86::MOV8mr:
   case X86::MOV16mr:
   case X86::MOV32mr:
+  case X86::MOV32mr_TC:
   case X86::MOV64mr:
+  case X86::MOV64mr_TC:
   case X86::ST_FpP64m:
   case X86::MOVSSmr:
   case X86::MOVSDmr:
@@ -863,7 +867,7 @@
                                           int &FrameIndex) const {
   if (isFrameStoreOpcode(MI->getOpcode()))
     if (isFrameOperand(MI, 0, FrameIndex))
-      return MI->getOperand(X86AddrNumOperands).getReg();
+      return MI->getOperand(X86::AddrNumOperands).getReg();
   return 0;
 }
 
@@ -1145,10 +1149,9 @@
   // least on modern x86 machines).
   BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
   MachineInstr *InsMI =
-    BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::INSERT_SUBREG),leaInReg)
-    .addReg(leaInReg)
-    .addReg(Src, getKillRegState(isKill))
-    .addImm(X86::sub_16bit);
+    BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
+    .addReg(leaInReg, RegState::Define, X86::sub_16bit)
+    .addReg(Src, getKillRegState(isKill));
 
   MachineInstrBuilder MIB = BuildMI(*MFI, MBBI, MI->getDebugLoc(),
                                     get(Opc), leaOutReg);
@@ -1159,20 +1162,20 @@
   case X86::SHL16ri: {
     unsigned ShAmt = MI->getOperand(2).getImm();
     MIB.addReg(0).addImm(1 << ShAmt)
-       .addReg(leaInReg, RegState::Kill).addImm(0);
+       .addReg(leaInReg, RegState::Kill).addImm(0).addReg(0);
     break;
   }
   case X86::INC16r:
   case X86::INC64_16r:
-    addLeaRegOffset(MIB, leaInReg, true, 1);
+    addRegOffset(MIB, leaInReg, true, 1);
     break;
   case X86::DEC16r:
   case X86::DEC64_16r:
-    addLeaRegOffset(MIB, leaInReg, true, -1);
+    addRegOffset(MIB, leaInReg, true, -1);
     break;
   case X86::ADD16ri:
   case X86::ADD16ri8:
-    addLeaRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());    
+    addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());    
     break;
   case X86::ADD16rr: {
     unsigned Src2 = MI->getOperand(2).getReg();
@@ -1189,10 +1192,9 @@
       // well be shifting and then extracting the lower 16-bits. 
       BuildMI(*MFI, MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg2);
       InsMI2 =
-        BuildMI(*MFI, MIB, MI->getDebugLoc(), get(X86::INSERT_SUBREG),leaInReg2)
-        .addReg(leaInReg2)
-        .addReg(Src2, getKillRegState(isKill2))
-        .addImm(X86::sub_16bit);
+        BuildMI(*MFI, MIB, MI->getDebugLoc(), get(TargetOpcode::COPY))
+        .addReg(leaInReg2, RegState::Define, X86::sub_16bit)
+        .addReg(Src2, getKillRegState(isKill2));
       addRegReg(MIB, leaInReg, true, leaInReg2, true);
     }
     if (LV && isKill2 && InsMI2)
@@ -1203,10 +1205,9 @@
 
   MachineInstr *NewMI = MIB;
   MachineInstr *ExtMI =
-    BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::EXTRACT_SUBREG))
+    BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
     .addReg(Dest, RegState::Define | getDeadRegState(isDead))
-    .addReg(leaOutReg, RegState::Kill)
-    .addImm(X86::sub_16bit);
+    .addReg(leaOutReg, RegState::Kill, X86::sub_16bit);
 
   if (LV) {
     // Update live variables
@@ -1277,7 +1278,7 @@
       .addReg(Dest, RegState::Define | getDeadRegState(isDead))
       .addReg(0).addImm(1 << ShAmt)
       .addReg(Src, getKillRegState(isKill))
-      .addImm(0);
+      .addImm(0).addReg(0);
     break;
   }
   case X86::SHL32ri: {
@@ -1291,7 +1292,7 @@
     NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
       .addReg(Dest, RegState::Define | getDeadRegState(isDead))
       .addReg(0).addImm(1 << ShAmt)
-      .addReg(Src, getKillRegState(isKill)).addImm(0);
+      .addReg(Src, getKillRegState(isKill)).addImm(0).addReg(0);
     break;
   }
   case X86::SHL16ri: {
@@ -1307,7 +1308,7 @@
       .addReg(Dest, RegState::Define | getDeadRegState(isDead))
       .addReg(0).addImm(1 << ShAmt)
       .addReg(Src, getKillRegState(isKill))
-      .addImm(0);
+      .addImm(0).addReg(0);
     break;
   }
   default: {
@@ -1325,7 +1326,7 @@
       assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
       unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
         : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
-      NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                               .addReg(Dest, RegState::Define |
                                       getDeadRegState(isDead)),
                               Src, isKill, 1);
@@ -1347,7 +1348,7 @@
       assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
       unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
         : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
-      NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                               .addReg(Dest, RegState::Define |
                                       getDeadRegState(isDead)),
                               Src, isKill, -1);
@@ -1395,7 +1396,7 @@
     case X86::ADD64ri32:
     case X86::ADD64ri8:
       assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
-      NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
+      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
                               .addReg(Dest, RegState::Define |
                                       getDeadRegState(isDead)),
                               Src, isKill, MI->getOperand(2).getImm());
@@ -1404,7 +1405,7 @@
     case X86::ADD32ri8: {
       assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
       unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
-      NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                               .addReg(Dest, RegState::Define |
                                       getDeadRegState(isDead)),
                                 Src, isKill, MI->getOperand(2).getImm());
@@ -1415,7 +1416,7 @@
       if (DisableLEA16)
         return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
       assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
-      NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
+      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
                               .addReg(Dest, RegState::Define |
                                       getDeadRegState(isDead)),
                               Src, isKill, MI->getOperand(2).getImm());
@@ -2061,6 +2062,68 @@
   return false;
 }
 
+void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+                               MachineBasicBlock::iterator MI, DebugLoc DL,
+                               unsigned DestReg, unsigned SrcReg,
+                               bool KillSrc) const {
+  // First deal with the normal symmetric copies.
+  unsigned Opc = 0;
+  if (X86::GR64RegClass.contains(DestReg, SrcReg))
+    Opc = X86::MOV64rr;
+  else if (X86::GR32RegClass.contains(DestReg, SrcReg))
+    Opc = X86::MOV32rr;
+  else if (X86::GR16RegClass.contains(DestReg, SrcReg))
+    Opc = X86::MOV16rr;
+  else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
+    // Copying to or from a physical H register on x86-64 requires a NOREX
+    // move.  Otherwise use a normal move.
+    if ((isHReg(DestReg) || isHReg(SrcReg)) &&
+        TM.getSubtarget<X86Subtarget>().is64Bit())
+      Opc = X86::MOV8rr_NOREX;
+    else
+      Opc = X86::MOV8rr;
+  } else if (X86::VR128RegClass.contains(DestReg, SrcReg))
+    Opc = X86::MOVAPSrr;
+  else if (X86::VR64RegClass.contains(DestReg, SrcReg))
+    Opc = X86::MMX_MOVQ64rr;
+
+  if (Opc) {
+    BuildMI(MBB, MI, DL, get(Opc), DestReg)
+      .addReg(SrcReg, getKillRegState(KillSrc));
+    return;
+  }
+
+  // Moving EFLAGS to / from another register requires a push and a pop.
+  if (SrcReg == X86::EFLAGS) {
+    if (X86::GR64RegClass.contains(DestReg)) {
+      BuildMI(MBB, MI, DL, get(X86::PUSHF64));
+      BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg);
+      return;
+    } else if (X86::GR32RegClass.contains(DestReg)) {
+      BuildMI(MBB, MI, DL, get(X86::PUSHF32));
+      BuildMI(MBB, MI, DL, get(X86::POP32r), DestReg);
+      return;
+    }
+  }
+  if (DestReg == X86::EFLAGS) {
+    if (X86::GR64RegClass.contains(SrcReg)) {
+      BuildMI(MBB, MI, DL, get(X86::PUSH64r))
+        .addReg(SrcReg, getKillRegState(KillSrc));
+      BuildMI(MBB, MI, DL, get(X86::POPF64));
+      return;
+    } else if (X86::GR32RegClass.contains(SrcReg)) {
+      BuildMI(MBB, MI, DL, get(X86::PUSH32r))
+        .addReg(SrcReg, getKillRegState(KillSrc));
+      BuildMI(MBB, MI, DL, get(X86::POPF32));
+      return;
+    }
+  }
+
+  DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg)
+               << " to " << RI.getName(DestReg) << '\n');
+  llvm_unreachable("Cannot emit physreg copy instruction");
+}
+
 static unsigned getLoadStoreRegOpcode(unsigned Reg,
                                       const TargetRegisterClass *RC,
                                       bool isStackAligned,
@@ -2439,7 +2502,7 @@
   }
   
   // No fusion 
-  if (PrintFailedFusing)
+  if (PrintFailedFusing && !MI->isCopy())
     dbgs() << "We failed to fuse operand " << i << " in " << *MI;
   return NULL;
 }
@@ -2557,7 +2620,7 @@
   } else if (Ops.size() != 1)
     return NULL;
 
-  SmallVector<MachineOperand,X86AddrNumOperands> MOs;
+  SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
   switch (LoadMI->getOpcode()) {
   case X86::V_SET0PS:
   case X86::V_SET0PD:
@@ -2611,7 +2674,7 @@
   default: {
     // Folding a normal load. Just copy the load's address operands.
     unsigned NumOps = LoadMI->getDesc().getNumOperands();
-    for (unsigned i = NumOps - X86AddrNumOperands; i != NumOps; ++i)
+    for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
       MOs.push_back(LoadMI->getOperand(i));
     break;
   }
@@ -2674,7 +2737,7 @@
     if (I != OpcodeTablePtr->end())
       return true;
   }
-  return false;
+  return TargetInstrInfoImpl::canFoldMemoryOperand(MI, Ops);
 }
 
 bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
@@ -2705,13 +2768,13 @@
     // conservatively assume the address is unaligned. That's bad for
     // performance.
     return false;
-  SmallVector<MachineOperand, X86AddrNumOperands> AddrOps;
+  SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
   SmallVector<MachineOperand,2> BeforeOps;
   SmallVector<MachineOperand,2> AfterOps;
   SmallVector<MachineOperand,4> ImpOps;
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     MachineOperand &Op = MI->getOperand(i);
-    if (i >= Index && i < Index + X86AddrNumOperands)
+    if (i >= Index && i < Index + X86::AddrNumOperands)
       AddrOps.push_back(Op);
     else if (Op.isReg() && Op.isImplicit())
       ImpOps.push_back(Op);
@@ -2730,7 +2793,7 @@
     loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs);
     if (UnfoldStore) {
       // Address operands cannot be marked isKill.
-      for (unsigned i = 1; i != 1 + X86AddrNumOperands; ++i) {
+      for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
         MachineOperand &MO = NewMIs[0]->getOperand(i);
         if (MO.isReg())
           MO.setIsKill(false);
@@ -2827,7 +2890,7 @@
   unsigned NumOps = N->getNumOperands();
   for (unsigned i = 0; i != NumOps-1; ++i) {
     SDValue Op = N->getOperand(i);
-    if (i >= Index-NumDefs && i < Index-NumDefs + X86AddrNumOperands)
+    if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands)
       AddrOps.push_back(Op);
     else if (i < Index-NumDefs)
       BeforeOps.push_back(Op);
@@ -3088,6 +3151,8 @@
   case X86::R12B:  case X86::R13B:  case X86::R14B:  case X86::R15B:
   case X86::XMM8:  case X86::XMM9:  case X86::XMM10: case X86::XMM11:
   case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
+  case X86::YMM8:  case X86::YMM9:  case X86::YMM10: case X86::YMM11:
+  case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15:
     return true;
   }
   return false;
@@ -3159,7 +3224,7 @@
     case X86II::MRM4m: case X86II::MRM5m:
     case X86II::MRM6m: case X86II::MRM7m:
     case X86II::MRMDestMem: {
-      unsigned e = (isTwoAddr ? X86AddrNumOperands+1 : X86AddrNumOperands);
+      unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
       i = isTwoAddr ? 1 : 0;
       if (NumOps > e && isX86_64ExtendedReg(MI.getOperand(e)))
         REX |= 1 << 2;
@@ -3511,7 +3576,7 @@
   case X86II::MRMDestMem: {
     ++FinalSize;
     FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
-    CurOp +=  X86AddrNumOperands + 1;
+    CurOp +=  X86::AddrNumOperands + 1;
     if (CurOp != NumOps) {
       ++CurOp;
       FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags));
@@ -3530,16 +3595,9 @@
     break;
 
   case X86II::MRMSrcMem: {
-    int AddrOperands;
-    if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
-        Opcode == X86::LEA16r || Opcode == X86::LEA32r)
-      AddrOperands = X86AddrNumOperands - 1; // No segment register
-    else
-      AddrOperands = X86AddrNumOperands;
-
     ++FinalSize;
     FinalSize += getMemModRMByteSize(MI, CurOp+1, IsPIC, Is64BitMode);
-    CurOp += AddrOperands + 1;
+    CurOp += X86::AddrNumOperands + 1;
     if (CurOp != NumOps) {
       ++CurOp;
       FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags));
@@ -3593,7 +3651,7 @@
     
     ++FinalSize;
     FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
-    CurOp += X86AddrNumOperands;
+    CurOp += X86::AddrNumOperands;
 
     if (CurOp != NumOps) {
       const MachineOperand &MO = MI.getOperand(CurOp++);

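Besides the X86::AddrNumOperands renaming, two substantive changes land in this file: the 16-bit LEA conversion now builds target-independent COPY instructions with sub-register indices instead of INSERT_SUBREG/EXTRACT_SUBREG pairs, and X86 implements the copyPhysReg hook, including EFLAGS moves via push/pop. A caller-side sketch (the caller is hypothetical; the lowering described in the comment matches the hunk above):

    // Copy EFLAGS into a GR32 register; the hook lowers this to
    // PUSHF32 followed by POP32r %eax.
    TII->copyPhysReg(MBB, InsertPt, DL,
                     /*DestReg=*/X86::EAX, /*SrcReg=*/X86::EFLAGS,
                     /*KillSrc=*/true);
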
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.h?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.h Sat Jul 10 00:06:30 2010
@@ -24,6 +24,24 @@
   class X86TargetMachine;
 
 namespace X86 {
+  // Enums for memory operand decoding.  Each memory operand is represented with
+  // a 5 operand sequence in the form:
+  //   [BaseReg, ScaleAmt, IndexReg, Disp, Segment]
+  // These enums help decode this.
+  enum {
+    AddrBaseReg = 0,
+    AddrScaleAmt = 1,
+    AddrIndexReg = 2,
+    AddrDisp = 3,
+    
+    /// AddrSegmentReg - The operand # of the segment in the memory operand.
+    AddrSegmentReg = 4,
+
+    /// AddrNumOperands - Total number of operands in a memory reference.
+    AddrNumOperands = 5
+  };
+  
+  
   // X86 specific condition code. These correspond to X86_*_COND in
   // X86InstrInfo.td. They must be kept in synch.
   enum CondCode {
@@ -417,32 +435,27 @@
     SSEDomainShift = 22,
 
     OpcodeShift   = 24,
-    OpcodeMask    = 0xFF << OpcodeShift
+    OpcodeMask    = 0xFF << OpcodeShift,
 
-  };
-  
-  // FIXME: The enum opcode space is over and more bits are needed. Anywhere
-  // those enums below are used, TSFlags must be shifted right by 32 first.
-  enum {
     //===------------------------------------------------------------------===//
-    // VEX - A prefix used by AVX instructions
-    VEX         = 1,
+    // VEX - The opcode prefix used by AVX instructions
+    VEX         = 1ULL << 32,
 
-    // VEX_W is has a opcode specific functionality, but is used in the same
+    // VEX_W - Has opcode-specific functionality, but is used in the same
     // way as REX_W is for regular SSE instructions.
-    VEX_W       = 1 << 1,
+    VEX_W       = 1ULL << 33,
 
-    // VEX_4V is used to specify an additional AVX/SSE register. Several 2
+    // VEX_4V - Used to specify an additional AVX/SSE register. Several 2
     // address instructions in SSE are represented as 3 address ones in AVX
     // and the additional register is encoded in VEX_VVVV prefix.
-    VEX_4V      = 1 << 2,
+    VEX_4V      = 1ULL << 34,
 
+    // VEX_I8IMM - Specifies that the last register used in an AVX instruction
+    // VEX_I8IMM - Specifies that the last register used in a AVX instruction,
     // must be encoded in the i8 immediate field. This usually happens in
     // instructions with 4 operands.
-    VEX_I8IMM   = 1 << 3
+    VEX_I8IMM   = 1ULL << 35
   };
-
+  
   // getBaseOpcodeFor - This function returns the "base" X86 opcode for the
   // specified machine instruction.
   //
@@ -473,22 +486,75 @@
   /// TSFlags indicates that it is pc relative.
   static inline unsigned isImmPCRel(uint64_t TSFlags) {
     switch (TSFlags & X86II::ImmMask) {
-      default: assert(0 && "Unknown immediate size");
-      case X86II::Imm8PCRel:
-      case X86II::Imm16PCRel:
-      case X86II::Imm32PCRel:
-        return true;
-      case X86II::Imm8:
-      case X86II::Imm16:
-      case X86II::Imm32:
-      case X86II::Imm64:
-        return false;
+    default: assert(0 && "Unknown immediate size");
+    case X86II::Imm8PCRel:
+    case X86II::Imm16PCRel:
+    case X86II::Imm32PCRel:
+      return true;
+    case X86II::Imm8:
+    case X86II::Imm16:
+    case X86II::Imm32:
+    case X86II::Imm64:
+      return false;
     }
-  }    
+  }
+  
+  /// getMemoryOperandNo - The function returns the MCInst operand # for the
+  /// first field of the memory operand.  If the instruction doesn't have a
+  /// memory operand, this returns -1.
+  ///
+  /// Note that this ignores tied operands.  If there is a tied register which
+  /// is duplicated in the MCInst (e.g. "EAX = addl EAX, [mem]") it is only
+  /// counted as one operand.
+  ///
+  static inline int getMemoryOperandNo(uint64_t TSFlags) {
+    switch (TSFlags & X86II::FormMask) {
+    case X86II::MRMInitReg:  assert(0 && "FIXME: Remove this form");
+    default: assert(0 && "Unknown FormMask value in getMemoryOperandNo!");
+    case X86II::Pseudo:
+    case X86II::RawFrm:
+    case X86II::AddRegFrm:
+    case X86II::MRMDestReg:
+    case X86II::MRMSrcReg:
+       return -1;
+    case X86II::MRMDestMem:
+      return 0;
+    case X86II::MRMSrcMem: {
+      bool HasVEX_4V = TSFlags & X86II::VEX_4V;
+      unsigned FirstMemOp = 1;
+      if (HasVEX_4V)
+        ++FirstMemOp;// Skip the register source (which is encoded in VEX_VVVV).
+      
+      // FIXME: Maybe lea should have its own form?  This is a horrible hack.
+      //if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
+      //    Opcode == X86::LEA16r || Opcode == X86::LEA32r)
+      return FirstMemOp;
+    }
+    case X86II::MRM0r: case X86II::MRM1r:
+    case X86II::MRM2r: case X86II::MRM3r:
+    case X86II::MRM4r: case X86II::MRM5r:
+    case X86II::MRM6r: case X86II::MRM7r:
+      return -1;
+    case X86II::MRM0m: case X86II::MRM1m:
+    case X86II::MRM2m: case X86II::MRM3m:
+    case X86II::MRM4m: case X86II::MRM5m:
+    case X86II::MRM6m: case X86II::MRM7m:
+      return 0;
+    case X86II::MRM_C1:
+    case X86II::MRM_C2:
+    case X86II::MRM_C3:
+    case X86II::MRM_C4:
+    case X86II::MRM_C8:
+    case X86II::MRM_C9:
+    case X86II::MRM_E8:
+    case X86II::MRM_F0:
+    case X86II::MRM_F8:
+    case X86II::MRM_F9:
+      return -1;
+    }
+  }
 }
 
-const int X86AddrNumOperands = 5;
-
 inline static bool isScale(const MachineOperand &MO) {
   return MO.isImm() &&
     (MO.getImm() == 1 || MO.getImm() == 2 ||
@@ -632,6 +698,10 @@
                             const TargetRegisterClass *DestRC,
                             const TargetRegisterClass *SrcRC,
                             DebugLoc DL) const;
+  virtual void copyPhysReg(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator MI, DebugLoc DL,
+                           unsigned DestReg, unsigned SrcReg,
+                           bool KillSrc) const;
   virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MI,
                                    unsigned SrcReg, bool isKill, int FrameIndex,

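The header change centralizes everything callers need to decode a memory clause: the X86::Addr* enums name the five slots, X86II::getMemoryOperandNo finds where the clause starts from TSFlags, and the VEX flags move above bit 31 of the now 64-bit TSFlags word (hence the 1ULL shifts). A decoding sketch under those definitions, with a hypothetical MachineInstr:

    uint64_t TSFlags = MI.getDesc().TSFlags;
    int MemOp = X86II::getMemoryOperandNo(TSFlags);
    if (MemOp != -1) {  // tied operands may still shift this; see the emitter
      unsigned Base = MI.getOperand(MemOp + X86::AddrBaseReg).getReg();
      unsigned Seg  = MI.getOperand(MemOp + X86::AddrSegmentReg).getReg();
      // Seg == 0 means no segment override is required.
    }
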
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.td?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.td (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.td Sat Jul 10 00:06:30 2010
@@ -202,13 +202,9 @@
   let Name = "Mem";
   let SuperClasses = [];
 }
-def X86NoSegMemAsmOperand : AsmOperandClass {
-  let Name = "NoSegMem";
-  let SuperClasses = [X86MemAsmOperand];
-}
 def X86AbsMemAsmOperand : AsmOperandClass {
   let Name = "AbsMem";
-  let SuperClasses = [X86NoSegMemAsmOperand];
+  let SuperClasses = [X86MemAsmOperand];
 }
 class X86MemOperand<string printMethod> : Operand<iPTR> {
   let PrintMethod = printMethod;
@@ -231,7 +227,7 @@
 def f64mem  : X86MemOperand<"printf64mem">;
 def f80mem  : X86MemOperand<"printf80mem">;
 def f128mem : X86MemOperand<"printf128mem">;
-//def f256mem : X86MemOperand<"printf256mem">;
+def f256mem : X86MemOperand<"printf256mem">;
 
 // A version of i8mem for use on x86-64 that uses GR64_NOREX instead of
 // plain GR64, so that it doesn't potentially require a REX prefix.
@@ -250,11 +246,6 @@
   let ParserMatchClass = X86MemAsmOperand;
 }
 
-def lea32mem : Operand<i32> {
-  let PrintMethod = "printlea32mem";
-  let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm);
-  let ParserMatchClass = X86NoSegMemAsmOperand;
-}
 
 let ParserMatchClass = X86AbsMemAsmOperand,
     PrintMethod = "print_pcrel_imm" in {
@@ -289,26 +280,31 @@
 // 64-bit immediates, but for a 16-bit target value we want to accept both "-1"
 // (which will be a -1ULL), and "0xFF" (-1 in 16-bits).
 
-// [0, 0x7FFFFFFF]                                            | [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
+// [0, 0x7FFFFFFF]                                            |
+//   [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
 def ImmSExti64i32AsmOperand : ImmSExtAsmOperandClass {
   let Name = "ImmSExti64i32";
 }
 
-// [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] | [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
+// [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] |
+//   [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
 def ImmSExti16i8AsmOperand : ImmSExtAsmOperandClass {
   let Name = "ImmSExti16i8";
   let SuperClasses = [ImmSExti64i32AsmOperand];
 }
 
-// [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] | [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
+// [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] |
+//   [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
 def ImmSExti32i8AsmOperand : ImmSExtAsmOperandClass {
   let Name = "ImmSExti32i8";
 }
 
-// [0, 0x0000007F]                                            | [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
+// [0, 0x0000007F]                                            |
+//   [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
 def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass {
   let Name = "ImmSExti64i8";
-  let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand, ImmSExti64i32AsmOperand];
+  let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand,
+                      ImmSExti64i32AsmOperand];
 }
 
 // A couple of more descriptive operand definitions.
@@ -327,10 +323,10 @@
 
 // Define X86 specific addressing mode.
 def addr      : ComplexPattern<iPTR, 5, "SelectAddr", [], []>;
-def lea32addr : ComplexPattern<i32, 4, "SelectLEAAddr",
+def lea32addr : ComplexPattern<i32, 5, "SelectLEAAddr",
                                [add, sub, mul, X86mul_imm, shl, or, frameindex],
                                []>;
-def tls32addr : ComplexPattern<i32, 4, "SelectTLSADDRAddr",
+def tls32addr : ComplexPattern<i32, 5, "SelectTLSADDRAddr",
                                [tglobaltlsaddr], []>;
 
 //===----------------------------------------------------------------------===//
@@ -749,18 +745,10 @@
                  "jmp\t$dst  # TAILCALL",
                  []>;
   def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32_TC:$dst, variable_ops), 
-                   "jmp{l}\t{*}$dst  # TAILCALL",
-                 []>;     
+                   "", []>;  // FIXME: Remove encoding when JIT is dead.
   let mayLoad = 1 in
   def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst, variable_ops),
                    "jmp{l}\t{*}$dst  # TAILCALL", []>;
-
-  // FIXME: This is a hack so that MCInst lowering can preserve the TAILCALL
-  // marker on instructions, while still being able to relax.
-  let isCodeGenOnly = 1 in {
-    def TAILJMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
-                         "jmp\t$dst  # TAILCALL", []>;
-  }
 }
 
 //===----------------------------------------------------------------------===//
@@ -878,11 +866,11 @@
 
 let neverHasSideEffects = 1 in
 def LEA16r   : I<0x8D, MRMSrcMem,
-                 (outs GR16:$dst), (ins lea32mem:$src),
+                 (outs GR16:$dst), (ins i32mem:$src),
                  "lea{w}\t{$src|$dst}, {$dst|$src}", []>, OpSize;
 let isReMaterializable = 1 in
 def LEA32r   : I<0x8D, MRMSrcMem,
-                 (outs GR32:$dst), (ins lea32mem:$src),
+                 (outs GR32:$dst), (ins i32mem:$src),
                  "lea{l}\t{$src|$dst}, {$dst|$src}",
                  [(set GR32:$dst, lea32addr:$src)]>, Requires<[In32BitMode]>;
 
@@ -3864,7 +3852,7 @@
             XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
             XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
     Uses = [ESP] in
-def TLS_addr32 : I<0, Pseudo, (outs), (ins lea32mem:$sym),
+def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "leal\t$sym, %eax; "
                   "call\t___tls_get_addr at PLT",
                   [(X86tlsaddr tls32addr:$sym)]>,

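On the TableGen side the dedicated lea32mem operand disappears and lea32addr/tls32addr now yield five results, matching the 64-bit patterns earlier in this mail. The selector side of that arity change lives in X86ISelDAGToDAG.cpp, which is not part of this section; sketched under that assumption, the extra result is simply a "no segment" register:

    // Hypothetical fragment: after Base, Scale, Index and Disp are
    // computed as before, the new fifth result is filled with zero.
    Segment = CurDAG->getRegister(0, MVT::i32);
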
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td Sat Jul 10 00:06:30 2010
@@ -106,6 +106,12 @@
 def loadv4i32    : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
 def loadv2i64    : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
 
+// FIXME: move this to a more appropriate place after all AVX is done.
+def loadv8f32    : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
+def loadv4f64    : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
+def loadv8i32    : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
+def loadv4i64    : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;
+
 // Like 'store', but always requires vector alignment.
 def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                            (store node:$val, node:$ptr), [{
@@ -130,6 +136,16 @@
 def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                                (v2i64 (alignedload node:$ptr))>;
 
+// FIXME: move this to a more appropriate place after all AVX is done.
+def alignedloadv8f32 : PatFrag<(ops node:$ptr),
+                               (v8f32 (alignedload node:$ptr))>;
+def alignedloadv4f64 : PatFrag<(ops node:$ptr),
+                               (v4f64 (alignedload node:$ptr))>;
+def alignedloadv8i32 : PatFrag<(ops node:$ptr),
+                               (v8i32 (alignedload node:$ptr))>;
+def alignedloadv4i64 : PatFrag<(ops node:$ptr),
+                               (v4i64 (alignedload node:$ptr))>;
+
 // Like 'load', but uses special alignment checks suitable for use in
 // memory operands in most SSE instructions, which are required to
 // be naturally aligned on some targets but not on others.  If the subtarget
@@ -149,6 +165,10 @@
 def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
 def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
 
+// FIXME: move this to a more appropriate place after all AVX is done.
+def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
+def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
+
 // SSSE3 uses MMX registers for some instructions. They aren't aligned on a
 // 16-byte boundary.
 // FIXME: 8 byte alignment for mmx reads is not required
@@ -583,6 +603,15 @@
                               "movups", SSEPackedSingle>, VEX;
 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                               "movupd", SSEPackedDouble, 0>, OpSize, VEX;
+
+defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
+                              "movaps", SSEPackedSingle>, VEX;
+defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
+                              "movapd", SSEPackedDouble>, OpSize, VEX;
+defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
+                              "movups", SSEPackedSingle>, VEX;
+defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
+                              "movupd", SSEPackedDouble, 0>, OpSize, VEX;
 }
 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                               "movaps", SSEPackedSingle>, TB;
@@ -606,6 +635,18 @@
 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movupd\t{$src, $dst|$dst, $src}",
                    [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
+def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
+                   "movaps\t{$src, $dst|$dst, $src}",
+                   [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
+def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
+                   "movapd\t{$src, $dst|$dst, $src}",
+                   [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
+def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
+                   "movups\t{$src, $dst|$dst, $src}",
+                   [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
+def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
+                   "movupd\t{$src, $dst|$dst, $src}",
+                   [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
 }
 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movaps\t{$src, $dst|$dst, $src}",
@@ -1502,6 +1543,19 @@
     defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
           VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                          SSEPackedDouble>, OpSize, VEX_4V;
+
+    defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
+          VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+                         SSEPackedSingle>, VEX_4V;
+    defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
+          VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+                         SSEPackedDouble>, OpSize, VEX_4V;
+    defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
+          VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+                         SSEPackedSingle>, VEX_4V;
+    defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
+          VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+                         SSEPackedDouble>, OpSize, VEX_4V;
   }
 
   let Constraints = "$src1 = $dst" in {
@@ -2063,6 +2117,28 @@
                         "movntdq\t{$src, $dst|$dst, $src}",
                         [(alignednontemporalstore (v4f32 VR128:$src),
                                                   addr:$dst)]>, VEX;
+
+    def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
+                         (ins f256mem:$dst, VR256:$src),
+                         "movntps\t{$src, $dst|$dst, $src}",
+                         [(alignednontemporalstore (v8f32 VR256:$src),
+                                                   addr:$dst)]>, VEX;
+    def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
+                         (ins f256mem:$dst, VR256:$src),
+                         "movntpd\t{$src, $dst|$dst, $src}",
+                         [(alignednontemporalstore (v4f64 VR256:$src),
+                                                   addr:$dst)]>, VEX;
+    def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
+                          (ins f256mem:$dst, VR256:$src),
+                          "movntdq\t{$src, $dst|$dst, $src}",
+                          [(alignednontemporalstore (v4f64 VR256:$src),
+                                                    addr:$dst)]>, VEX;
+    let ExeDomain = SSEPackedInt in
+    def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
+                        (ins f256mem:$dst, VR256:$src),
+                        "movntdq\t{$src, $dst|$dst, $src}",
+                        [(alignednontemporalstore (v8f32 VR256:$src),
+                                                  addr:$dst)]>, VEX;
   }
 }
 

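This is the first batch of 256-bit AVX support in the SSE file: an f256mem operand, v8f32/v4f64/v8i32/v4i64 load fragments, and YMM forms of the packed moves, unpacks, and non-temporal stores. Combined with the five-operand address helpers from X86InstrBuilder.h, spilling a YMM register might look like this (a sketch; no such spill code is part of this patch):

    // addFrameReference fills in the five address operands for the
    // frame index; VMOVAPSYmr then takes the YMM source register.
    addFrameReference(BuildMI(MBB, MI, DL, TII.get(X86::VMOVAPSYmr)),
                      FrameIdx)
      .addReg(SrcReg, getKillRegState(true));
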
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp Sat Jul 10 00:06:30 2010
@@ -30,7 +30,7 @@
   MCContext &Ctx;
   bool Is64BitMode;
 public:
-  X86MCCodeEmitter(TargetMachine &tm, MCContext &ctx, bool is64Bit) 
+  X86MCCodeEmitter(TargetMachine &tm, MCContext &ctx, bool is64Bit)
     : TM(tm), TII(*TM.getInstrInfo()), Ctx(ctx) {
     Is64BitMode = is64Bit;
   }
@@ -49,7 +49,7 @@
       { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
       { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel }
     };
-    
+
     if (Kind < FirstTargetFixupKind)
       return MCCodeEmitter::getFixupKindInfo(Kind);
 
@@ -57,7 +57,7 @@
            "Invalid kind!");
     return Infos[Kind - FirstTargetFixupKind];
   }
-  
+
   static unsigned GetX86RegNum(const MCOperand &MO) {
     return X86RegisterInfo::getX86RegNum(MO.getReg());
   }
@@ -75,19 +75,20 @@
                                               unsigned OpNum) {
     unsigned SrcReg = MI.getOperand(OpNum).getReg();
     unsigned SrcRegNum = GetX86RegNum(MI.getOperand(OpNum));
-    if (SrcReg >= X86::XMM8 && SrcReg <= X86::XMM15)
+    if ((SrcReg >= X86::XMM8 && SrcReg <= X86::XMM15) ||
+        (SrcReg >= X86::YMM8 && SrcReg <= X86::YMM15))
       SrcRegNum += 8;
-  
+
     // The registers represented through VEX_VVVV should
     // be encoded in 1's complement form.
     return (~SrcRegNum) & 0xf;
   }
-  
+
   void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
     OS << (char)C;
     ++CurByte;
   }
-  
+
   void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
                     raw_ostream &OS) const {
     // Output the constant in little endian byte order.
@@ -97,46 +98,47 @@
     }
   }
 
-  void EmitImmediate(const MCOperand &Disp, 
+  void EmitImmediate(const MCOperand &Disp,
                      unsigned ImmSize, MCFixupKind FixupKind,
                      unsigned &CurByte, raw_ostream &OS,
                      SmallVectorImpl<MCFixup> &Fixups,
                      int ImmOffset = 0) const;
-  
+
   inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
                                         unsigned RM) {
     assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
     return RM | (RegOpcode << 3) | (Mod << 6);
   }
-  
+
   void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
                         unsigned &CurByte, raw_ostream &OS) const {
     EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
   }
-  
+
   void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                    unsigned &CurByte, raw_ostream &OS) const {
     // SIB byte is in the same format as the ModRMByte.
     EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
   }
-  
-  
-  void EmitSegmentOverridePrefix(const MCOperand &Op, unsigned TSFlags,
-                                 unsigned &CurByte, raw_ostream &OS) const;
+
 
   void EmitMemModRMByte(const MCInst &MI, unsigned Op,
-                        unsigned RegOpcodeField, 
+                        unsigned RegOpcodeField,
                         uint64_t TSFlags, unsigned &CurByte, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups) const;
-  
+
   void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                          SmallVectorImpl<MCFixup> &Fixups) const;
-  
-  void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
+
+  void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                            const MCInst &MI, const TargetInstrDesc &Desc,
                            raw_ostream &OS) const;
 
-  void EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
+  void EmitSegmentOverridePrefix(uint64_t TSFlags, unsigned &CurByte,
+                                 int MemOperand, const MCInst &MI,
+                                 raw_ostream &OS) const;
+
+  void EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                         const MCInst &MI, const TargetInstrDesc &Desc,
                         raw_ostream &OS) const;
 };
@@ -156,8 +158,8 @@
   return new X86MCCodeEmitter(TM, Ctx, true);
 }
 
-/// isDisp8 - Return true if this signed displacement fits in a 8-bit 
-/// sign-extended field. 
+/// isDisp8 - Return true if this signed displacement fits in an 8-bit
+/// sign-extended field.
 static bool isDisp8(int Value) {
   return Value == (signed char)Value;
 }
@@ -167,7 +169,7 @@
 static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
   unsigned Size = X86II::getSizeOfImm(TSFlags);
   bool isPCRel = X86II::isImmPCRel(TSFlags);
-  
+
   switch (Size) {
   default: assert(0 && "Unknown immediate size");
   case 1: return isPCRel ? MCFixupKind(X86::reloc_pcrel_1byte) : FK_Data_1;
@@ -193,7 +195,7 @@
 
   // If we have an immoffset, add it to the expression.
   const MCExpr *Expr = DispOp.getExpr();
-  
+
   // If the fixup is pc-relative, we need to bias the value to be relative to
   // the start of the field, not the end of the field.
   if (FixupKind == MCFixupKind(X86::reloc_pcrel_4byte) ||
@@ -204,37 +206,16 @@
     ImmOffset -= 2;
   if (FixupKind == MCFixupKind(X86::reloc_pcrel_1byte))
     ImmOffset -= 1;
-  
+
   if (ImmOffset)
     Expr = MCBinaryExpr::CreateAdd(Expr, MCConstantExpr::Create(ImmOffset, Ctx),
                                    Ctx);
-  
+
   // Emit a symbolic constant as a fixup and 4 zeros.
   Fixups.push_back(MCFixup::Create(CurByte, Expr, FixupKind));
   EmitConstant(0, Size, CurByte, OS);
 }
 
-void X86MCCodeEmitter::EmitSegmentOverridePrefix(const MCOperand &Op,
-                                                 unsigned TSFlags,
-                                                 unsigned &CurByte,
-                                                 raw_ostream &OS) const {
-  // If no segment register is present, we don't need anything.
-  if (Op.getReg() == 0)
-    return;
-
-  // Check if we need an override.
-  switch (Op.getReg()) {
-  case X86::CS: EmitByte(0x2E, CurByte, OS); return;
-  case X86::SS: EmitByte(0x36, CurByte, OS); return;
-  case X86::DS: EmitByte(0x3E, CurByte, OS); return;
-  case X86::ES: EmitByte(0x26, CurByte, OS); return;
-  case X86::FS: EmitByte(0x64, CurByte, OS); return;
-  case X86::GS: EmitByte(0x65, CurByte, OS); return;
-  }
-
-  assert(0 && "Invalid segment register!");
-}
-
 void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
                                         unsigned RegOpcodeField,
                                         uint64_t TSFlags, unsigned &CurByte,
@@ -245,43 +226,43 @@
   const MCOperand &Scale    = MI.getOperand(Op+1);
   const MCOperand &IndexReg = MI.getOperand(Op+2);
   unsigned BaseReg = Base.getReg();
-  
+
   // Handle %rip relative addressing.
   if (BaseReg == X86::RIP) {    // [disp32+RIP] in X86-64 mode
     assert(Is64BitMode && "Rip-relative addressing requires 64-bit mode");
     assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
     EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
-    
+
     unsigned FixupKind = X86::reloc_riprel_4byte;
-    
+
     // movq loads are handled with a special relocation form which allows the
     // linker to eliminate some loads for GOT references which end up in the
     // same linkage unit.
     if (MI.getOpcode() == X86::MOV64rm ||
         MI.getOpcode() == X86::MOV64rm_TC)
       FixupKind = X86::reloc_riprel_4byte_movq_load;
-    
+
     // rip-relative addressing is actually relative to the *next* instruction.
     // Since an immediate can follow the mod/rm byte for an instruction, this
     // means that we need to bias the immediate field of the instruction with
     // the size of the immediate field.  If we have this case, add it into the
     // expression to emit.
     int ImmSize = X86II::hasImm(TSFlags) ? X86II::getSizeOfImm(TSFlags) : 0;
-    
+
     EmitImmediate(Disp, 4, MCFixupKind(FixupKind),
                   CurByte, OS, Fixups, -ImmSize);
     return;
   }
-  
+
   unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;
-  
+
   // Determine whether a SIB byte is needed.
-  // If no BaseReg, issue a RIP relative instruction only if the MCE can 
+  // If no BaseReg, issue a RIP relative instruction only if the MCE can
   // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
   // 2-7) and absolute references.
 
   if (// The SIB byte must be used if there is an index register.
-      IndexReg.getReg() == 0 && 
+      IndexReg.getReg() == 0 &&
       // The SIB byte must be used if the base is ESP/RSP/R12, all of which
       // encode to an R/M value of 4, which indicates that a SIB byte is
       // present.
@@ -295,7 +276,7 @@
       EmitImmediate(Disp, 4, FK_Data_4, CurByte, OS, Fixups);
       return;
     }
-    
+
     // If the base is not EBP/ESP and there is no displacement, use simple
     // indirect register encoding, this handles addresses like [EAX].  The
     // encoding for [EBP] with no displacement means [disp32] so we handle it
@@ -304,24 +285,24 @@
       EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
       return;
     }
-    
+
     // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
     if (Disp.isImm() && isDisp8(Disp.getImm())) {
       EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
       EmitImmediate(Disp, 1, FK_Data_1, CurByte, OS, Fixups);
       return;
     }
-    
+
     // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
     EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
     EmitImmediate(Disp, 4, FK_Data_4, CurByte, OS, Fixups);
     return;
   }
-    
+
   // We need a SIB byte, so start by outputting the ModR/M byte first
   assert(IndexReg.getReg() != X86::ESP &&
          IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");
-  
+
   bool ForceDisp32 = false;
   bool ForceDisp8  = false;
   if (BaseReg == 0) {
@@ -347,13 +328,13 @@
     // Emit the normal disp32 encoding.
     EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
   }
-  
+
   // Calculate what the SS field value should be...
   static const unsigned SSTable[] = { ~0, 0, 1, ~0, 2, ~0, ~0, ~0, 3 };
   unsigned SS = SSTable[Scale.getImm()];
-  
+
   if (BaseReg == 0) {
-    // Handle the SIB byte for the case where there is no base, see Intel 
+    // Handle the SIB byte for the case where there is no base, see Intel
     // Manual 2A, table 2-7. The displacement has already been output.
     unsigned IndexRegNo;
     if (IndexReg.getReg())
@@ -369,7 +350,7 @@
       IndexRegNo = 4;   // For example [ESP+1*<noreg>+4]
     EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
   }
-  
+
   // Do we need to output a displacement?
   if (ForceDisp8)
     EmitImmediate(Disp, 1, FK_Data_1, CurByte, OS, Fixups);
@@ -380,15 +361,11 @@
 /// EmitVEXOpcodePrefix - AVX instructions are encoded using an opcode prefix
 /// called VEX.
 void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
-                            const MCInst &MI, const TargetInstrDesc &Desc,
-                            raw_ostream &OS) const {
-
-  // Pseudo instructions never have a VEX prefix.
-  if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
-    return;
-
+                                           int MemOperand, const MCInst &MI,
+                                           const TargetInstrDesc &Desc,
+                                           raw_ostream &OS) const {
   bool HasVEX_4V = false;
-  if ((TSFlags >> 32) & X86II::VEX_4V)
+  if (TSFlags & X86II::VEX_4V)
     HasVEX_4V = true;
 
   // VEX_R: opcode extension equivalent to REX.R in
@@ -452,7 +429,7 @@
   if (TSFlags & X86II::OpSize)
     VEX_PP = 0x01;
 
-  if ((TSFlags >> 32) & X86II::VEX_W)
+  if (TSFlags & X86II::VEX_W)
     VEX_W = 1;
 
   switch (TSFlags & X86II::Op0Mask) {
@@ -478,6 +455,15 @@
     break;  // No prefix!
   }
 
+  // Set the vector length to 256-bit if YMM0-YMM15 is used
+  for (unsigned i = 0; i != MI.getNumOperands(); ++i) {
+    if (!MI.getOperand(i).isReg())
+      continue;
+    unsigned SrcReg = MI.getOperand(i).getReg();
+    if (SrcReg >= X86::YMM0 && SrcReg <= X86::YMM15)
+      VEX_L = 1;
+  }
+
   unsigned NumOps = MI.getNumOperands();
   unsigned CurOp = 0;
 
@@ -488,7 +474,7 @@
   case X86II::MRM4m: case X86II::MRM5m:
   case X86II::MRM6m: case X86II::MRM7m:
   case X86II::MRMDestMem:
-    NumOps = CurOp = X86AddrNumOperands;
+    NumOps = CurOp = X86::AddrNumOperands;
   case X86II::MRMSrcMem:
   case X86II::MRMSrcReg:
     if (MI.getNumOperands() > CurOp && MI.getOperand(CurOp).isReg() &&
@@ -506,7 +492,7 @@
 
     // If the last register should be encoded in the immediate field
     // do not use any bit from VEX prefix to this register, ignore it
-    if ((TSFlags >> 32) & X86II::VEX_I8IMM)
+    if (TSFlags & X86II::VEX_I8IMM)
       NumOps--;
 
     for (; CurOp != NumOps; ++CurOp) {
@@ -538,6 +524,9 @@
     assert(0 && "Not implemented!");
   }
 
+  // Emit segment override opcode prefix as needed.
+  EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);
+
   // VEX opcode prefix can have 2 or 3 bytes
   //
   //  3 bytes:
@@ -568,21 +557,17 @@
 /// size, and 3) use of X86-64 extended registers.
 static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
                                    const TargetInstrDesc &Desc) {
-  // Pseudo instructions never have a rex byte.
-  if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
-    return 0;
-  
   unsigned REX = 0;
   if (TSFlags & X86II::REX_W)
     REX |= 1 << 3; // set REX.W
-  
+
   if (MI.getNumOperands() == 0) return REX;
-  
+
   unsigned NumOps = MI.getNumOperands();
   // FIXME: MCInst should explicitize the two-addrness.
   bool isTwoAddr = NumOps > 1 &&
                       Desc.getOperandConstraint(1, TOI::TIED_TO) != -1;
-  
+
   // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
   unsigned i = isTwoAddr ? 1 : 0;
   for (; i != NumOps; ++i) {
@@ -595,7 +580,7 @@
     REX |= 0x40; // REX fixed encoding prefix
     break;
   }
-  
+
   switch (TSFlags & X86II::FormMask) {
   case X86II::MRMInitReg: assert(0 && "FIXME: Remove this!");
   case X86II::MRMSrcReg:
@@ -630,7 +615,7 @@
   case X86II::MRM4m: case X86II::MRM5m:
   case X86II::MRM6m: case X86II::MRM7m:
   case X86II::MRMDestMem: {
-    unsigned e = (isTwoAddr ? X86AddrNumOperands+1 : X86AddrNumOperands);
+    unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
     i = isTwoAddr ? 1 : 0;
     if (NumOps > e && MI.getOperand(e).isReg() &&
         X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(e).getReg()))
@@ -661,20 +646,28 @@
   return REX;
 }
 
-/// EmitOpcodePrefix - Emit all instruction prefixes prior to the opcode.
-void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
-                                        const MCInst &MI, 
-                                        const TargetInstrDesc &Desc,
+/// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
+void X86MCCodeEmitter::EmitSegmentOverridePrefix(uint64_t TSFlags,
+                                        unsigned &CurByte, int MemOperand,
+                                        const MCInst &MI,
                                         raw_ostream &OS) const {
-
-  // Emit the lock opcode prefix as needed.
-  if (TSFlags & X86II::LOCK)
-    EmitByte(0xF0, CurByte, OS);
-  
-  // Emit segment override opcode prefix as needed.
   switch (TSFlags & X86II::SegOvrMask) {
   default: assert(0 && "Invalid segment!");
-  case 0: break;  // No segment override!
+  case 0:
+    // No segment override, check for explicit one on memory operand.
+    if (MemOperand != -1) {   // If the instruction has a memory operand.
+      switch (MI.getOperand(MemOperand+X86::AddrSegmentReg).getReg()) {
+      default: assert(0 && "Unknown segment register!");
+      case 0: break;
+      case X86::CS: EmitByte(0x2E, CurByte, OS); break;
+      case X86::SS: EmitByte(0x36, CurByte, OS); break;
+      case X86::DS: EmitByte(0x3E, CurByte, OS); break;
+      case X86::ES: EmitByte(0x26, CurByte, OS); break;
+      case X86::FS: EmitByte(0x64, CurByte, OS); break;
+      case X86::GS: EmitByte(0x65, CurByte, OS); break;
+      }
+    }
+    break;
   case X86II::FS:
     EmitByte(0x64, CurByte, OS);
     break;
@@ -682,19 +675,36 @@
     EmitByte(0x65, CurByte, OS);
     break;
   }
-  
+}
+
+/// EmitOpcodePrefix - Emit all instruction prefixes prior to the opcode.
+///
+/// MemOperand is the operand # of the start of a memory operand if present.  If
+/// not present, it is -1.
+void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
+                                        int MemOperand, const MCInst &MI,
+                                        const TargetInstrDesc &Desc,
+                                        raw_ostream &OS) const {
+
+  // Emit the lock opcode prefix as needed.
+  if (TSFlags & X86II::LOCK)
+    EmitByte(0xF0, CurByte, OS);
+
+  // Emit segment override opcode prefix as needed.
+  EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);
+
   // Emit the repeat opcode prefix as needed.
   if ((TSFlags & X86II::Op0Mask) == X86II::REP)
     EmitByte(0xF3, CurByte, OS);
-  
+
   // Emit the operand size opcode prefix as needed.
   if (TSFlags & X86II::OpSize)
     EmitByte(0x66, CurByte, OS);
-  
+
   // Emit the address size opcode prefix as needed.
   if (TSFlags & X86II::AdSize)
     EmitByte(0x67, CurByte, OS);
-  
+
   bool Need0FPrefix = false;
   switch (TSFlags & X86II::Op0Mask) {
   default: assert(0 && "Invalid prefix!");
@@ -726,18 +736,18 @@
   case X86II::DE: EmitByte(0xDE, CurByte, OS); break;
   case X86II::DF: EmitByte(0xDF, CurByte, OS); break;
   }
-  
+
   // Handle REX prefix.
   // FIXME: Can this come before F2 etc to simplify emission?
   if (Is64BitMode) {
     if (unsigned REX = DetermineREXPrefix(MI, TSFlags, Desc))
       EmitByte(0x40 | REX, CurByte, OS);
   }
-  
+
   // 0x0F escape code must be emitted just before the opcode.
   if (Need0FPrefix)
     EmitByte(0x0F, CurByte, OS);
-  
+
   // FIXME: Pull this up into previous switch if REX can be moved earlier.
   switch (TSFlags & X86II::Op0Mask) {
   case X86II::TF:    // F2 0F 38
@@ -757,37 +767,43 @@
   const TargetInstrDesc &Desc = TII.get(Opcode);
   uint64_t TSFlags = Desc.TSFlags;
 
+  // Pseudo instructions don't get encoded.
+  if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
+    return;
+
+  // If this is a two-address instruction, skip one of the register operands.
+  // FIXME: This should be handled during MCInst lowering.
+  unsigned NumOps = Desc.getNumOperands();
+  unsigned CurOp = 0;
+  if (NumOps > 1 && Desc.getOperandConstraint(1, TOI::TIED_TO) != -1)
+    ++CurOp;
+  else if (NumOps > 2 && Desc.getOperandConstraint(NumOps-1, TOI::TIED_TO)== 0)
+    // Skip the last source operand that is tied_to the dest reg. e.g. LXADD32
+    --NumOps;
+
   // Keep track of the current byte being emitted.
   unsigned CurByte = 0;
-  
+
   // Is this instruction encoded using the AVX VEX prefix?
   bool HasVEXPrefix = false;
 
   // It uses the VEX.VVVV field?
   bool HasVEX_4V = false;
 
-  if ((TSFlags >> 32) & X86II::VEX)
+  if (TSFlags & X86II::VEX)
     HasVEXPrefix = true;
-  if ((TSFlags >> 32) & X86II::VEX_4V)
+  if (TSFlags & X86II::VEX_4V)
     HasVEX_4V = true;
 
-  // FIXME: We should emit the prefixes in exactly the same order as GAS does,
-  // in order to provide diffability.
+  // Determine where the memory operand starts, if present.
+  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
+  if (MemoryOperand != -1) MemoryOperand += CurOp;
 
   if (!HasVEXPrefix)
-    EmitOpcodePrefix(TSFlags, CurByte, MI, Desc, OS);
+    EmitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
   else
-    EmitVEXOpcodePrefix(TSFlags, CurByte, MI, Desc, OS);
-  
-  // If this is a two-address instruction, skip one of the register operands.
-  unsigned NumOps = Desc.getNumOperands();
-  unsigned CurOp = 0;
-  if (NumOps > 1 && Desc.getOperandConstraint(1, TOI::TIED_TO) != -1)
-    ++CurOp;
-  else if (NumOps > 2 && Desc.getOperandConstraint(NumOps-1, TOI::TIED_TO)== 0)
-    // Skip the last source operand that is tied_to the dest reg. e.g. LXADD32
-    --NumOps;
-  
+    EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
+
   unsigned char BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
   unsigned SrcRegNum = 0;
   switch (TSFlags & X86II::FormMask) {
@@ -795,31 +811,31 @@
     assert(0 && "FIXME: Remove this form when the JIT moves to MCCodeEmitter!");
   default: errs() << "FORM: " << (TSFlags & X86II::FormMask) << "\n";
     assert(0 && "Unknown FormMask value in X86MCCodeEmitter!");
-  case X86II::Pseudo: return; // Pseudo instructions encode to nothing.
+  case X86II::Pseudo:
+    assert(0 && "Pseudo instruction shouldn't be emitted");
   case X86II::RawFrm:
     EmitByte(BaseOpcode, CurByte, OS);
     break;
-      
+
   case X86II::AddRegFrm:
     EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
     break;
-      
+
   case X86II::MRMDestReg:
     EmitByte(BaseOpcode, CurByte, OS);
     EmitRegModRMByte(MI.getOperand(CurOp),
                      GetX86RegNum(MI.getOperand(CurOp+1)), CurByte, OS);
     CurOp += 2;
     break;
-  
+
   case X86II::MRMDestMem:
-    EmitSegmentOverridePrefix(MI.getOperand(CurOp + 4), TSFlags, CurByte, OS);
     EmitByte(BaseOpcode, CurByte, OS);
     EmitMemModRMByte(MI, CurOp,
-                     GetX86RegNum(MI.getOperand(CurOp + X86AddrNumOperands)),
+                     GetX86RegNum(MI.getOperand(CurOp + X86::AddrNumOperands)),
                      TSFlags, CurByte, OS, Fixups);
-    CurOp += X86AddrNumOperands + 1;
+    CurOp += X86::AddrNumOperands + 1;
     break;
-      
+
   case X86II::MRMSrcReg:
     EmitByte(BaseOpcode, CurByte, OS);
     SrcRegNum = CurOp + 1;
@@ -831,26 +847,17 @@
                      GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
     CurOp = SrcRegNum + 1;
     break;
-    
+
   case X86II::MRMSrcMem: {
-    int AddrOperands = X86AddrNumOperands;
+    int AddrOperands = X86::AddrNumOperands;
     unsigned FirstMemOp = CurOp+1;
     if (HasVEX_4V) {
       ++AddrOperands;
       ++FirstMemOp;  // Skip the register source (which is encoded in VEX_VVVV).
     }
 
-    // FIXME: Maybe lea should have its own form?  This is a horrible hack.
-    if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
-        Opcode == X86::LEA16r || Opcode == X86::LEA32r)
-      --AddrOperands; // No segment register
-    else
-      EmitSegmentOverridePrefix(MI.getOperand(FirstMemOp+4),
-                                TSFlags, CurByte, OS);
-
     EmitByte(BaseOpcode, CurByte, OS);
 
-    
     EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
                      TSFlags, CurByte, OS, Fixups);
     CurOp += AddrOperands + 1;
@@ -872,11 +879,10 @@
   case X86II::MRM2m: case X86II::MRM3m:
   case X86II::MRM4m: case X86II::MRM5m:
   case X86II::MRM6m: case X86II::MRM7m:
-    EmitSegmentOverridePrefix(MI.getOperand(CurOp+4), TSFlags, CurByte, OS);
     EmitByte(BaseOpcode, CurByte, OS);
     EmitMemModRMByte(MI, CurOp, (TSFlags & X86II::FormMask)-X86II::MRM0m,
                      TSFlags, CurByte, OS, Fixups);
-    CurOp += X86AddrNumOperands;
+    CurOp += X86::AddrNumOperands;
     break;
   case X86II::MRM_C1:
     EmitByte(BaseOpcode, CurByte, OS);
@@ -919,13 +925,13 @@
     EmitByte(0xF9, CurByte, OS);
     break;
   }
-  
+
   // If there is a remaining operand, it must be a trailing immediate.  Emit it
   // according to the right size for the instruction.
   if (CurOp != NumOps) {
    // The last source register of a 4-operand instruction in AVX is encoded
    // in bits[7:4] of an immediate byte, and bits[3:0] are ignored.
-    if ((TSFlags >> 32) & X86II::VEX_I8IMM) {
+    if (TSFlags & X86II::VEX_I8IMM) {
       const MCOperand &MO = MI.getOperand(CurOp++);
       bool IsExtReg =
         X86InstrInfo::isX86_64ExtendedReg(MO.getReg());

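For readers following the encoder changes above: the byte that
EmitRegModRMByte/EmitMemModRMByte ultimately produce has a fixed layout that
is standard x86 encoding, not anything LLVM-specific. A minimal sketch (the
helper name and the printf harness are illustrative, not part of the patch):

#include <cstdio>

// Standard x86 ModRM layout: mod (2 bits) | reg (3 bits) | r/m (3 bits).
// 'reg' carries either a register number or an opcode extension, i.e. the
// /0../7 forms handled by the MRM0m..MRM7m cases above.
static unsigned char ModRMByteSketch(unsigned Mod, unsigned Reg, unsigned RM) {
  return (unsigned char)(((Mod & 3) << 6) | ((Reg & 7) << 3) | (RM & 7));
}

int main() {
  // Register-direct form (Mod == 3), as EmitRegModRMByte emits.
  std::printf("%02x\n", ModRMByteSketch(3, 0, 1)); // prints "c1"
  return 0;
}
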
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.cpp Sat Jul 10 00:06:30 2010
@@ -127,21 +127,29 @@
   case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
     return RegNo-X86::ST0;
 
-  case X86::XMM0: case X86::XMM8: case X86::MM0:
+  case X86::XMM0: case X86::XMM8:
+  case X86::YMM0: case X86::YMM8: case X86::MM0:
     return 0;
-  case X86::XMM1: case X86::XMM9: case X86::MM1:
+  case X86::XMM1: case X86::XMM9:
+  case X86::YMM1: case X86::YMM9: case X86::MM1:
     return 1;
-  case X86::XMM2: case X86::XMM10: case X86::MM2:
+  case X86::XMM2: case X86::XMM10:
+  case X86::YMM2: case X86::YMM10: case X86::MM2:
     return 2;
-  case X86::XMM3: case X86::XMM11: case X86::MM3:
+  case X86::XMM3: case X86::XMM11:
+  case X86::YMM3: case X86::YMM11: case X86::MM3:
     return 3;
-  case X86::XMM4: case X86::XMM12: case X86::MM4:
+  case X86::XMM4: case X86::XMM12:
+  case X86::YMM4: case X86::YMM12: case X86::MM4:
     return 4;
-  case X86::XMM5: case X86::XMM13: case X86::MM5:
+  case X86::XMM5: case X86::XMM13:
+  case X86::YMM5: case X86::YMM13: case X86::MM5:
     return 5;
-  case X86::XMM6: case X86::XMM14: case X86::MM6:
+  case X86::XMM6: case X86::XMM14:
+  case X86::YMM6: case X86::YMM14: case X86::MM6:
     return 6;
-  case X86::XMM7: case X86::XMM15: case X86::MM7:
+  case X86::XMM7: case X86::XMM15:
+  case X86::YMM7: case X86::YMM15: case X86::MM7:
     return 7;
 
   case X86::ES:
@@ -1217,8 +1225,8 @@
     if (CSSize) {
       unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
       MachineInstr *MI =
-        addLeaRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
-                        FramePtr, false, -CSSize);
+        addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
+                     FramePtr, false, -CSSize);
       MBB.insert(MBBI, MI);
     } else {
       BuildMI(MBB, MBBI, DL,

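The growing switch above implements one simple rule; a hedged restatement
(hypothetical helper, not the real API):

// x86 encodes only the low three bits of a register number in ModRM/SIB
// bytes; the fourth bit for XMM8-XMM15 and YMM8-YMM15 travels in the REX or
// VEX prefix instead, which is why XMM9, YMM9 and MM1 all map to 1 above.
unsigned lowEncodingBits(unsigned HWIndex) { // hypothetical helper
  return HWIndex & 7; // e.g. 9 -> 1, 15 -> 7
}
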
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.td?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.td (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.td Sat Jul 10 00:06:30 2010
@@ -147,7 +147,7 @@
   def MM5 : Register<"mm5">, DwarfRegNum<[46, 34, 34]>;
   def MM6 : Register<"mm6">, DwarfRegNum<[47, 35, 35]>;
   def MM7 : Register<"mm7">, DwarfRegNum<[48, 36, 36]>;
-  
+
   // Pseudo Floating Point registers
   def FP0 : Register<"fp0">;
   def FP1 : Register<"fp1">;
@@ -155,7 +155,7 @@
   def FP3 : Register<"fp3">;
   def FP4 : Register<"fp4">;
   def FP5 : Register<"fp5">;
-  def FP6 : Register<"fp6">; 
+  def FP6 : Register<"fp6">;
 
   // XMM Registers, used by the various SSE instruction set extensions.
   // The sub_ss and sub_sd subregs are the same registers with another regclass.
@@ -357,7 +357,7 @@
   }];
 }
 
-def GR32 : RegisterClass<"X86", [i32], 32, 
+def GR32 : RegisterClass<"X86", [i32], 32,
                          [EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP,
                           R8D, R9D, R10D, R11D, R14D, R15D, R12D, R13D]> {
   let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi), (GR16 sub_16bit)];
@@ -412,7 +412,7 @@
 // GR64 - 64-bit GPRs. This oddly includes RIP, which isn't accurate, since
 // RIP isn't really a register and it can't be used anywhere except in an
 // address, but it doesn't cause trouble.
-def GR64 : RegisterClass<"X86", [i64], 64, 
+def GR64 : RegisterClass<"X86", [i64], 64,
                          [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
                           RBX, R14, R15, R12, R13, RBP, RSP, RIP]> {
   let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi),
@@ -446,7 +446,7 @@
 }
 
 // Debug registers.
-def DEBUG_REG : RegisterClass<"X86", [i32], 32, 
+def DEBUG_REG : RegisterClass<"X86", [i32], 32,
                               [DR0, DR1, DR2, DR3, DR4, DR5, DR6, DR7]> {
 }
 
@@ -787,7 +787,7 @@
                            XMM8, XMM9, XMM10, XMM11,
                            XMM12, XMM13, XMM14, XMM15]> {
   let SubRegClasses = [(FR32 sub_ss), (FR64 sub_sd)];
-  
+
   let MethodProtos = [{
     iterator allocation_order_end(const MachineFunction &MF) const;
   }];
@@ -803,11 +803,27 @@
     }
   }];
 }
-def VR256 : RegisterClass<"X86", [ v8i32, v4i64, v8f32, v4f64],256,
+
+def VR256 : RegisterClass<"X86", [v8i32, v4i64, v8f32, v4f64], 256,
                           [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
                            YMM8, YMM9, YMM10, YMM11,
                            YMM12, YMM13, YMM14, YMM15]> {
   let SubRegClasses = [(FR32 sub_ss), (FR64 sub_sd), (VR128 sub_xmm)];
+
+  let MethodProtos = [{
+    iterator allocation_order_end(const MachineFunction &MF) const;
+  }];
+  let MethodBodies = [{
+    VR256Class::iterator
+    VR256Class::allocation_order_end(const MachineFunction &MF) const {
+      const TargetMachine &TM = MF.getTarget();
+      const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+      if (!Subtarget.is64Bit())
+        return end()-8; // Only YMM0 to YMM7 are available in 32-bit mode.
+      else
+        return end();
+    }
+  }];
 }
 
 // Status flags registers.

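A one-line restatement of what the new VR256 MethodBodies enforce (a sketch
with a hypothetical helper; the reasoning matches the XMM classes):

// YMM8-YMM15 need the extra register-extension bits that are only honored
// in 64-bit mode, so outside it the allocation order stops after YMM7.
unsigned numAllocatableYMM(bool Is64Bit) { // hypothetical helper
  return Is64Bit ? 16 : 8;
}
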
Modified: llvm/branches/wendling/eh/lib/Transforms/IPO/GlobalOpt.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/IPO/GlobalOpt.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/IPO/GlobalOpt.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/IPO/GlobalOpt.cpp Sat Jul 10 00:06:30 2010
@@ -160,13 +160,12 @@
 static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
                           SmallPtrSet<const PHINode*, 16> &PHIUsers) {
   for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
-       ++UI)
-    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(*UI)) {
+       ++UI) {
+    const User *U = *UI;
+    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
       GS.HasNonInstructionUser = true;
-
       if (AnalyzeGlobal(CE, GS, PHIUsers)) return true;
-
-    } else if (const Instruction *I = dyn_cast<Instruction>(*UI)) {
+    } else if (const Instruction *I = dyn_cast<Instruction>(U)) {
       if (!GS.HasMultipleAccessingFunctions) {
         const Function *F = I->getParent()->getParent();
         if (GS.AccessingFunction == 0)
@@ -235,7 +234,7 @@
       } else {
        return true;  // Any other non-load instruction might take the address!
       }
-    } else if (const Constant *C = dyn_cast<Constant>(*UI)) {
+    } else if (const Constant *C = dyn_cast<Constant>(U)) {
       GS.HasNonInstructionUser = true;
       // We might have a dead and dangling constant hanging off of here.
       if (!SafeToDestroyConstant(C))
@@ -245,6 +244,7 @@
       // Otherwise must be some other user.
       return true;
     }
+  }
 
   return false;
 }

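The same mechanical cleanup recurs throughout the rest of this patch; its
shape, isolated (a sketch, not a complete function):

for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
     UI != E; ++UI) {
  const User *U = *UI;  // dereference the iterator once into a named User*
  // ... then classify U: dyn_cast<ConstantExpr>(U), dyn_cast<Instruction>(U),
  // dyn_cast<Constant>(U), instead of repeating *UI in every test.
}
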
Modified: llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp Sat Jul 10 00:06:30 2010
@@ -365,10 +365,11 @@
     return 0;
   for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
        UI != E; ++UI) {
-    if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI))
+    User *U = *UI;
+    if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(U))
       return DI;
-    if (isa<BitCastInst>(UI) && UI->hasOneUse()) {
-      if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI->use_begin()))
+    if (isa<BitCastInst>(U) && U->hasOneUse()) {
+      if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(U->use_begin()))
         return DI;
       }
   }

Modified: llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstCombineSelect.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstCombineSelect.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstCombineSelect.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstCombineSelect.cpp Sat Jul 10 00:06:30 2010
@@ -329,6 +329,37 @@
       }
     }
 
+  // Transform (X >s -1) ? C1 : C2 --> ((X >>s 31) & (C2 - C1)) + C1
+  // and       (X <s  0) ? C2 : C1 --> ((X >>s 31) & (C2 - C1)) + C1
+  // FIXME: Type and constness constraints could be lifted, but we have to
+  //        watch code size carefully. We should consider xor instead of
+  //        sub/add when we decide to do that.
+  if (const IntegerType *Ty = dyn_cast<IntegerType>(CmpLHS->getType())) {
+    if (TrueVal->getType() == Ty) {
+      if (ConstantInt *Cmp = dyn_cast<ConstantInt>(CmpRHS)) {
+        ConstantInt *C1 = NULL, *C2 = NULL;
+        if (Pred == ICmpInst::ICMP_SGT && Cmp->isAllOnesValue()) {
+          C1 = dyn_cast<ConstantInt>(TrueVal);
+          C2 = dyn_cast<ConstantInt>(FalseVal);
+        } else if (Pred == ICmpInst::ICMP_SLT && Cmp->isNullValue()) {
+          C1 = dyn_cast<ConstantInt>(FalseVal);
+          C2 = dyn_cast<ConstantInt>(TrueVal);
+        }
+        if (C1 && C2) {
+          // This shift results in either -1 or 0.
+          Value *AShr = Builder->CreateAShr(CmpLHS, Ty->getBitWidth()-1);
+
+          // Check if we can express the operation with a single 'or'.
+          if (C2->isAllOnesValue())
+            return ReplaceInstUsesWith(SI, Builder->CreateOr(AShr, C1));
+
+          Value *And = Builder->CreateAnd(AShr, C2->getValue()-C1->getValue());
+          return ReplaceInstUsesWith(SI, Builder->CreateAdd(And, C1));
+        }
+      }
+    }
+  }
+
   if (CmpLHS == TrueVal && CmpRHS == FalseVal) {
     // Transform (X == Y) ? X : Y  -> Y
     if (Pred == ICmpInst::ICMP_EQ)

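To see why the new select combine is sound, here is the branchless form it
produces, as a standalone demonstration (plain C++ with fixed 32-bit types;
the function name and constants are illustrative only):

#include <cassert>
#include <cstdint>
#include <initializer_list>

// (X > -1) ? C1 : C2 without a branch, mirroring the combine above. This
// relies on '>>' being an arithmetic shift for signed values, which the IR
// 'ashr' guarantees (and mainstream C++ compilers do in practice).
static int32_t branchlessSelect(int32_t X, int32_t C1, int32_t C2) {
  int32_t Mask = X >> 31;          // 0 if X >= 0, -1 (all ones) if X < 0
  return (Mask & (C2 - C1)) + C1;  // Mask selects C1 or C2
}

int main() {
  for (int32_t X : {-7, -1, 0, 1, 42})
    assert(branchlessSelect(X, 5, 9) == ((X > -1) ? 5 : 9));
  return 0;
}
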
Modified: llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstructionCombining.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstructionCombining.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstructionCombining.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstructionCombining.cpp Sat Jul 10 00:06:30 2010
@@ -715,9 +715,10 @@
 static bool IsOnlyNullComparedAndFreed(const Value &V) {
   for (Value::const_use_iterator UI = V.use_begin(), UE = V.use_end();
        UI != UE; ++UI) {
-    if (isFreeCall(*UI))
+    const User *U = *UI;
+    if (isFreeCall(U))
       continue;
-    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(*UI))
+    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(U))
       if (ICI->isEquality() && isa<ConstantPointerNull>(ICI->getOperand(1)))
         continue;
     return false;

Modified: llvm/branches/wendling/eh/lib/Transforms/Scalar/GVN.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/Scalar/GVN.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/Scalar/GVN.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/Scalar/GVN.cpp Sat Jul 10 00:06:30 2010
@@ -2128,26 +2128,27 @@
 
       for (pred_iterator PI = pred_begin(CurrentBlock),
            PE = pred_end(CurrentBlock); PI != PE; ++PI) {
+        BasicBlock *P = *PI;
         // We're not interested in PRE where the block is its
         // own predecessor, or in blocks with predecessors
         // that are not reachable.
-        if (*PI == CurrentBlock) {
+        if (P == CurrentBlock) {
           NumWithout = 2;
           break;
-        } else if (!localAvail.count(*PI))  {
+        } else if (!localAvail.count(P))  {
           NumWithout = 2;
           break;
         }
 
         DenseMap<uint32_t, Value*>::iterator predV =
-                                            localAvail[*PI]->table.find(ValNo);
-        if (predV == localAvail[*PI]->table.end()) {
-          PREPred = *PI;
+                                            localAvail[P]->table.find(ValNo);
+        if (predV == localAvail[P]->table.end()) {
+          PREPred = P;
           ++NumWithout;
         } else if (predV->second == CurInst) {
           NumWithout = 2;
         } else {
-          predMap[*PI] = predV->second;
+          predMap[P] = predV->second;
           ++NumWith;
         }
       }
@@ -2213,8 +2214,10 @@
                                      CurInst->getName() + ".pre-phi",
                                      CurrentBlock->begin());
       for (pred_iterator PI = pred_begin(CurrentBlock),
-           PE = pred_end(CurrentBlock); PI != PE; ++PI)
-        Phi->addIncoming(predMap[*PI], *PI);
+           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
+        BasicBlock *P = *PI;
+        Phi->addIncoming(predMap[P], P);
+      }
 
       VN.add(Phi, ValNo);
       localAvail[CurrentBlock]->table[ValNo] = Phi;

Modified: llvm/branches/wendling/eh/lib/Transforms/Scalar/IndVarSimplify.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/Scalar/IndVarSimplify.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/Scalar/IndVarSimplify.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/Scalar/IndVarSimplify.cpp Sat Jul 10 00:06:30 2010
@@ -771,8 +771,9 @@
     bool UsedInLoop = false;
     for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
          UI != UE; ++UI) {
-      BasicBlock *UseBB = cast<Instruction>(UI)->getParent();
-      if (PHINode *P = dyn_cast<PHINode>(UI)) {
+      User *U = *UI;
+      BasicBlock *UseBB = cast<Instruction>(U)->getParent();
+      if (PHINode *P = dyn_cast<PHINode>(U)) {
         unsigned i =
           PHINode::getIncomingValueNumForOperand(UI.getOperandNo());
         UseBB = P->getIncomingBlock(i);

Modified: llvm/branches/wendling/eh/lib/Transforms/Utils/BreakCriticalEdges.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/Utils/BreakCriticalEdges.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/Utils/BreakCriticalEdges.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/Utils/BreakCriticalEdges.cpp Sat Jul 10 00:06:30 2010
@@ -106,11 +106,12 @@
   // If AllowIdenticalEdges is true, then we allow this edge to be considered
   // non-critical iff all preds come from TI's block.
   while (I != E) {
-    if (*I != FirstPred)
+    const BasicBlock *P = *I;
+    if (P != FirstPred)
       return true;
    // Note: leave this as-is until nobody still compiles with gcc 4.0.1 or
    // Xcode 2; it seems to work around the pred_iterator assert in PR 2207.
-    E = pred_end(*I);
+    E = pred_end(P);
     ++I;
   }
   return false;
@@ -277,11 +278,13 @@
         OtherPreds.push_back(PN->getIncomingBlock(i));
   } else {
     for (pred_iterator I = pred_begin(DestBB), E = pred_end(DestBB);
-         I != E; ++I)
-      if (*I != NewBB)
-        OtherPreds.push_back(*I);
+         I != E; ++I) {
+      BasicBlock *P = *I;
+      if (P != NewBB)
+        OtherPreds.push_back(P);
+    }
   }
-  
+
   bool NewBBDominatesDestBB = true;
   
   // Should we update DominatorTree information?
@@ -400,11 +403,13 @@
           bool HasPredOutsideOfLoop = false;
           BasicBlock *Exit = ExitBlocks[i];
           for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit);
-               I != E; ++I)
-            if (TIL->contains(*I))
-              Preds.push_back(*I);
+               I != E; ++I) {
+            BasicBlock *P = *I;
+            if (TIL->contains(P))
+              Preds.push_back(P);
             else
               HasPredOutsideOfLoop = true;
+          }
           // If there are any preds not in the loop, we'll need to split
           // the edges. The Preds.empty() check is needed because a block
           // may appear multiple times in the list. We can't use

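For context on the predicate being modernized here: an edge is critical
exactly when its source block has more than one successor and its
destination has more than one predecessor. A minimal restatement
(hypothetical helper):

// Splitting a critical edge inserts a fresh block on it, so code that must
// run only along that edge (PHI-related copies, spill code) has a home.
bool isCriticalEdgeSketch(unsigned SuccsOfSrc, unsigned PredsOfDest) {
  return SuccsOfSrc > 1 && PredsOfDest > 1;
}
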
Modified: llvm/branches/wendling/eh/lib/Transforms/Utils/LCSSA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/Utils/LCSSA.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/Utils/LCSSA.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/Utils/LCSSA.cpp Sat Jul 10 00:06:30 2010
@@ -190,14 +190,15 @@
   
   for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
        UI != E; ++UI) {
-    BasicBlock *UserBB = cast<Instruction>(*UI)->getParent();
-    if (PHINode *PN = dyn_cast<PHINode>(*UI))
+    User *U = *UI;
+    BasicBlock *UserBB = cast<Instruction>(U)->getParent();
+    if (PHINode *PN = dyn_cast<PHINode>(U))
       UserBB = PN->getIncomingBlock(UI);
     
     if (InstBB != UserBB && !inLoop(UserBB))
       UsesToRewrite.push_back(&UI.getUse());
   }
-  
+
   // If there are no uses outside the loop, exit with no change.
   if (UsesToRewrite.empty()) return false;
   

Modified: llvm/branches/wendling/eh/lib/Transforms/Utils/LoopSimplify.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/Utils/LoopSimplify.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/Utils/LoopSimplify.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/Utils/LoopSimplify.cpp Sat Jul 10 00:06:30 2010
@@ -142,9 +142,11 @@
     if (*BB == L->getHeader()) continue;
 
     SmallPtrSet<BasicBlock *, 4> BadPreds;
-    for (pred_iterator PI = pred_begin(*BB), PE = pred_end(*BB); PI != PE; ++PI)
-      if (!L->contains(*PI))
-        BadPreds.insert(*PI);
+    for (pred_iterator PI = pred_begin(*BB), PE = pred_end(*BB); PI != PE; ++PI){
+      BasicBlock *P = *PI;
+      if (!L->contains(P))
+        BadPreds.insert(P);
+    }
 
     // Delete each unique out-of-loop (and thus dead) predecessor.
     for (SmallPtrSet<BasicBlock *, 4>::iterator I = BadPreds.begin(),
@@ -353,16 +355,18 @@
   // Compute the set of predecessors of the loop that are not in the loop.
   SmallVector<BasicBlock*, 8> OutsideBlocks;
   for (pred_iterator PI = pred_begin(Header), PE = pred_end(Header);
-       PI != PE; ++PI)
-    if (!L->contains(*PI)) {         // Coming in from outside the loop?
+       PI != PE; ++PI) {
+    BasicBlock *P = *PI;
+    if (!L->contains(P)) {         // Coming in from outside the loop?
       // If the loop is branched to from an indirect branch, we won't
       // be able to fully transform the loop, because it prohibits
       // edge splitting.
-      if (isa<IndirectBrInst>((*PI)->getTerminator())) return 0;
+      if (isa<IndirectBrInst>(P->getTerminator())) return 0;
 
       // Keep track of it.
-      OutsideBlocks.push_back(*PI);
+      OutsideBlocks.push_back(P);
     }
+  }
 
   // Split out the loop pre-header.
   BasicBlock *NewBB =
@@ -385,13 +389,15 @@
 /// outside of the loop.
 BasicBlock *LoopSimplify::RewriteLoopExitBlock(Loop *L, BasicBlock *Exit) {
   SmallVector<BasicBlock*, 8> LoopBlocks;
-  for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit); I != E; ++I)
-    if (L->contains(*I)) {
+  for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit); I != E; ++I) {
+    BasicBlock *P = *I;
+    if (L->contains(P)) {
       // Don't do this if the loop is exited via an indirect branch.
-      if (isa<IndirectBrInst>((*I)->getTerminator())) return 0;
+      if (isa<IndirectBrInst>(P->getTerminator())) return 0;
 
-      LoopBlocks.push_back(*I);
+      LoopBlocks.push_back(P);
     }
+  }
 
   assert(!LoopBlocks.empty() && "No edges coming in from outside the loop?");
   BasicBlock *NewBB = SplitBlockPredecessors(Exit, &LoopBlocks[0], 
@@ -559,10 +565,11 @@
   // Determine which blocks should stay in L and which should be moved out to
   // the Outer loop now.
   std::set<BasicBlock*> BlocksInL;
-  for (pred_iterator PI = pred_begin(Header), E = pred_end(Header); PI!=E; ++PI)
-    if (DT->dominates(Header, *PI))
-      AddBlockAndPredsToSet(*PI, Header, BlocksInL);
-
+  for (pred_iterator PI=pred_begin(Header), E = pred_end(Header); PI!=E; ++PI) {
+    BasicBlock *P = *PI;
+    if (DT->dominates(Header, P))
+      AddBlockAndPredsToSet(P, Header, BlocksInL);
+  }
 
   // Scan all of the loop children of L, moving them to OuterLoop if they are
   // not part of the inner loop.
@@ -610,8 +617,10 @@
 
   // Figure out which basic blocks contain back-edges to the loop header.
   std::vector<BasicBlock*> BackedgeBlocks;
-  for (pred_iterator I = pred_begin(Header), E = pred_end(Header); I != E; ++I)
-    if (*I != Preheader) BackedgeBlocks.push_back(*I);
+  for (pred_iterator I = pred_begin(Header), E = pred_end(Header); I != E; ++I){
+    BasicBlock *P = *I;
+    if (P != Preheader) BackedgeBlocks.push_back(P);
+  }
 
   // Create and insert the new backedge block...
   BasicBlock *BEBlock = BasicBlock::Create(Header->getContext(),

Modified: llvm/branches/wendling/eh/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/Utils/PromoteMemoryToRegister.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/Utils/PromoteMemoryToRegister.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/Utils/PromoteMemoryToRegister.cpp Sat Jul 10 00:06:30 2010
@@ -69,11 +69,12 @@
 
   // Only allow direct and non-volatile loads and stores...
   for (Value::const_use_iterator UI = AI->use_begin(), UE = AI->use_end();
-       UI != UE; ++UI)     // Loop over all of the uses of the alloca
-    if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+       UI != UE; ++UI) {   // Loop over all of the uses of the alloca
+    const User *U = *UI;
+    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
       if (LI->isVolatile())
         return false;
-    } else if (const StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
+    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
       if (SI->getOperand(0) == AI)
         return false;   // Don't allow a store OF the AI, only INTO the AI.
       if (SI->isVolatile())
@@ -81,6 +82,7 @@
     } else {
       return false;
     }
+  }
 
   return true;
 }

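The "store OF the AI, only INTO the AI" check above is the subtle half of
this predicate; spelled out on its own (a sketch reusing the calls already
visible in the loop, not a real mem2reg API):

// A store keeps the alloca promotable only when the alloca is the address
// operand. If the alloca's address is the *value* being stored, the pointer
// escapes and the memory cannot be rewritten into SSA registers.
static bool storeIsHarmless(const StoreInst *SI, const AllocaInst *AI) {
  return SI->getOperand(0) != AI && !SI->isVolatile();
}
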
Modified: llvm/branches/wendling/eh/lib/Transforms/Utils/SimplifyCFG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/Utils/SimplifyCFG.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/Utils/SimplifyCFG.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/Utils/SimplifyCFG.cpp Sat Jul 10 00:06:30 2010
@@ -1697,10 +1697,11 @@
       SmallVector<BasicBlock*, 8> UncondBranchPreds;
       SmallVector<BranchInst*, 8> CondBranchPreds;
       for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
-        TerminatorInst *PTI = (*PI)->getTerminator();
+        BasicBlock *P = *PI;
+        TerminatorInst *PTI = P->getTerminator();
         if (BranchInst *BI = dyn_cast<BranchInst>(PTI)) {
           if (BI->isUnconditional())
-            UncondBranchPreds.push_back(*PI);
+            UncondBranchPreds.push_back(P);
           else
             CondBranchPreds.push_back(BI);
         }

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll Sat Jul 10 00:06:30 2010
@@ -4,19 +4,22 @@
 
 ; This tests the fast register allocator's handling of partial redefines:
 ;
-;	%reg1026<def> = VMOVv16i8 0, pred:14, pred:%reg0
-;	%reg1028:dsub_1<def> = EXTRACT_SUBREG %reg1026<kill>, 1
+;      %reg1028:dsub_0<def>, %reg1028:dsub_1<def> = VLD1q64 %reg1025...
+;      %reg1030:dsub_1<def> = COPY %reg1028:dsub_0<kill>
 ;
-; %reg1026 gets allocated %Q0, and if %reg1028 is reloaded for the partial redef,
-; it cannot also get %Q0.
+; %reg1028 gets allocated %Q0, and if %reg1030 is reloaded for the partial
+; redef, it cannot also get %Q0.
 
-; CHECK: vmov.i8 q0, #0x0
-; CHECK-NOT: vld1.64 {d0,d1}
+; CHECK: vld1.64 {d0, d1}, [r{{.}}]
+; CHECK-NOT: vld1.64 {d0, d1}
 ; CHECK: vmov.f64 d3, d0
 
-define i32 @main(i32 %argc, i8** %argv) nounwind {
+define i32 @test(i8* %arg) nounwind {
 entry:
- %0 = shufflevector <2 x i64> undef, <2 x i64> zeroinitializer, <2 x i32> <i32 1, i32 2> ; <<2 x i64>> [#uses=1]
- store <2 x i64> %0, <2 x i64>* undef, align 16
+ %0 = call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %arg)
+ %1 = shufflevector <2 x i64> undef, <2 x i64> %0, <2 x i32> <i32 1, i32 2>
+ store <2 x i64> %1, <2 x i64>* undef, align 16
  ret i32 undef
 }
+
+declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*) nounwind readonly

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/alloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/alloca.ll?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/alloca.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/alloca.ll Sat Jul 10 00:06:30 2010
@@ -2,11 +2,11 @@
 
 define void @f(i32 %a) {
 entry:
-; CHECK: mov r11, sp
+; CHECK: add  r11, sp, #4
         %tmp = alloca i8, i32 %a                ; <i8*> [#uses=1]
         call void @g( i8* %tmp, i32 %a, i32 1, i32 2, i32 3 )
         ret void
-; CHECK: mov sp, r11
+; CHECK: sub  sp, r11, #4
 }
 
 declare void @g(i8*, i32, i32, i32, i32)

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/reg_sequence.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/reg_sequence.ll?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/reg_sequence.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/reg_sequence.ll Sat Jul 10 00:06:30 2010
@@ -240,8 +240,8 @@
 ; CHECK:        vldr.64
 ; CHECK-NOT:    vmov d{{.*}}, d0
 ; CHECK:        vmov.i8 d1
-; CHECK-NEXT:   vstmia r0, {d0,d1}
-; CHECK-NEXT:   vstmia r0, {d0,d1}
+; CHECK-NEXT:   vstmia r0, {d0, d1}
+; CHECK-NEXT:   vstmia r0, {d0, d1}
   %3 = bitcast double 0.000000e+00 to <2 x float> ; <<2 x float>> [#uses=2]
   %4 = shufflevector <2 x float> %3, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
   store <4 x float> %4, <4 x float>* undef, align 16
@@ -270,7 +270,6 @@
 entry:
 ; CHECK: t10:
 ; CHECK: vmov.i32 q1, #0x3F000000
-; CHECK: vdup.32 q0, d0[0]
 ; CHECK: vmov d0, d1
 ; CHECK: vmla.f32 q0, q0, d0[0]
   %0 = shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm.ll?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm.ll Sat Jul 10 00:06:30 2010
@@ -56,7 +56,7 @@
 entry:
 ; CHECK: t2:
 ; CHECK: adr r{{.}}, #LCPI1_0
-; CHECK: vldmia r3, {d0,d1}
+; CHECK: vldmia r3, {d0, d1}
   br i1 undef, label %bb1, label %bb2
 
 bb1:

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/sse-minmax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/sse-minmax.ll?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/sse-minmax.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/sse-minmax.ll Sat Jul 10 00:06:30 2010
@@ -44,15 +44,15 @@
 
 ; CHECK:      ogt_inverse:
 ; CHECK-NEXT: minsd  %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE:      ogt_inverse:
 ; UNSAFE-NEXT: minsd  %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      ogt_inverse:
 ; FINITE-NEXT: minsd  %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @ogt_inverse(double %x, double %y) nounwind {
   %c = fcmp ogt double %x, %y
@@ -62,15 +62,15 @@
 
 ; CHECK:      olt_inverse:
 ; CHECK-NEXT: maxsd  %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE:      olt_inverse:
 ; UNSAFE-NEXT: maxsd  %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      olt_inverse:
 ; FINITE-NEXT: maxsd  %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @olt_inverse(double %x, double %y) nounwind {
   %c = fcmp olt double %x, %y
@@ -108,11 +108,11 @@
 ; CHECK-NEXT: ucomisd %xmm1, %xmm0
 ; UNSAFE:      oge_inverse:
 ; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      oge_inverse:
 ; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @oge_inverse(double %x, double %y) nounwind {
   %c = fcmp oge double %x, %y
@@ -124,11 +124,11 @@
 ; CHECK-NEXT: ucomisd %xmm0, %xmm1
 ; UNSAFE:      ole_inverse:
 ; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      ole_inverse:
 ; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @ole_inverse(double %x, double %y) nounwind {
   %c = fcmp ole double %x, %y
@@ -175,17 +175,17 @@
 ; CHECK:      x_ogt_inverse:
 ; CHECK-NEXT: pxor   %xmm1, %xmm1
 ; CHECK-NEXT: minsd  %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE:      x_ogt_inverse:
 ; UNSAFE-NEXT: pxor   %xmm1, %xmm1
 ; UNSAFE-NEXT: minsd  %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      x_ogt_inverse:
 ; FINITE-NEXT: pxor   %xmm1, %xmm1
 ; FINITE-NEXT: minsd  %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_ogt_inverse(double %x) nounwind {
   %c = fcmp ogt double %x, 0.000000e+00
@@ -196,17 +196,17 @@
 ; CHECK:      x_olt_inverse:
 ; CHECK-NEXT: pxor   %xmm1, %xmm1
 ; CHECK-NEXT: maxsd  %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE:      x_olt_inverse:
 ; UNSAFE-NEXT: pxor   %xmm1, %xmm1
 ; UNSAFE-NEXT: maxsd  %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      x_olt_inverse:
 ; FINITE-NEXT: pxor   %xmm1, %xmm1
 ; FINITE-NEXT: maxsd  %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_olt_inverse(double %x) nounwind {
   %c = fcmp olt double %x, 0.000000e+00
@@ -251,12 +251,12 @@
 ; UNSAFE:      x_oge_inverse:
 ; UNSAFE-NEXT: pxor    %xmm1, %xmm1
 ; UNSAFE-NEXT: minsd   %xmm0, %xmm1
-; UNSAFE-NEXT: movapd  %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      x_oge_inverse:
 ; FINITE-NEXT: pxor    %xmm1, %xmm1
 ; FINITE-NEXT: minsd   %xmm0, %xmm1
-; FINITE-NEXT: movapd  %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_oge_inverse(double %x) nounwind {
   %c = fcmp oge double %x, 0.000000e+00
@@ -269,12 +269,12 @@
 ; UNSAFE:      x_ole_inverse:
 ; UNSAFE-NEXT: pxor    %xmm1, %xmm1
 ; UNSAFE-NEXT: maxsd   %xmm0, %xmm1
-; UNSAFE-NEXT: movapd  %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      x_ole_inverse:
 ; FINITE-NEXT: pxor    %xmm1, %xmm1
 ; FINITE-NEXT: maxsd   %xmm0, %xmm1
-; FINITE-NEXT: movapd  %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_ole_inverse(double %x) nounwind {
   %c = fcmp ole double %x, 0.000000e+00
@@ -314,11 +314,11 @@
 ; CHECK:      ucomisd %xmm0, %xmm1
 ; UNSAFE:      ugt_inverse:
 ; UNSAFE-NEXT: minsd   %xmm0, %xmm1
-; UNSAFE-NEXT: movapd  %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      ugt_inverse:
 ; FINITE-NEXT: minsd   %xmm0, %xmm1
-; FINITE-NEXT: movapd  %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @ugt_inverse(double %x, double %y) nounwind {
   %c = fcmp ugt double %x, %y
@@ -330,11 +330,11 @@
 ; CHECK:      ucomisd %xmm1, %xmm0
 ; UNSAFE:      ult_inverse:
 ; UNSAFE-NEXT: maxsd   %xmm0, %xmm1
-; UNSAFE-NEXT: movapd  %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      ult_inverse:
 ; FINITE-NEXT: maxsd   %xmm0, %xmm1
-; FINITE-NEXT: movapd  %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @ult_inverse(double %x, double %y) nounwind {
   %c = fcmp ult double %x, %y
@@ -344,7 +344,7 @@
 
 ; CHECK:      uge:
 ; CHECK-NEXT: maxsd   %xmm0, %xmm1
-; CHECK-NEXT: movapd  %xmm1, %xmm0
+; CHECK-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE:      uge:
 ; UNSAFE-NEXT: maxsd   %xmm1, %xmm0
@@ -360,7 +360,7 @@
 
 ; CHECK:      ule:
 ; CHECK-NEXT: minsd  %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE:      ule:
 ; UNSAFE-NEXT: minsd   %xmm1, %xmm0
@@ -379,11 +379,11 @@
 ; CHECK-NEXT: ret
 ; UNSAFE:      uge_inverse:
 ; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      uge_inverse:
 ; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @uge_inverse(double %x, double %y) nounwind {
   %c = fcmp uge double %x, %y
@@ -396,11 +396,11 @@
 ; CHECK-NEXT: ret
 ; UNSAFE:      ule_inverse:
 ; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      ule_inverse:
 ; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @ule_inverse(double %x, double %y) nounwind {
   %c = fcmp ule double %x, %y
@@ -445,12 +445,12 @@
 ; UNSAFE:      x_ugt_inverse:
 ; UNSAFE-NEXT: pxor    %xmm1, %xmm1
 ; UNSAFE-NEXT: minsd   %xmm0, %xmm1
-; UNSAFE-NEXT: movapd  %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      x_ugt_inverse:
 ; FINITE-NEXT: pxor    %xmm1, %xmm1
 ; FINITE-NEXT: minsd   %xmm0, %xmm1
-; FINITE-NEXT: movapd  %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_ugt_inverse(double %x) nounwind {
   %c = fcmp ugt double %x, 0.000000e+00
@@ -463,12 +463,12 @@
 ; UNSAFE:      x_ult_inverse:
 ; UNSAFE-NEXT: pxor    %xmm1, %xmm1
 ; UNSAFE-NEXT: maxsd   %xmm0, %xmm1
-; UNSAFE-NEXT: movapd  %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      x_ult_inverse:
 ; FINITE-NEXT: pxor    %xmm1, %xmm1
 ; FINITE-NEXT: maxsd   %xmm0, %xmm1
-; FINITE-NEXT: movapd  %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_ult_inverse(double %x) nounwind {
   %c = fcmp ult double %x, 0.000000e+00
@@ -479,7 +479,7 @@
 ; CHECK:      x_uge:
 ; CHECK-NEXT: pxor   %xmm1, %xmm1
 ; CHECK-NEXT: maxsd  %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE:      x_uge:
 ; UNSAFE-NEXT: pxor   %xmm1, %xmm1
@@ -498,7 +498,7 @@
 ; CHECK:      x_ule:
 ; CHECK-NEXT: pxor   %xmm1, %xmm1
 ; CHECK-NEXT: minsd  %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE:      x_ule:
 ; UNSAFE-NEXT: pxor   %xmm1, %xmm1
@@ -521,12 +521,12 @@
 ; UNSAFE:      x_uge_inverse:
 ; UNSAFE-NEXT: pxor  %xmm1, %xmm1
 ; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      x_uge_inverse:
 ; FINITE-NEXT: pxor  %xmm1, %xmm1
 ; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_uge_inverse(double %x) nounwind {
   %c = fcmp uge double %x, 0.000000e+00
@@ -541,12 +541,12 @@
 ; UNSAFE:      x_ule_inverse:
 ; UNSAFE-NEXT: pxor  %xmm1, %xmm1
 ; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      x_ule_inverse:
 ; FINITE-NEXT: pxor  %xmm1, %xmm1
 ; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_ule_inverse(double %x) nounwind {
   %c = fcmp ule double %x, 0.000000e+00
@@ -587,17 +587,17 @@
 ; CHECK:      y_ogt_inverse:
 ; CHECK-NEXT: movsd  {{[^,]*}}, %xmm1
 ; CHECK-NEXT: minsd  %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE:      y_ogt_inverse:
 ; UNSAFE-NEXT: movsd  {{[^,]*}}, %xmm1
 ; UNSAFE-NEXT: minsd  %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      y_ogt_inverse:
 ; FINITE-NEXT: movsd  {{[^,]*}}, %xmm1
 ; FINITE-NEXT: minsd  %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @y_ogt_inverse(double %x) nounwind {
   %c = fcmp ogt double %x, -0.000000e+00
@@ -608,17 +608,17 @@
 ; CHECK:      y_olt_inverse:
 ; CHECK-NEXT: movsd  {{[^,]*}}, %xmm1
 ; CHECK-NEXT: maxsd  %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE:      y_olt_inverse:
 ; UNSAFE-NEXT: movsd  {{[^,]*}}, %xmm1
 ; UNSAFE-NEXT: maxsd  %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      y_olt_inverse:
 ; FINITE-NEXT: movsd  {{[^,]*}}, %xmm1
 ; FINITE-NEXT: maxsd  %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @y_olt_inverse(double %x) nounwind {
   %c = fcmp olt double %x, -0.000000e+00
@@ -659,12 +659,12 @@
 ; UNSAFE:      y_oge_inverse:
 ; UNSAFE-NEXT: movsd   {{[^,]*}}, %xmm1
 ; UNSAFE-NEXT: minsd   %xmm0, %xmm1
-; UNSAFE-NEXT: movapd  %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      y_oge_inverse:
 ; FINITE-NEXT: movsd   {{[^,]*}}, %xmm1
 ; FINITE-NEXT: minsd   %xmm0, %xmm1
-; FINITE-NEXT: movapd  %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @y_oge_inverse(double %x) nounwind {
   %c = fcmp oge double %x, -0.000000e+00
@@ -677,12 +677,12 @@
 ; UNSAFE:      y_ole_inverse:
 ; UNSAFE-NEXT: movsd   {{[^,]*}}, %xmm1
 ; UNSAFE-NEXT: maxsd   %xmm0, %xmm1
-; UNSAFE-NEXT: movapd  %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      y_ole_inverse:
 ; FINITE-NEXT: movsd   {{[^,]*}}, %xmm1
 ; FINITE-NEXT: maxsd   %xmm0, %xmm1
-; FINITE-NEXT: movapd  %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @y_ole_inverse(double %x) nounwind {
   %c = fcmp ole double %x, -0.000000e+00
@@ -723,12 +723,12 @@
 ; UNSAFE:      y_ugt_inverse:
 ; UNSAFE-NEXT: movsd   {{[^,]*}}, %xmm1
 ; UNSAFE-NEXT: minsd   %xmm0, %xmm1
-; UNSAFE-NEXT: movapd  %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      y_ugt_inverse:
 ; FINITE-NEXT: movsd   {{[^,]*}}, %xmm1
 ; FINITE-NEXT: minsd   %xmm0, %xmm1
-; FINITE-NEXT: movapd  %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @y_ugt_inverse(double %x) nounwind {
   %c = fcmp ugt double %x, -0.000000e+00
@@ -741,12 +741,12 @@
 ; UNSAFE:      y_ult_inverse:
 ; UNSAFE-NEXT: movsd   {{[^,]*}}, %xmm1
 ; UNSAFE-NEXT: maxsd   %xmm0, %xmm1
-; UNSAFE-NEXT: movapd  %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      y_ult_inverse:
 ; FINITE-NEXT: movsd   {{[^,]*}}, %xmm1
 ; FINITE-NEXT: maxsd   %xmm0, %xmm1
-; FINITE-NEXT: movapd  %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}}  %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @y_ult_inverse(double %x) nounwind {
   %c = fcmp ult double %x, -0.000000e+00
@@ -757,7 +757,7 @@
 ; CHECK:      y_uge:
 ; CHECK-NEXT: movsd  {{[^,]*}}, %xmm1
 ; CHECK-NEXT: maxsd  %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE:      y_uge:
 ; UNSAFE-NEXT: maxsd  {{[^,]*}}, %xmm0
@@ -774,7 +774,7 @@
 ; CHECK:      y_ule:
 ; CHECK-NEXT: movsd  {{[^,]*}}, %xmm1
 ; CHECK-NEXT: minsd  %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
+; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE:      y_ule:
 ; UNSAFE-NEXT: minsd  {{[^,]*}}, %xmm0
@@ -794,12 +794,12 @@
 ; UNSAFE:      y_uge_inverse:
 ; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
 ; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      y_uge_inverse:
 ; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
 ; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @y_uge_inverse(double %x) nounwind {
   %c = fcmp uge double %x, -0.000000e+00
@@ -813,12 +813,12 @@
 ; UNSAFE:      y_ule_inverse:
 ; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
 ; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
+; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE:      y_ule_inverse:
 ; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
 ; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
+; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @y_ule_inverse(double %x) nounwind {
   %c = fcmp ule double %x, -0.000000e+00

Modified: llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_32-encoding.s
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_32-encoding.s?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_32-encoding.s (original)
+++ llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_32-encoding.s Sat Jul 10 00:06:30 2010
@@ -11927,3 +11927,471 @@
           vdppd  $3, (%eax), %xmm5, %xmm1
 
 
+// CHECK: vblendvpd  %xmm2, (%eax), %xmm1, %xmm3
+// CHECK: encoding: [0xc4,0xe3,0x71,0x4b,0x18,0x20]
+          vblendvpd  %xmm2, (%eax), %xmm1, %xmm3
+
+// CHECK: vblendvps  %xmm2, %xmm5, %xmm1, %xmm3
+// CHECK: encoding: [0xc4,0xe3,0x71,0x4a,0xdd,0x20]
+          vblendvps  %xmm2, %xmm5, %xmm1, %xmm3
+
+// CHECK: vblendvps  %xmm2, (%eax), %xmm1, %xmm3
+// CHECK: encoding: [0xc4,0xe3,0x71,0x4a,0x18,0x20]
+          vblendvps  %xmm2, (%eax), %xmm1, %xmm3
+
+// CHECK: vpblendvb  %xmm2, %xmm5, %xmm1, %xmm3
+// CHECK: encoding: [0xc4,0xe3,0x71,0x4c,0xdd,0x20]
+          vpblendvb  %xmm2, %xmm5, %xmm1, %xmm3
+
+// CHECK: vpblendvb  %xmm2, (%eax), %xmm1, %xmm3
+// CHECK: encoding: [0xc4,0xe3,0x71,0x4c,0x18,0x20]
+          vpblendvb  %xmm2, (%eax), %xmm1, %xmm3
+
+// CHECK: vpmovsxbw  %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe2,0x79,0x20,0xea]
+          vpmovsxbw  %xmm2, %xmm5
+
+// CHECK: vpmovsxbw  (%eax), %xmm2
+// CHECK: encoding: [0xc4,0xe2,0x79,0x20,0x10]
+          vpmovsxbw  (%eax), %xmm2
+
+// CHECK: vpmovsxwd  %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe2,0x79,0x23,0xea]
+          vpmovsxwd  %xmm2, %xmm5
+
+// CHECK: vpmovsxwd  (%eax), %xmm2
+// CHECK: encoding: [0xc4,0xe2,0x79,0x23,0x10]
+          vpmovsxwd  (%eax), %xmm2
+
+// CHECK: vpmovsxdq  %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe2,0x79,0x25,0xea]
+          vpmovsxdq  %xmm2, %xmm5
+
+// CHECK: vpmovsxdq  (%eax), %xmm2
+// CHECK: encoding: [0xc4,0xe2,0x79,0x25,0x10]
+          vpmovsxdq  (%eax), %xmm2
+
+// CHECK: vpmovzxbw  %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe2,0x79,0x30,0xea]
+          vpmovzxbw  %xmm2, %xmm5
+
+// CHECK: vpmovzxbw  (%eax), %xmm2
+// CHECK: encoding: [0xc4,0xe2,0x79,0x30,0x10]
+          vpmovzxbw  (%eax), %xmm2
+
+// CHECK: vpmovzxwd  %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe2,0x79,0x33,0xea]
+          vpmovzxwd  %xmm2, %xmm5
+
+// CHECK: vpmovzxwd  (%eax), %xmm2
+// CHECK: encoding: [0xc4,0xe2,0x79,0x33,0x10]
+          vpmovzxwd  (%eax), %xmm2
+
+// CHECK: vpmovzxdq  %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe2,0x79,0x35,0xea]
+          vpmovzxdq  %xmm2, %xmm5
+
+// CHECK: vpmovzxdq  (%eax), %xmm2
+// CHECK: encoding: [0xc4,0xe2,0x79,0x35,0x10]
+          vpmovzxdq  (%eax), %xmm2
+
+// CHECK: vpmovsxbq  %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe2,0x79,0x22,0xea]
+          vpmovsxbq  %xmm2, %xmm5
+
+// CHECK: vpmovsxbq  (%eax), %xmm2
+// CHECK: encoding: [0xc4,0xe2,0x79,0x22,0x10]
+          vpmovsxbq  (%eax), %xmm2
+
+// CHECK: vpmovzxbq  %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe2,0x79,0x32,0xea]
+          vpmovzxbq  %xmm2, %xmm5
+
+// CHECK: vpmovzxbq  (%eax), %xmm2
+// CHECK: encoding: [0xc4,0xe2,0x79,0x32,0x10]
+          vpmovzxbq  (%eax), %xmm2
+
+// CHECK: vpmovsxbd  %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe2,0x79,0x21,0xea]
+          vpmovsxbd  %xmm2, %xmm5
+
+// CHECK: vpmovsxbd  (%eax), %xmm2
+// CHECK: encoding: [0xc4,0xe2,0x79,0x21,0x10]
+          vpmovsxbd  (%eax), %xmm2
+
+// CHECK: vpmovsxwq  %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe2,0x79,0x24,0xea]
+          vpmovsxwq  %xmm2, %xmm5
+
+// CHECK: vpmovsxwq  (%eax), %xmm2
+// CHECK: encoding: [0xc4,0xe2,0x79,0x24,0x10]
+          vpmovsxwq  (%eax), %xmm2
+
+// CHECK: vpmovzxbd  %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe2,0x79,0x31,0xea]
+          vpmovzxbd  %xmm2, %xmm5
+
+// CHECK: vpmovzxbd  (%eax), %xmm2
+// CHECK: encoding: [0xc4,0xe2,0x79,0x31,0x10]
+          vpmovzxbd  (%eax), %xmm2
+
+// CHECK: vpmovzxwq  %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe2,0x79,0x34,0xea]
+          vpmovzxwq  %xmm2, %xmm5
+
+// CHECK: vpmovzxwq  (%eax), %xmm2
+// CHECK: encoding: [0xc4,0xe2,0x79,0x34,0x10]
+          vpmovzxwq  (%eax), %xmm2
+
+// CHECK: vpextrw  $7, %xmm2, %eax
+// CHECK: encoding: [0xc5,0xf9,0xc5,0xc2,0x07]
+          vpextrw  $7, %xmm2, %eax
+
+// CHECK: vpextrw  $7, %xmm2, (%eax)
+// CHECK: encoding: [0xc4,0xe3,0x79,0x15,0x10,0x07]
+          vpextrw  $7, %xmm2, (%eax)
+
+// CHECK: vpextrd  $7, %xmm2, %eax
+// CHECK: encoding: [0xc4,0xe3,0x79,0x16,0xd0,0x07]
+          vpextrd  $7, %xmm2, %eax
+
+// CHECK: vpextrd  $7, %xmm2, (%eax)
+// CHECK: encoding: [0xc4,0xe3,0x79,0x16,0x10,0x07]
+          vpextrd  $7, %xmm2, (%eax)
+
+// CHECK: vpextrb  $7, %xmm2, %eax
+// CHECK: encoding: [0xc4,0xe3,0x79,0x14,0xd0,0x07]
+          vpextrb  $7, %xmm2, %eax
+
+// CHECK: vpextrb  $7, %xmm2, (%eax)
+// CHECK: encoding: [0xc4,0xe3,0x79,0x14,0x10,0x07]
+          vpextrb  $7, %xmm2, (%eax)
+
+// CHECK: vextractps  $7, %xmm2, (%eax)
+// CHECK: encoding: [0xc4,0xe3,0x79,0x17,0x10,0x07]
+          vextractps  $7, %xmm2, (%eax)
+
+// CHECK: vextractps  $7, %xmm2, %eax
+// CHECK: encoding: [0xc4,0xe3,0x79,0x17,0xd0,0x07]
+          vextractps  $7, %xmm2, %eax
+
+// CHECK: vpinsrw  $7, %eax, %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xe9,0xc4,0xe8,0x07]
+          vpinsrw  $7, %eax, %xmm2, %xmm5
+
+// CHECK: vpinsrw  $7, (%eax), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xe9,0xc4,0x28,0x07]
+          vpinsrw  $7, (%eax), %xmm2, %xmm5
+
+// CHECK: vpinsrb  $7, %eax, %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe3,0x69,0x20,0xe8,0x07]
+          vpinsrb  $7, %eax, %xmm2, %xmm5
+
+// CHECK: vpinsrb  $7, (%eax), %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe3,0x69,0x20,0x28,0x07]
+          vpinsrb  $7, (%eax), %xmm2, %xmm5
+
+// CHECK: vpinsrd  $7, %eax, %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe3,0x69,0x22,0xe8,0x07]
+          vpinsrd  $7, %eax, %xmm2, %xmm5
+
+// CHECK: vpinsrd  $7, (%eax), %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe3,0x69,0x22,0x28,0x07]
+          vpinsrd  $7, (%eax), %xmm2, %xmm5
+
+// CHECK: vinsertps  $7, %xmm2, %xmm5, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x51,0x21,0xca,0x07]
+          vinsertps  $7, %xmm2, %xmm5, %xmm1
+
+// CHECK: vinsertps  $7, (%eax), %xmm5, %xmm1
+// CHECK: encoding: [0xc4,0xe3,0x51,0x21,0x08,0x07]
+          vinsertps  $7, (%eax), %xmm5, %xmm1
+
+// CHECK: vptest  %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe2,0x79,0x17,0xea]
+          vptest  %xmm2, %xmm5
+
+// CHECK: vptest  (%eax), %xmm2
+// CHECK: encoding: [0xc4,0xe2,0x79,0x17,0x10]
+          vptest  (%eax), %xmm2
+
+// CHECK: vmovntdqa  (%eax), %xmm2
+// CHECK: encoding: [0xc4,0xe2,0x79,0x2a,0x10]
+          vmovntdqa  (%eax), %xmm2
+
+// CHECK: vpcmpgtq  %xmm2, %xmm5, %xmm1
+// CHECK: encoding: [0xc4,0xe2,0x51,0x37,0xca]
+          vpcmpgtq  %xmm2, %xmm5, %xmm1
+
+// CHECK: vpcmpgtq  (%eax), %xmm5, %xmm3
+// CHECK: encoding: [0xc4,0xe2,0x51,0x37,0x18]
+          vpcmpgtq  (%eax), %xmm5, %xmm3
+
+// CHECK: vpcmpistrm  $7, %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe3,0x79,0x62,0xea,0x07]
+          vpcmpistrm  $7, %xmm2, %xmm5
+
+// CHECK: vpcmpistrm  $7, (%eax), %xmm5
+// CHECK: encoding: [0xc4,0xe3,0x79,0x62,0x28,0x07]
+          vpcmpistrm  $7, (%eax), %xmm5
+
+// CHECK: vpcmpestrm  $7, %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe3,0x79,0x60,0xea,0x07]
+          vpcmpestrm  $7, %xmm2, %xmm5
+
+// CHECK: vpcmpestrm  $7, (%eax), %xmm5
+// CHECK: encoding: [0xc4,0xe3,0x79,0x60,0x28,0x07]
+          vpcmpestrm  $7, (%eax), %xmm5
+
+// CHECK: vpcmpistri  $7, %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe3,0x79,0x63,0xea,0x07]
+          vpcmpistri  $7, %xmm2, %xmm5
+
+// CHECK: vpcmpistri  $7, (%eax), %xmm5
+// CHECK: encoding: [0xc4,0xe3,0x79,0x63,0x28,0x07]
+          vpcmpistri  $7, (%eax), %xmm5
+
+// CHECK: vpcmpestri  $7, %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe3,0x79,0x61,0xea,0x07]
+          vpcmpestri  $7, %xmm2, %xmm5
+
+// CHECK: vpcmpestri  $7, (%eax), %xmm5
+// CHECK: encoding: [0xc4,0xe3,0x79,0x61,0x28,0x07]
+          vpcmpestri  $7, (%eax), %xmm5
+
+// CHECK: vaesimc  %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe2,0x79,0xdb,0xea]
+          vaesimc  %xmm2, %xmm5
+
+// CHECK: vaesimc  (%eax), %xmm2
+// CHECK: encoding: [0xc4,0xe2,0x79,0xdb,0x10]
+          vaesimc  (%eax), %xmm2
+
+// CHECK: vaesenc  %xmm2, %xmm5, %xmm1
+// CHECK: encoding: [0xc4,0xe2,0x51,0xdc,0xca]
+          vaesenc  %xmm2, %xmm5, %xmm1
+
+// CHECK: vaesenc  (%eax), %xmm5, %xmm3
+// CHECK: encoding: [0xc4,0xe2,0x51,0xdc,0x18]
+          vaesenc  (%eax), %xmm5, %xmm3
+
+// CHECK: vaesenclast  %xmm2, %xmm5, %xmm1
+// CHECK: encoding: [0xc4,0xe2,0x51,0xdd,0xca]
+          vaesenclast  %xmm2, %xmm5, %xmm1
+
+// CHECK: vaesenclast  (%eax), %xmm5, %xmm3
+// CHECK: encoding: [0xc4,0xe2,0x51,0xdd,0x18]
+          vaesenclast  (%eax), %xmm5, %xmm3
+
+// CHECK: vaesdec  %xmm2, %xmm5, %xmm1
+// CHECK: encoding: [0xc4,0xe2,0x51,0xde,0xca]
+          vaesdec  %xmm2, %xmm5, %xmm1
+
+// CHECK: vaesdec  (%eax), %xmm5, %xmm3
+// CHECK: encoding: [0xc4,0xe2,0x51,0xde,0x18]
+          vaesdec  (%eax), %xmm5, %xmm3
+
+// CHECK: vaesdeclast  %xmm2, %xmm5, %xmm1
+// CHECK: encoding: [0xc4,0xe2,0x51,0xdf,0xca]
+          vaesdeclast  %xmm2, %xmm5, %xmm1
+
+// CHECK: vaesdeclast  (%eax), %xmm5, %xmm3
+// CHECK: encoding: [0xc4,0xe2,0x51,0xdf,0x18]
+          vaesdeclast  (%eax), %xmm5, %xmm3
+
+// CHECK: vaeskeygenassist  $7, %xmm2, %xmm5
+// CHECK: encoding: [0xc4,0xe3,0x79,0xdf,0xea,0x07]
+          vaeskeygenassist  $7, %xmm2, %xmm5
+
+// CHECK: vaeskeygenassist  $7, (%eax), %xmm5
+// CHECK: encoding: [0xc4,0xe3,0x79,0xdf,0x28,0x07]
+          vaeskeygenassist  $7, (%eax), %xmm5
+
+// CHECK: vcmpps  $8, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x08]
+          vcmpeq_uqps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $9, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x09]
+          vcmpngeps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $10, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x0a]
+          vcmpngtps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $11, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x0b]
+          vcmpfalseps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $12, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x0c]
+          vcmpneq_oqps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $13, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x0d]
+          vcmpgeps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $14, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x0e]
+          vcmpgtps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $15, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x0f]
+          vcmptrueps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $16, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x10]
+          vcmpeq_osps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $17, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x11]
+          vcmplt_oqps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $18, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x12]
+          vcmple_oqps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $19, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x13]
+          vcmpunord_sps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $20, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x14]
+          vcmpneq_usps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $21, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x15]
+          vcmpnlt_uqps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $22, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x16]
+          vcmpnle_uqps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $23, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x17]
+          vcmpord_sps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $24, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x18]
+          vcmpeq_usps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $25, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x19]
+          vcmpnge_uqps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $26, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x1a]
+          vcmpngt_uqps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $27, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x1b]
+          vcmpfalse_osps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $28, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x1c]
+          vcmpneq_osps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $29, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x1d]
+          vcmpge_oqps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $30, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x1e]
+          vcmpgt_oqps %xmm1, %xmm2, %xmm3
+
+// CHECK: vcmpps  $31, %xmm1, %xmm2, %xmm3
+// CHECK: encoding: [0xc5,0xe8,0xc2,0xd9,0x1f]
+          vcmptrue_usps %xmm1, %xmm2, %xmm3
+
+// CHECK: vmovaps  (%eax), %ymm2
+// CHECK: encoding: [0xc5,0xfc,0x28,0x10]
+          vmovaps  (%eax), %ymm2
+
+// CHECK: vmovaps  %ymm1, %ymm2
+// CHECK: encoding: [0xc5,0xfc,0x28,0xd1]
+          vmovaps  %ymm1, %ymm2
+
+// CHECK: vmovaps  %ymm1, (%eax)
+// CHECK: encoding: [0xc5,0xfc,0x29,0x08]
+          vmovaps  %ymm1, (%eax)
+
+// CHECK: vmovapd  (%eax), %ymm2
+// CHECK: encoding: [0xc5,0xfd,0x28,0x10]
+          vmovapd  (%eax), %ymm2
+
+// CHECK: vmovapd  %ymm1, %ymm2
+// CHECK: encoding: [0xc5,0xfd,0x28,0xd1]
+          vmovapd  %ymm1, %ymm2
+
+// CHECK: vmovapd  %ymm1, (%eax)
+// CHECK: encoding: [0xc5,0xfd,0x29,0x08]
+          vmovapd  %ymm1, (%eax)
+
+// CHECK: vmovups  (%eax), %ymm2
+// CHECK: encoding: [0xc5,0xfc,0x10,0x10]
+          vmovups  (%eax), %ymm2
+
+// CHECK: vmovups  %ymm1, %ymm2
+// CHECK: encoding: [0xc5,0xfc,0x10,0xd1]
+          vmovups  %ymm1, %ymm2
+
+// CHECK: vmovups  %ymm1, (%eax)
+// CHECK: encoding: [0xc5,0xfc,0x11,0x08]
+          vmovups  %ymm1, (%eax)
+
+// CHECK: vmovupd  (%eax), %ymm2
+// CHECK: encoding: [0xc5,0xfd,0x10,0x10]
+          vmovupd  (%eax), %ymm2
+
+// CHECK: vmovupd  %ymm1, %ymm2
+// CHECK: encoding: [0xc5,0xfd,0x10,0xd1]
+          vmovupd  %ymm1, %ymm2
+
+// CHECK: vmovupd  %ymm1, (%eax)
+// CHECK: encoding: [0xc5,0xfd,0x11,0x08]
+          vmovupd  %ymm1, (%eax)
+
+// CHECK: vunpckhps  %ymm1, %ymm2, %ymm4
+// CHECK: encoding: [0xc5,0xec,0x15,0xe1]
+          vunpckhps  %ymm1, %ymm2, %ymm4
+
+// CHECK: vunpckhpd  %ymm1, %ymm2, %ymm4
+// CHECK: encoding: [0xc5,0xed,0x15,0xe1]
+          vunpckhpd  %ymm1, %ymm2, %ymm4
+
+// CHECK: vunpcklps  %ymm1, %ymm2, %ymm4
+// CHECK: encoding: [0xc5,0xec,0x14,0xe1]
+          vunpcklps  %ymm1, %ymm2, %ymm4
+
+// CHECK: vunpcklpd  %ymm1, %ymm2, %ymm4
+// CHECK: encoding: [0xc5,0xed,0x14,0xe1]
+          vunpcklpd  %ymm1, %ymm2, %ymm4
+
+// CHECK: vunpckhps  -4(%ebx,%ecx,8), %ymm2, %ymm5
+// CHECK: encoding: [0xc5,0xec,0x15,0x6c,0xcb,0xfc]
+          vunpckhps  -4(%ebx,%ecx,8), %ymm2, %ymm5
+
+// CHECK: vunpckhpd  -4(%ebx,%ecx,8), %ymm2, %ymm5
+// CHECK: encoding: [0xc5,0xed,0x15,0x6c,0xcb,0xfc]
+          vunpckhpd  -4(%ebx,%ecx,8), %ymm2, %ymm5
+
+// CHECK: vunpcklps  -4(%ebx,%ecx,8), %ymm2, %ymm5
+// CHECK: encoding: [0xc5,0xec,0x14,0x6c,0xcb,0xfc]
+          vunpcklps  -4(%ebx,%ecx,8), %ymm2, %ymm5
+
+// CHECK: vunpcklpd  -4(%ebx,%ecx,8), %ymm2, %ymm5
+// CHECK: encoding: [0xc5,0xed,0x14,0x6c,0xcb,0xfc]
+          vunpcklpd  -4(%ebx,%ecx,8), %ymm2, %ymm5
+
+// CHECK: vmovntdq  %ymm1, (%eax)
+// CHECK: encoding: [0xc5,0xfd,0xe7,0x08]
+          vmovntdq  %ymm1, (%eax)
+
+// CHECK: vmovntpd  %ymm1, (%eax)
+// CHECK: encoding: [0xc5,0xfd,0x2b,0x08]
+          vmovntpd  %ymm1, (%eax)
+
+// CHECK: vmovntps  %ymm1, (%eax)
+// CHECK: encoding: [0xc5,0xfc,0x2b,0x08]
+          vmovntps  %ymm1, (%eax)
+
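A note on the vcmp* tests above: spellings such as vcmpeq_uqps are assembler
aliases that fold the comparison predicate into the vcmpps immediate, which is
why every CHECK line matches the canonical vcmpps form. A minimal restatement
of the equivalence, using the eq_uq predicate from the first pair (both
spellings should assemble to the same bytes):

          vcmpeq_uqps %xmm1, %xmm2, %xmm3   // alias spelling
          vcmpps  $8, %xmm1, %xmm2, %xmm3   // canonical spelling; same bytes,
                                            // [0xc5,0xe8,0xc2,0xd9,0x08]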

Modified: llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_64-encoding.s
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_64-encoding.s?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_64-encoding.s (original)
+++ llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_64-encoding.s Sat Jul 10 00:06:30 2010
@@ -1,5 +1,15 @@
 // RUN: llvm-mc -triple x86_64-unknown-unknown --show-encoding %s | FileCheck %s
 
+// PR7195
+// CHECK: callw 42
+// CHECK: encoding: [0x66,0xe8,A,A]
+       callw 42
+       
+// rdar://8127102
+// CHECK: movq	%gs:(%rdi), %rax
+// CHECK: encoding: [0x65,0x48,0x8b,0x07]
+movq	%gs:(%rdi), %rax
+
 // CHECK: crc32b 	%bl, %eax
 // CHECK:  encoding: [0xf2,0x0f,0x38,0xf0,0xc3]
         crc32b	%bl, %eax
@@ -1974,3 +1984,490 @@
 // CHECK: encoding: [0xc4,0x63,0x51,0x41,0x18,0x03]
           vdppd  $3, (%rax), %xmm5, %xmm11
 
+// CHECK: vblendvpd  %xmm12, %xmm5, %xmm11, %xmm13
+// CHECK: encoding: [0xc4,0x63,0x21,0x4b,0xed,0xc0]
+          vblendvpd  %xmm12, %xmm5, %xmm11, %xmm13
+
+// CHECK: vblendvpd  %xmm12, (%rax), %xmm11, %xmm13
+// CHECK: encoding: [0xc4,0x63,0x21,0x4b,0x28,0xc0]
+          vblendvpd  %xmm12, (%rax), %xmm11, %xmm13
+
+// CHECK: vblendvps  %xmm12, %xmm5, %xmm11, %xmm13
+// CHECK: encoding: [0xc4,0x63,0x21,0x4a,0xed,0xc0]
+          vblendvps  %xmm12, %xmm5, %xmm11, %xmm13
+
+// CHECK: vblendvps  %xmm12, (%rax), %xmm11, %xmm13
+// CHECK: encoding: [0xc4,0x63,0x21,0x4a,0x28,0xc0]
+          vblendvps  %xmm12, (%rax), %xmm11, %xmm13
+
+// CHECK: vpblendvb  %xmm12, %xmm5, %xmm11, %xmm13
+// CHECK: encoding: [0xc4,0x63,0x21,0x4c,0xed,0xc0]
+          vpblendvb  %xmm12, %xmm5, %xmm11, %xmm13
+
+// CHECK: vpblendvb  %xmm12, (%rax), %xmm11, %xmm13
+// CHECK: encoding: [0xc4,0x63,0x21,0x4c,0x28,0xc0]
+          vpblendvb  %xmm12, (%rax), %xmm11, %xmm13
+
+// CHECK: vpmovsxbw  %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x42,0x79,0x20,0xd4]
+          vpmovsxbw  %xmm12, %xmm10
+
+// CHECK: vpmovsxbw  (%rax), %xmm12
+// CHECK: encoding: [0xc4,0x62,0x79,0x20,0x20]
+          vpmovsxbw  (%rax), %xmm12
+
+// CHECK: vpmovsxwd  %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x42,0x79,0x23,0xd4]
+          vpmovsxwd  %xmm12, %xmm10
+
+// CHECK: vpmovsxwd  (%rax), %xmm12
+// CHECK: encoding: [0xc4,0x62,0x79,0x23,0x20]
+          vpmovsxwd  (%rax), %xmm12
+
+// CHECK: vpmovsxdq  %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x42,0x79,0x25,0xd4]
+          vpmovsxdq  %xmm12, %xmm10
+
+// CHECK: vpmovsxdq  (%rax), %xmm12
+// CHECK: encoding: [0xc4,0x62,0x79,0x25,0x20]
+          vpmovsxdq  (%rax), %xmm12
+
+// CHECK: vpmovzxbw  %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x42,0x79,0x30,0xd4]
+          vpmovzxbw  %xmm12, %xmm10
+
+// CHECK: vpmovzxbw  (%rax), %xmm12
+// CHECK: encoding: [0xc4,0x62,0x79,0x30,0x20]
+          vpmovzxbw  (%rax), %xmm12
+
+// CHECK: vpmovzxwd  %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x42,0x79,0x33,0xd4]
+          vpmovzxwd  %xmm12, %xmm10
+
+// CHECK: vpmovzxwd  (%rax), %xmm12
+// CHECK: encoding: [0xc4,0x62,0x79,0x33,0x20]
+          vpmovzxwd  (%rax), %xmm12
+
+// CHECK: vpmovzxdq  %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x42,0x79,0x35,0xd4]
+          vpmovzxdq  %xmm12, %xmm10
+
+// CHECK: vpmovzxdq  (%rax), %xmm12
+// CHECK: encoding: [0xc4,0x62,0x79,0x35,0x20]
+          vpmovzxdq  (%rax), %xmm12
+
+// CHECK: vpmovsxbq  %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x42,0x79,0x22,0xd4]
+          vpmovsxbq  %xmm12, %xmm10
+
+// CHECK: vpmovsxbq  (%rax), %xmm12
+// CHECK: encoding: [0xc4,0x62,0x79,0x22,0x20]
+          vpmovsxbq  (%rax), %xmm12
+
+// CHECK: vpmovzxbq  %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x42,0x79,0x32,0xd4]
+          vpmovzxbq  %xmm12, %xmm10
+
+// CHECK: vpmovzxbq  (%rax), %xmm12
+// CHECK: encoding: [0xc4,0x62,0x79,0x32,0x20]
+          vpmovzxbq  (%rax), %xmm12
+
+// CHECK: vpmovsxbd  %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x42,0x79,0x21,0xd4]
+          vpmovsxbd  %xmm12, %xmm10
+
+// CHECK: vpmovsxbd  (%rax), %xmm12
+// CHECK: encoding: [0xc4,0x62,0x79,0x21,0x20]
+          vpmovsxbd  (%rax), %xmm12
+
+// CHECK: vpmovsxwq  %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x42,0x79,0x24,0xd4]
+          vpmovsxwq  %xmm12, %xmm10
+
+// CHECK: vpmovsxwq  (%rax), %xmm12
+// CHECK: encoding: [0xc4,0x62,0x79,0x24,0x20]
+          vpmovsxwq  (%rax), %xmm12
+
+// CHECK: vpmovzxbd  %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x42,0x79,0x31,0xd4]
+          vpmovzxbd  %xmm12, %xmm10
+
+// CHECK: vpmovzxbd  (%rax), %xmm12
+// CHECK: encoding: [0xc4,0x62,0x79,0x31,0x20]
+          vpmovzxbd  (%rax), %xmm12
+
+// CHECK: vpmovzxwq  %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x42,0x79,0x34,0xd4]
+          vpmovzxwq  %xmm12, %xmm10
+
+// CHECK: vpmovzxwq  (%rax), %xmm12
+// CHECK: encoding: [0xc4,0x62,0x79,0x34,0x20]
+          vpmovzxwq  (%rax), %xmm12
+
+// CHECK: vpextrw  $7, %xmm12, %eax
+// CHECK: encoding: [0xc4,0xc1,0x79,0xc5,0xc4,0x07]
+          vpextrw  $7, %xmm12, %eax
+
+// CHECK: vpextrw  $7, %xmm12, (%rax)
+// CHECK: encoding: [0xc4,0x63,0x79,0x15,0x20,0x07]
+          vpextrw  $7, %xmm12, (%rax)
+
+// CHECK: vpextrd  $7, %xmm12, %eax
+// CHECK: encoding: [0xc4,0x63,0x79,0x16,0xe0,0x07]
+          vpextrd  $7, %xmm12, %eax
+
+// CHECK: vpextrd  $7, %xmm12, (%rax)
+// CHECK: encoding: [0xc4,0x63,0x79,0x16,0x20,0x07]
+          vpextrd  $7, %xmm12, (%rax)
+
+// CHECK: vpextrb  $7, %xmm12, %eax
+// CHECK: encoding: [0xc4,0x63,0x79,0x14,0xe0,0x07]
+          vpextrb  $7, %xmm12, %eax
+
+// CHECK: vpextrb  $7, %xmm12, (%rax)
+// CHECK: encoding: [0xc4,0x63,0x79,0x14,0x20,0x07]
+          vpextrb  $7, %xmm12, (%rax)
+
+// CHECK: vpextrq  $7, %xmm12, %rcx
+// CHECK: encoding: [0xc4,0x63,0xf9,0x16,0xe1,0x07]
+          vpextrq  $7, %xmm12, %rcx
+
+// CHECK: vpextrq  $7, %xmm12, (%rcx)
+// CHECK: encoding: [0xc4,0x63,0xf9,0x16,0x21,0x07]
+          vpextrq  $7, %xmm12, (%rcx)
+
+// CHECK: vextractps  $7, %xmm12, (%rax)
+// CHECK: encoding: [0xc4,0x63,0x79,0x17,0x20,0x07]
+          vextractps  $7, %xmm12, (%rax)
+
+// CHECK: vextractps  $7, %xmm12, %eax
+// CHECK: encoding: [0xc4,0x63,0x79,0x17,0xe0,0x07]
+          vextractps  $7, %xmm12, %eax
+
+// CHECK: vpinsrw  $7, %eax, %xmm12, %xmm10
+// CHECK: encoding: [0xc5,0x19,0xc4,0xd0,0x07]
+          vpinsrw  $7, %eax, %xmm12, %xmm10
+
+// CHECK: vpinsrw  $7, (%rax), %xmm12, %xmm10
+// CHECK: encoding: [0xc5,0x19,0xc4,0x10,0x07]
+          vpinsrw  $7, (%rax), %xmm12, %xmm10
+
+// CHECK: vpinsrb  $7, %eax, %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x63,0x19,0x20,0xd0,0x07]
+          vpinsrb  $7, %eax, %xmm12, %xmm10
+
+// CHECK: vpinsrb  $7, (%rax), %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x63,0x19,0x20,0x10,0x07]
+          vpinsrb  $7, (%rax), %xmm12, %xmm10
+
+// CHECK: vpinsrd  $7, %eax, %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x63,0x19,0x22,0xd0,0x07]
+          vpinsrd  $7, %eax, %xmm12, %xmm10
+
+// CHECK: vpinsrd  $7, (%rax), %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x63,0x19,0x22,0x10,0x07]
+          vpinsrd  $7, (%rax), %xmm12, %xmm10
+
+// CHECK: vpinsrq  $7, %rax, %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x63,0x99,0x22,0xd0,0x07]
+          vpinsrq  $7, %rax, %xmm12, %xmm10
+
+// CHECK: vpinsrq  $7, (%rax), %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x63,0x99,0x22,0x10,0x07]
+          vpinsrq  $7, (%rax), %xmm12, %xmm10
+
+// CHECK: vinsertps  $7, %xmm12, %xmm10, %xmm11
+// CHECK: encoding: [0xc4,0x43,0x29,0x21,0xdc,0x07]
+          vinsertps  $7, %xmm12, %xmm10, %xmm11
+
+// CHECK: vinsertps  $7, (%rax), %xmm10, %xmm11
+// CHECK: encoding: [0xc4,0x63,0x29,0x21,0x18,0x07]
+          vinsertps  $7, (%rax), %xmm10, %xmm11
+
+// CHECK: vptest  %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x42,0x79,0x17,0xd4]
+          vptest  %xmm12, %xmm10
+
+// CHECK: vptest  (%rax), %xmm12
+// CHECK: encoding: [0xc4,0x62,0x79,0x17,0x20]
+          vptest  (%rax), %xmm12
+
+// CHECK: vmovntdqa  (%rax), %xmm12
+// CHECK: encoding: [0xc4,0x62,0x79,0x2a,0x20]
+          vmovntdqa  (%rax), %xmm12
+
+// CHECK: vpcmpgtq  %xmm12, %xmm10, %xmm11
+// CHECK: encoding: [0xc4,0x42,0x29,0x37,0xdc]
+          vpcmpgtq  %xmm12, %xmm10, %xmm11
+
+// CHECK: vpcmpgtq  (%rax), %xmm10, %xmm13
+// CHECK: encoding: [0xc4,0x62,0x29,0x37,0x28]
+          vpcmpgtq  (%rax), %xmm10, %xmm13
+
+// CHECK: vpcmpistrm  $7, %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x43,0x79,0x62,0xd4,0x07]
+          vpcmpistrm  $7, %xmm12, %xmm10
+
+// CHECK: vpcmpistrm  $7, (%rax), %xmm10
+// CHECK: encoding: [0xc4,0x63,0x79,0x62,0x10,0x07]
+          vpcmpistrm  $7, (%rax), %xmm10
+
+// CHECK: vpcmpestrm  $7, %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x43,0x79,0x60,0xd4,0x07]
+          vpcmpestrm  $7, %xmm12, %xmm10
+
+// CHECK: vpcmpestrm  $7, (%rax), %xmm10
+// CHECK: encoding: [0xc4,0x63,0x79,0x60,0x10,0x07]
+          vpcmpestrm  $7, (%rax), %xmm10
+
+// CHECK: vpcmpistri  $7, %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x43,0x79,0x63,0xd4,0x07]
+          vpcmpistri  $7, %xmm12, %xmm10
+
+// CHECK: vpcmpistri  $7, (%rax), %xmm10
+// CHECK: encoding: [0xc4,0x63,0x79,0x63,0x10,0x07]
+          vpcmpistri  $7, (%rax), %xmm10
+
+// CHECK: vpcmpestri  $7, %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x43,0x79,0x61,0xd4,0x07]
+          vpcmpestri  $7, %xmm12, %xmm10
+
+// CHECK: vpcmpestri  $7, (%rax), %xmm10
+// CHECK: encoding: [0xc4,0x63,0x79,0x61,0x10,0x07]
+          vpcmpestri  $7, (%rax), %xmm10
+
+// CHECK: vaesimc  %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x42,0x79,0xdb,0xd4]
+          vaesimc  %xmm12, %xmm10
+
+// CHECK: vaesimc  (%rax), %xmm12
+// CHECK: encoding: [0xc4,0x62,0x79,0xdb,0x20]
+          vaesimc  (%rax), %xmm12
+
+// CHECK: vaesenc  %xmm12, %xmm10, %xmm11
+// CHECK: encoding: [0xc4,0x42,0x29,0xdc,0xdc]
+          vaesenc  %xmm12, %xmm10, %xmm11
+
+// CHECK: vaesenc  (%rax), %xmm10, %xmm13
+// CHECK: encoding: [0xc4,0x62,0x29,0xdc,0x28]
+          vaesenc  (%rax), %xmm10, %xmm13
+
+// CHECK: vaesenclast  %xmm12, %xmm10, %xmm11
+// CHECK: encoding: [0xc4,0x42,0x29,0xdd,0xdc]
+          vaesenclast  %xmm12, %xmm10, %xmm11
+
+// CHECK: vaesenclast  (%rax), %xmm10, %xmm13
+// CHECK: encoding: [0xc4,0x62,0x29,0xdd,0x28]
+          vaesenclast  (%rax), %xmm10, %xmm13
+
+// CHECK: vaesdec  %xmm12, %xmm10, %xmm11
+// CHECK: encoding: [0xc4,0x42,0x29,0xde,0xdc]
+          vaesdec  %xmm12, %xmm10, %xmm11
+
+// CHECK: vaesdec  (%rax), %xmm10, %xmm13
+// CHECK: encoding: [0xc4,0x62,0x29,0xde,0x28]
+          vaesdec  (%rax), %xmm10, %xmm13
+
+// CHECK: vaesdeclast  %xmm12, %xmm10, %xmm11
+// CHECK: encoding: [0xc4,0x42,0x29,0xdf,0xdc]
+          vaesdeclast  %xmm12, %xmm10, %xmm11
+
+// CHECK: vaesdeclast  (%rax), %xmm10, %xmm13
+// CHECK: encoding: [0xc4,0x62,0x29,0xdf,0x28]
+          vaesdeclast  (%rax), %xmm10, %xmm13
+
+// CHECK: vaeskeygenassist  $7, %xmm12, %xmm10
+// CHECK: encoding: [0xc4,0x43,0x79,0xdf,0xd4,0x07]
+          vaeskeygenassist  $7, %xmm12, %xmm10
+
+// CHECK: vaeskeygenassist  $7, (%rax), %xmm10
+// CHECK: encoding: [0xc4,0x63,0x79,0xdf,0x10,0x07]
+          vaeskeygenassist  $7, (%rax), %xmm10
+
+// CHECK: vcmpps  $8, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x08]
+          vcmpeq_uqps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $9, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x09]
+          vcmpngeps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $10, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x0a]
+          vcmpngtps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $11, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x0b]
+          vcmpfalseps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $12, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x0c]
+          vcmpneq_oqps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $13, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x0d]
+          vcmpgeps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $14, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x0e]
+          vcmpgtps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $15, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x0f]
+          vcmptrueps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $16, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x10]
+          vcmpeq_osps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $17, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x11]
+          vcmplt_oqps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $18, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x12]
+          vcmple_oqps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $19, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x13]
+          vcmpunord_sps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $20, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x14]
+          vcmpneq_usps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $21, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x15]
+          vcmpnlt_uqps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $22, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x16]
+          vcmpnle_uqps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $23, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x17]
+          vcmpord_sps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $24, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x18]
+          vcmpeq_usps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $25, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x19]
+          vcmpnge_uqps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $26, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x1a]
+          vcmpngt_uqps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $27, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x1b]
+          vcmpfalse_osps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $28, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x1c]
+          vcmpneq_osps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $29, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x1d]
+          vcmpge_oqps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $30, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x1e]
+          vcmpgt_oqps %xmm11, %xmm12, %xmm13
+
+// CHECK: vcmpps  $31, %xmm11, %xmm12, %xmm13
+// CHECK: encoding: [0xc4,0x41,0x18,0xc2,0xeb,0x1f]
+          vcmptrue_usps %xmm11, %xmm12, %xmm13
+
+// CHECK: vmovaps  (%rax), %ymm12
+// CHECK: encoding: [0xc5,0x7c,0x28,0x20]
+          vmovaps  (%rax), %ymm12
+
+// CHECK: vmovaps  %ymm11, %ymm12
+// CHECK: encoding: [0xc4,0x41,0x7c,0x28,0xe3]
+          vmovaps  %ymm11, %ymm12
+
+// CHECK: vmovaps  %ymm11, (%rax)
+// CHECK: encoding: [0xc5,0x7c,0x29,0x18]
+          vmovaps  %ymm11, (%rax)
+
+// CHECK: vmovapd  (%rax), %ymm12
+// CHECK: encoding: [0xc5,0x7d,0x28,0x20]
+          vmovapd  (%rax), %ymm12
+
+// CHECK: vmovapd  %ymm11, %ymm12
+// CHECK: encoding: [0xc4,0x41,0x7d,0x28,0xe3]
+          vmovapd  %ymm11, %ymm12
+
+// CHECK: vmovapd  %ymm11, (%rax)
+// CHECK: encoding: [0xc5,0x7d,0x29,0x18]
+          vmovapd  %ymm11, (%rax)
+
+// CHECK: vmovups  (%rax), %ymm12
+// CHECK: encoding: [0xc5,0x7c,0x10,0x20]
+          vmovups  (%rax), %ymm12
+
+// CHECK: vmovups  %ymm11, %ymm12
+// CHECK: encoding: [0xc4,0x41,0x7c,0x10,0xe3]
+          vmovups  %ymm11, %ymm12
+
+// CHECK: vmovups  %ymm11, (%rax)
+// CHECK: encoding: [0xc5,0x7c,0x11,0x18]
+          vmovups  %ymm11, (%rax)
+
+// CHECK: vmovupd  (%rax), %ymm12
+// CHECK: encoding: [0xc5,0x7d,0x10,0x20]
+          vmovupd  (%rax), %ymm12
+
+// CHECK: vmovupd  %ymm11, %ymm12
+// CHECK: encoding: [0xc4,0x41,0x7d,0x10,0xe3]
+          vmovupd  %ymm11, %ymm12
+
+// CHECK: vmovupd  %ymm11, (%rax)
+// CHECK: encoding: [0xc5,0x7d,0x11,0x18]
+          vmovupd  %ymm11, (%rax)
+
+// CHECK: vunpckhps  %ymm11, %ymm12, %ymm4
+// CHECK: encoding: [0xc4,0xc1,0x1c,0x15,0xe3]
+          vunpckhps  %ymm11, %ymm12, %ymm4
+
+// CHECK: vunpckhpd  %ymm11, %ymm12, %ymm4
+// CHECK: encoding: [0xc4,0xc1,0x1d,0x15,0xe3]
+          vunpckhpd  %ymm11, %ymm12, %ymm4
+
+// CHECK: vunpcklps  %ymm11, %ymm12, %ymm4
+// CHECK: encoding: [0xc4,0xc1,0x1c,0x14,0xe3]
+          vunpcklps  %ymm11, %ymm12, %ymm4
+
+// CHECK: vunpcklpd  %ymm11, %ymm12, %ymm4
+// CHECK: encoding: [0xc4,0xc1,0x1d,0x14,0xe3]
+          vunpcklpd  %ymm11, %ymm12, %ymm4
+
+// CHECK: vunpckhps  -4(%rbx,%rcx,8), %ymm12, %ymm10
+// CHECK: encoding: [0xc5,0x1c,0x15,0x54,0xcb,0xfc]
+          vunpckhps  -4(%rbx,%rcx,8), %ymm12, %ymm10
+
+// CHECK: vunpckhpd  -4(%rbx,%rcx,8), %ymm12, %ymm10
+// CHECK: encoding: [0xc5,0x1d,0x15,0x54,0xcb,0xfc]
+          vunpckhpd  -4(%rbx,%rcx,8), %ymm12, %ymm10
+
+// CHECK: vunpcklps  -4(%rbx,%rcx,8), %ymm12, %ymm10
+// CHECK: encoding: [0xc5,0x1c,0x14,0x54,0xcb,0xfc]
+          vunpcklps  -4(%rbx,%rcx,8), %ymm12, %ymm10
+
+// CHECK: vunpcklpd  -4(%rbx,%rcx,8), %ymm12, %ymm10
+// CHECK: encoding: [0xc5,0x1d,0x14,0x54,0xcb,0xfc]
+          vunpcklpd  -4(%rbx,%rcx,8), %ymm12, %ymm10
+
+// CHECK: vmovntdq  %ymm11, (%rax)
+// CHECK: encoding: [0xc5,0x7d,0xe7,0x18]
+          vmovntdq  %ymm11, (%rax)
+
+// CHECK: vmovntpd  %ymm11, (%rax)
+// CHECK: encoding: [0xc5,0x7d,0x2b,0x18]
+          vmovntpd  %ymm11, (%rax)
+
+// CHECK: vmovntps  %ymm11, (%rax)
+// CHECK: encoding: [0xc5,0x7c,0x2b,0x18]
+          vmovntps  %ymm11, (%rax)
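One detail worth calling out in the x86-64 block above: whether the assembler
can use the two-byte VEX prefix (0xc5) or must fall back to the three-byte
form (0xc4) depends on which operands live in the extended registers. The
two-byte prefix can only extend the ModRM reg field, so as soon as the r/m,
base, or index register is xmm8-xmm15/ymm8-ymm15 the three-byte prefix is
required. Two lines from the test show both cases (encodings copied from the
CHECKs above):

          vmovaps  (%rax), %ymm12    // 2-byte VEX: [0xc5,0x7c,0x28,0x20]
          vmovaps  %ymm11, %ymm12    // 3-byte VEX: [0xc4,0x41,0x7c,0x28,0xe3]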

Modified: llvm/branches/wendling/eh/test/Transforms/InstCombine/select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Transforms/InstCombine/select.ll?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Transforms/InstCombine/select.ll (original)
+++ llvm/branches/wendling/eh/test/Transforms/InstCombine/select.ll Sat Jul 10 00:06:30 2010
@@ -438,3 +438,35 @@
 ; CHECK: @test34
 ; CHECK: ret i32 %x
 }
+
+define i32 @test35(i32 %x) {
+  %cmp = icmp sge i32 %x, 0
+  %cond = select i1 %cmp, i32 60, i32 100
+  ret i32 %cond
+; CHECK: @test35
+; CHECK: ashr i32 %x, 31
+; CHECK: and i32 {{.*}}, 40
+; CHECK: add i32 {{.*}}, 60
+; CHECK: ret
+}
+
+define i32 @test36(i32 %x) {
+  %cmp = icmp slt i32 %x, 0
+  %cond = select i1 %cmp, i32 60, i32 100
+  ret i32 %cond
+; CHECK: @test36
+; CHECK: ashr i32 %x, 31
+; CHECK: and i32 {{.*}}, -40
+; CHECK: add i32 {{.*}}, 100
+; CHECK: ret
+}
+
+define i32 @test37(i32 %x) {
+  %cmp = icmp sgt i32 %x, -1
+  %cond = select i1 %cmp, i32 1, i32 -1
+  ret i32 %cond
+; CHECK: @test37
+; CHECK: ashr i32 %x, 31
+; CHECK: or i32 {{.*}}, 1
+; CHECK: ret
+}
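+
+; The three new tests pin down the branch-free form InstCombine is expected
+; to produce: a select on the sign of %x is rewritten as arithmetic on the
+; sign mask (ashr by 31 yields 0 or -1). A sketch of the output implied by
+; test35's CHECK lines, with illustrative value names (the actual pass
+; output may name them differently):
+;
+;   define i32 @test35_expected(i32 %x) {
+;     %sign = ashr i32 %x, 31     ; 0 when %x >= 0, -1 when %x < 0
+;     %off = and i32 %sign, 40    ; selects 0 or 40
+;     %res = add i32 %off, 60     ; 60 when %x >= 0, 100 otherwise
+;     ret i32 %res
+;   }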

Modified: llvm/branches/wendling/eh/utils/TableGen/EDEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/TableGen/EDEmitter.cpp?rev=108038&r1=108037&r2=108038&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/TableGen/EDEmitter.cpp (original)
+++ llvm/branches/wendling/eh/utils/TableGen/EDEmitter.cpp Sat Jul 10 00:06:30 2010
@@ -306,6 +306,7 @@
   REG("RFP64");
   REG("RFP80");
   REG("VR128");
+  REG("VR256");
   REG("RST");
   REG("SEGMENT_REG");
   REG("DEBUG_REG");
@@ -339,6 +340,7 @@
   MEM("opaque80mem");
   MEM("i128mem");
   MEM("f128mem");
+  MEM("f256mem");
   MEM("opaque512mem");
   
   // all R, I, R, I
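On the EDEmitter hunk: REG() and MEM() are helper macros local to the operand
classifier, and every TableGen operand class has to be enumerated there or the
enhanced-disassembly tables cannot categorize instructions that use it; the
two additions cover the 256-bit AVX register and memory operand classes
(VR256, f256mem). A hypothetical sketch of the macros' shape, purely for
orientation (the real definitions in EDEmitter.cpp may differ):

  // Illustrative only -- not the actual macro definitions.
  #define REG(str) if (name == str) return kOperandTypeRegister;
  #define MEM(str) if (name == str) return kOperandTypeMemory;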




