[llvm-commits] [llvm] r53163 [3/7] - in /llvm/branches/non-call-eh: ./ autoconf/ bindings/ocaml/llvm/ docs/ docs/CommandGuide/ docs/tutorial/ examples/BrainF/ examples/Fibonacci/ examples/HowToUseJIT/ examples/ModuleMaker/ examples/ParallelJIT/ include/llvm-c/ include/llvm/ include/llvm/ADT/ include/llvm/Analysis/ include/llvm/Bitcode/ include/llvm/CodeGen/ include/llvm/Debugger/ include/llvm/ExecutionEngine/ include/llvm/Support/ include/llvm/System/ include/llvm/Target/ include/llvm/Transforms/ include/llvm/Transform...

Nick Lewycky <nicholas at mxc.ca>
Sun Jul 6 13:45:51 PDT 2008


Modified: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Sun Jul  6 15:45:41 2008
@@ -29,6 +29,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/MathExtras.h"
 #include <algorithm>
+#include <set>
 using namespace llvm;
 
 STATISTIC(NodesCombined   , "Number of dag nodes combined");
@@ -177,6 +178,7 @@
     SDOperand visitSIGN_EXTEND_INREG(SDNode *N);
     SDOperand visitTRUNCATE(SDNode *N);
     SDOperand visitBIT_CONVERT(SDNode *N);
+    SDOperand visitBUILD_PAIR(SDNode *N);
     SDOperand visitFADD(SDNode *N);
     SDOperand visitFSUB(SDNode *N);
     SDOperand visitFMUL(SDNode *N);
@@ -213,11 +215,12 @@
     SDOperand SimplifySelectCC(SDOperand N0, SDOperand N1, SDOperand N2, 
                                SDOperand N3, ISD::CondCode CC, 
                                bool NotExtCompare = false);
-    SDOperand SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1,
+    SDOperand SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1,
                             ISD::CondCode Cond, bool foldBooleans = true);
     SDOperand SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp, 
                                          unsigned HiOp);
-    SDOperand ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *, MVT::ValueType);
+    SDOperand CombineConsecutiveLoads(SDNode *N, MVT VT);
+    SDOperand ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *, MVT);
     SDOperand BuildSDIV(SDNode *N);
     SDOperand BuildUDIV(SDNode *N);
     SDNode *MatchRotate(SDOperand LHS, SDOperand RHS);
@@ -269,7 +272,7 @@
 public:
   explicit WorkListRemover(DAGCombiner &dc) : DC(dc) {}
   
-  virtual void NodeDeleted(SDNode *N) {
+  virtual void NodeDeleted(SDNode *N, SDNode *E) {
     DC.removeFromWorkList(N);
   }
   
@@ -479,7 +482,7 @@
 }
 
 SDOperand DAGCombiner::ReassociateOps(unsigned Opc, SDOperand N0, SDOperand N1){
-  MVT::ValueType VT = N0.getValueType();
+  MVT VT = N0.getValueType();
   // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one use
   // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2))
   if (N0.getOpcode() == Opc && isa<ConstantSDNode>(N0.getOperand(1))) {
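Most of this patch is a mechanical migration from the old MVT::ValueType enum with free helper functions (MVT::getSizeInBits(VT), MVT::isVector(VT), ...) to the MVT value class with member queries, as in the hunk above. A rough toy model of the new shape, purely illustrative and not the real llvm::MVT:

  #include <cassert>

  // Toy stand-in for llvm::MVT, showing the member-function style that
  // replaces the old free-function helpers.
  struct ToyVT {
    unsigned ScalarBits;   // width of the scalar element
    unsigned NumElts;      // 0 for scalar types

    bool isVector() const { return NumElts != 0; }
    unsigned getSizeInBits() const {
      return isVector() ? ScalarBits * NumElts : ScalarBits;
    }
    bool bitsGT(ToyVT O) const { return getSizeInBits() > O.getSizeInBits(); }
  };

  int main() {
    ToyVT I32 = {32, 0}, V4I32 = {32, 4};
    assert(!I32.isVector() && I32.getSizeInBits() == 32);
    assert(V4I32.isVector() && V4I32.getSizeInBits() == 128);
    assert(V4I32.bitsGT(I32));
    return 0;
  }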
@@ -589,6 +592,7 @@
   AfterLegalize = RunningAfterLegalize;
 
   // Add all the dag nodes to the worklist.
+  WorkList.reserve(DAG.allnodes_size());
   for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
        E = DAG.allnodes_end(); I != E; ++I)
     WorkList.push_back(I);
@@ -710,6 +714,7 @@
   case ISD::SIGN_EXTEND_INREG:  return visitSIGN_EXTEND_INREG(N);
   case ISD::TRUNCATE:           return visitTRUNCATE(N);
   case ISD::BIT_CONVERT:        return visitBIT_CONVERT(N);
+  case ISD::BUILD_PAIR:         return visitBUILD_PAIR(N);
   case ISD::FADD:               return visitFADD(N);
   case ISD::FSUB:               return visitFSUB(N);
   case ISD::FMUL:               return visitFMUL(N);
@@ -883,7 +888,7 @@
 
 static
 SDOperand combineShlAddConstant(SDOperand N0, SDOperand N1, SelectionDAG &DAG) {
-  MVT::ValueType VT = N0.getValueType();
+  MVT VT = N0.getValueType();
   SDOperand N00 = N0.getOperand(0);
   SDOperand N01 = N0.getOperand(1);
   ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N01);
@@ -900,7 +905,7 @@
 static
 SDOperand combineSelectAndUse(SDNode *N, SDOperand Slct, SDOperand OtherOp,
                               SelectionDAG &DAG) {
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   unsigned Opc = N->getOpcode();
   bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
   SDOperand LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
@@ -926,8 +931,8 @@
            cast<ConstantSDNode>(RHS)->isNullValue()) {
     std::swap(LHS, RHS);
     SDOperand Op0 = Slct.getOperand(0);
-    bool isInt = MVT::isInteger(isSlctCC ? Op0.getValueType()
-                                : Op0.getOperand(0).getValueType());
+    bool isInt = (isSlctCC ? Op0.getValueType() :
+                  Op0.getOperand(0).getValueType()).isInteger();
     CC = ISD::getSetCCInverse(CC, isInt);
     DoXform = true;
     InvCC = true;
@@ -952,10 +957,10 @@
   SDOperand N1 = N->getOperand(1);
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
-  MVT::ValueType VT = N0.getValueType();
+  MVT VT = N0.getValueType();
 
   // fold vector ops
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SDOperand FoldedVOp = SimplifyVBinOp(N);
     if (FoldedVOp.Val) return FoldedVOp;
   }
@@ -997,14 +1002,14 @@
   if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
     return N1.getOperand(0);
 
-  if (!MVT::isVector(VT) && SimplifyDemandedBits(SDOperand(N, 0)))
+  if (!VT.isVector() && SimplifyDemandedBits(SDOperand(N, 0)))
     return SDOperand(N, 0);
   
   // fold (a+b) -> (a|b) iff a and b share no bits.
-  if (MVT::isInteger(VT) && !MVT::isVector(VT)) {
+  if (VT.isInteger() && !VT.isVector()) {
     APInt LHSZero, LHSOne;
     APInt RHSZero, RHSOne;
-    APInt Mask = APInt::getAllOnesValue(MVT::getSizeInBits(VT));
+    APInt Mask = APInt::getAllOnesValue(VT.getSizeInBits());
     DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
     if (LHSZero.getBoolValue()) {
       DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
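A minimal sketch of the reasoning behind the (a+b) -> (a|b) fold touched in this hunk, using plain 32-bit values in place of APInt and ComputeMaskedBits: if no bit position can be set in both operands, the addition generates no carries, so it agrees with bitwise OR.

  #include <cassert>
  #include <cstdint>

  // True if A and B have no set bits in common.
  bool shareNoBits(uint32_t A, uint32_t B) { return (A & B) == 0; }

  int main() {
    uint32_t A = 0xF0F0, B = 0x0F0F;          // disjoint bit patterns
    assert(shareNoBits(A, B));
    assert(A + B == (A | B));                 // the fold is exact here

    uint32_t C = 0x11, D = 0x13;              // overlapping bits -> carries
    assert(!shareNoBits(C, D) && C + D != (C | D));
    return 0;
  }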
@@ -1045,7 +1050,7 @@
   SDOperand N1 = N->getOperand(1);
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
-  MVT::ValueType VT = N0.getValueType();
+  MVT VT = N0.getValueType();
   
   // If the flag result is dead, turn this into an ADD.
   if (N->hasNUsesOfValue(0, 1))
@@ -1053,10 +1058,8 @@
                      DAG.getNode(ISD::CARRY_FALSE, MVT::Flag));
   
   // canonicalize constant to RHS.
-  if (N0C && !N1C) {
-    SDOperand Ops[] = { N1, N0 };
-    return DAG.getNode(ISD::ADDC, N->getVTList(), Ops, 2);
-  }
+  if (N0C && !N1C)
+    return DAG.getNode(ISD::ADDC, N->getVTList(), N1, N0);
   
   // fold (addc x, 0) -> x + no carry out
   if (N1C && N1C->isNullValue())
@@ -1065,7 +1068,7 @@
   // fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
   APInt LHSZero, LHSOne;
   APInt RHSZero, RHSOne;
-  APInt Mask = APInt::getAllOnesValue(MVT::getSizeInBits(VT));
+  APInt Mask = APInt::getAllOnesValue(VT.getSizeInBits());
   DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
   if (LHSZero.getBoolValue()) {
     DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
@@ -1087,19 +1090,15 @@
   SDOperand CarryIn = N->getOperand(2);
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
-  //MVT::ValueType VT = N0.getValueType();
+  //MVT VT = N0.getValueType();
   
   // canonicalize constant to RHS
-  if (N0C && !N1C) {
-    SDOperand Ops[] = { N1, N0, CarryIn };
-    return DAG.getNode(ISD::ADDE, N->getVTList(), Ops, 3);
-  }
+  if (N0C && !N1C)
+    return DAG.getNode(ISD::ADDE, N->getVTList(), N1, N0, CarryIn);
   
   // fold (adde x, y, false) -> (addc x, y)
-  if (CarryIn.getOpcode() == ISD::CARRY_FALSE) {
-    SDOperand Ops[] = { N1, N0 };
-    return DAG.getNode(ISD::ADDC, N->getVTList(), Ops, 2);
-  }
+  if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
+    return DAG.getNode(ISD::ADDC, N->getVTList(), N1, N0);
   
   return SDOperand();
 }
@@ -1111,10 +1110,10 @@
   SDOperand N1 = N->getOperand(1);
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.Val);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val);
-  MVT::ValueType VT = N0.getValueType();
+  MVT VT = N0.getValueType();
   
   // fold vector ops
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SDOperand FoldedVOp = SimplifyVBinOp(N);
     if (FoldedVOp.Val) return FoldedVOp;
   }
@@ -1154,10 +1153,10 @@
   SDOperand N1 = N->getOperand(1);
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
-  MVT::ValueType VT = N0.getValueType();
+  MVT VT = N0.getValueType();
   
   // fold vector ops
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SDOperand FoldedVOp = SimplifyVBinOp(N);
     if (FoldedVOp.Val) return FoldedVOp;
   }
@@ -1238,10 +1237,10 @@
   SDOperand N1 = N->getOperand(1);
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.Val);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
 
   // fold vector ops
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SDOperand FoldedVOp = SimplifyVBinOp(N);
     if (FoldedVOp.Val) return FoldedVOp;
   }
@@ -1257,7 +1256,7 @@
     return DAG.getNode(ISD::SUB, VT, DAG.getConstant(0, VT), N0);
   // If we know the sign bits of both operands are zero, strength reduce to a
   // udiv instead.  Handles (X&15) /s 4 -> X&15 >> 2
-  if (!MVT::isVector(VT)) {
+  if (!VT.isVector()) {
     if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
       return DAG.getNode(ISD::UDIV, N1.getValueType(), N0, N1);
   }
@@ -1274,12 +1273,12 @@
     unsigned lg2 = Log2_64(abs2);
     // Splat the sign bit into the register
     SDOperand SGN = DAG.getNode(ISD::SRA, VT, N0,
-                                DAG.getConstant(MVT::getSizeInBits(VT)-1,
+                                DAG.getConstant(VT.getSizeInBits()-1,
                                                 TLI.getShiftAmountTy()));
     AddToWorkList(SGN.Val);
     // Add (N0 < 0) ? abs2 - 1 : 0;
     SDOperand SRL = DAG.getNode(ISD::SRL, VT, SGN,
-                                DAG.getConstant(MVT::getSizeInBits(VT)-lg2,
+                                DAG.getConstant(VT.getSizeInBits()-lg2,
                                                 TLI.getShiftAmountTy()));
     SDOperand ADD = DAG.getNode(ISD::ADD, VT, N0, SRL);
     AddToWorkList(SRL.Val);
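For reference, a standalone sketch of the signed-divide-by-power-of-two expansion this hunk operates on (splat the sign, add abs2-1 for negative inputs, then arithmetic shift). It assumes 32-bit int, 1 <= lg2 <= 31, and an arithmetic right shift of negative values, as on all mainstream targets.

  #include <cassert>
  #include <cstdint>

  int32_t sdiv_pow2(int32_t X, unsigned lg2) {
    int32_t  SGN = X >> 31;                        // splat the sign bit
    uint32_t SRL = (uint32_t)SGN >> (32 - lg2);    // (X < 0) ? 2^lg2 - 1 : 0
    return (X + (int32_t)SRL) >> lg2;              // rounds toward zero
  }

  int main() {
    assert(sdiv_pow2( 7, 2) ==  7 / 4);   //  1
    assert(sdiv_pow2(-7, 2) == -7 / 4);   // -1, not -2
    assert(sdiv_pow2(-8, 3) == -8 / 8);   // -1
    return 0;
  }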
@@ -1316,10 +1315,10 @@
   SDOperand N1 = N->getOperand(1);
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.Val);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // fold vector ops
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SDOperand FoldedVOp = SimplifyVBinOp(N);
     if (FoldedVOp.Val) return FoldedVOp;
   }
@@ -1336,7 +1335,7 @@
   if (N1.getOpcode() == ISD::SHL) {
     if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
       if (SHC->getAPIntValue().isPowerOf2()) {
-        MVT::ValueType ADDVT = N1.getOperand(1).getValueType();
+        MVT ADDVT = N1.getOperand(1).getValueType();
         SDOperand Add = DAG.getNode(ISD::ADD, ADDVT, N1.getOperand(1),
                                     DAG.getConstant(SHC->getAPIntValue()
                                                                     .logBase2(),
@@ -1367,14 +1366,14 @@
   SDOperand N1 = N->getOperand(1);
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // fold (srem c1, c2) -> c1%c2
   if (N0C && N1C && !N1C->isNullValue())
     return DAG.getNode(ISD::SREM, VT, N0, N1);
   // If we know the sign bits of both operands are zero, strength reduce to a
   // urem instead.  Handles (X & 0x0FFFFFFF) %s 16 -> X&15
-  if (!MVT::isVector(VT)) {
+  if (!VT.isVector()) {
     if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
       return DAG.getNode(ISD::UREM, VT, N0, N1);
   }
@@ -1408,7 +1407,7 @@
   SDOperand N1 = N->getOperand(1);
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // fold (urem c1, c2) -> c1%c2
   if (N0C && N1C && !N1C->isNullValue())
@@ -1423,7 +1422,7 @@
       if (SHC->getAPIntValue().isPowerOf2()) {
         SDOperand Add =
           DAG.getNode(ISD::ADD, VT, N1,
-                 DAG.getConstant(APInt::getAllOnesValue(MVT::getSizeInBits(VT)),
+                 DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()),
                                  VT));
         AddToWorkList(Add.Val);
         return DAG.getNode(ISD::AND, VT, N0, Add);
@@ -1458,7 +1457,7 @@
   SDOperand N0 = N->getOperand(0);
   SDOperand N1 = N->getOperand(1);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // fold (mulhs x, 0) -> 0
   if (N1C && N1C->isNullValue())
@@ -1466,7 +1465,7 @@
   // fold (mulhs x, 1) -> (sra x, size(x)-1)
   if (N1C && N1C->getAPIntValue() == 1)
     return DAG.getNode(ISD::SRA, N0.getValueType(), N0, 
-                       DAG.getConstant(MVT::getSizeInBits(N0.getValueType())-1,
+                       DAG.getConstant(N0.getValueType().getSizeInBits()-1,
                                        TLI.getShiftAmountTy()));
   // fold (mulhs x, undef) -> 0
   if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
@@ -1479,7 +1478,7 @@
   SDOperand N0 = N->getOperand(0);
   SDOperand N1 = N->getOperand(1);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // fold (mulhu x, 0) -> 0
   if (N1C && N1C->isNullValue())
@@ -1531,7 +1530,8 @@
     AddToWorkList(Lo.Val);
     SDOperand LoOpt = combine(Lo.Val);
     if (LoOpt.Val && LoOpt.Val != Lo.Val &&
-        TLI.isOperationLegal(LoOpt.getOpcode(), LoOpt.getValueType()))
+        (!AfterLegalize ||
+         TLI.isOperationLegal(LoOpt.getOpcode(), LoOpt.getValueType())))
       return CombineTo(N, LoOpt, LoOpt);
   }
 
@@ -1541,7 +1541,8 @@
     AddToWorkList(Hi.Val);
     SDOperand HiOpt = combine(Hi.Val);
     if (HiOpt.Val && HiOpt != Hi &&
-        TLI.isOperationLegal(HiOpt.getOpcode(), HiOpt.getValueType()))
+        (!AfterLegalize ||
+         TLI.isOperationLegal(HiOpt.getOpcode(), HiOpt.getValueType())))
       return CombineTo(N, HiOpt, HiOpt);
   }
   return SDOperand();
@@ -1579,7 +1580,7 @@
 /// two operands of the same opcode, try to simplify it.
 SDOperand DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
   SDOperand N0 = N->getOperand(0), N1 = N->getOperand(1);
-  MVT::ValueType VT = N0.getValueType();
+  MVT VT = N0.getValueType();
   assert(N0.getOpcode() == N1.getOpcode() && "Bad input!");
   
   // For each of OP in AND/OR/XOR:
@@ -1620,11 +1621,11 @@
   SDOperand LL, LR, RL, RR, CC0, CC1;
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
-  MVT::ValueType VT = N1.getValueType();
-  unsigned BitWidth = MVT::getSizeInBits(VT);
+  MVT VT = N1.getValueType();
+  unsigned BitWidth = VT.getSizeInBits();
   
   // fold vector ops
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SDOperand FoldedVOp = SimplifyVBinOp(N);
     if (FoldedVOp.Val) return FoldedVOp;
   }
@@ -1679,7 +1680,7 @@
     ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
     
     if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
-        MVT::isInteger(LL.getValueType())) {
+        LL.getValueType().isInteger()) {
       // fold (X == 0) & (Y == 0) -> (X|Y == 0)
       if (cast<ConstantSDNode>(LR)->isNullValue() && Op1 == ISD::SETEQ) {
         SDOperand ORNode = DAG.getNode(ISD::OR, LR.getValueType(), LL, RL);
@@ -1705,7 +1706,7 @@
       std::swap(RL, RR);
     }
     if (LL == RL && LR == RR) {
-      bool isInteger = MVT::isInteger(LL.getValueType());
+      bool isInteger = LL.getValueType().isInteger();
       ISD::CondCode Result = ISD::getSetCCAndOperation(Op0, Op1, isInteger);
       if (Result != ISD::SETCC_INVALID)
         return DAG.getSetCC(N0.getValueType(), LL, LR, Result);
@@ -1720,19 +1721,20 @@
   
   // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1)
   // fold (and (sra)) -> (and (srl)) when possible.
-  if (!MVT::isVector(VT) &&
+  if (!VT.isVector() &&
       SimplifyDemandedBits(SDOperand(N, 0)))
     return SDOperand(N, 0);
   // fold (zext_inreg (extload x)) -> (zextload x)
   if (ISD::isEXTLoad(N0.Val) && ISD::isUNINDEXEDLoad(N0.Val)) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
-    MVT::ValueType EVT = LN0->getMemoryVT();
+    MVT EVT = LN0->getMemoryVT();
     // If we zero all the possible extended bits, then we can turn this into
     // a zextload if we are running before legalize or the operation is legal.
     unsigned BitWidth = N1.getValueSizeInBits();
     if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
-                                     BitWidth - MVT::getSizeInBits(EVT))) &&
-        (!AfterLegalize || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
+                                     BitWidth - EVT.getSizeInBits())) &&
+        ((!AfterLegalize && !LN0->isVolatile()) ||
+         TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
       SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
                                          LN0->getBasePtr(), LN0->getSrcValue(),
                                          LN0->getSrcValueOffset(), EVT,
@@ -1747,13 +1749,14 @@
   if (ISD::isSEXTLoad(N0.Val) && ISD::isUNINDEXEDLoad(N0.Val) &&
       N0.hasOneUse()) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
-    MVT::ValueType EVT = LN0->getMemoryVT();
+    MVT EVT = LN0->getMemoryVT();
     // If we zero all the possible extended bits, then we can turn this into
     // a zextload if we are running before legalize or the operation is legal.
     unsigned BitWidth = N1.getValueSizeInBits();
     if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
-                                     BitWidth - MVT::getSizeInBits(EVT))) &&
-        (!AfterLegalize || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
+                                     BitWidth - EVT.getSizeInBits())) &&
+        ((!AfterLegalize && !LN0->isVolatile()) ||
+         TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
       SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
                                          LN0->getBasePtr(), LN0->getSrcValue(),
                                          LN0->getSrcValueOffset(), EVT,
@@ -1770,26 +1773,25 @@
   if (N1C && N0.getOpcode() == ISD::LOAD) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     if (LN0->getExtensionType() != ISD::SEXTLOAD &&
-        LN0->isUnindexed() && N0.hasOneUse()) {
-      MVT::ValueType EVT, LoadedVT;
-      if (N1C->getAPIntValue() == 255)
-        EVT = MVT::i8;
-      else if (N1C->getAPIntValue() == 65535)
-        EVT = MVT::i16;
-      else if (N1C->getAPIntValue() == ~0U)
-        EVT = MVT::i32;
-      else
-        EVT = MVT::Other;
-    
-      LoadedVT = LN0->getMemoryVT();
-      if (EVT != MVT::Other && LoadedVT > EVT &&
+        LN0->isUnindexed() && N0.hasOneUse() &&
+        // Do not change the width of a volatile load.
+        !LN0->isVolatile()) {
+      MVT EVT = MVT::Other;
+      uint32_t ActiveBits = N1C->getAPIntValue().getActiveBits();
+      if (ActiveBits > 0 && APIntOps::isMask(ActiveBits, N1C->getAPIntValue()))
+        EVT = MVT::getIntegerVT(ActiveBits);
+
+      MVT LoadedVT = LN0->getMemoryVT();
+      // Do not generate loads of non-round integer types since these can
+      // be expensive (and would be wrong if the type is not byte sized).
+      if (EVT != MVT::Other && LoadedVT.bitsGT(EVT) && EVT.isRound() &&
           (!AfterLegalize || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
-        MVT::ValueType PtrType = N0.getOperand(1).getValueType();
+        MVT PtrType = N0.getOperand(1).getValueType();
         // For big endian targets, we need to add an offset to the pointer to
         // load the correct bytes.  For little endian systems, we merely need to
         // read fewer bytes from the same pointer.
-        unsigned LVTStoreBytes = MVT::getStoreSizeInBits(LoadedVT)/8;
-        unsigned EVTStoreBytes = MVT::getStoreSizeInBits(EVT)/8;
+        unsigned LVTStoreBytes = LoadedVT.getStoreSizeInBits()/8;
+        unsigned EVTStoreBytes = EVT.getStoreSizeInBits()/8;
         unsigned PtrOff = LVTStoreBytes - EVTStoreBytes;
         unsigned Alignment = LN0->getAlignment();
         SDOperand NewPtr = LN0->getBasePtr();
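The hunk above replaces the hard-coded 255/65535/~0U checks with a generic "is the AND mask a low-bit mask" test and derives the narrow load type from the mask's active bits (still requiring a round, byte-sized type). A sketch of that logic with plain integers; the helper name is illustrative, not LLVM API:

  #include <cassert>
  #include <cstdint>

  // True if M == 2^Bits - 1 for some Bits > 0; reports Bits.
  bool isLowBitMask(uint64_t M, unsigned &Bits) {
    if (M == 0 || (M & (M + 1)) != 0) return false;
    Bits = 0;
    while (M) { ++Bits; M >>= 1; }
    return true;
  }

  int main() {
    unsigned Bits;
    assert(isLowBitMask(0xFF, Bits)   && Bits == 8);    // and x, 255   -> i8 load
    assert(isLowBitMask(0xFFFF, Bits) && Bits == 16);   // and x, 65535 -> i16 load
    assert(!isLowBitMask(0xF0, Bits));                  // not a low-bit mask
    // Big-endian byte offset: loaded i32 but keeping i8 -> skip 3 bytes.
    unsigned PtrOff = 32/8 - 8/8;
    assert(PtrOff == 3);
    return 0;
  }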
@@ -1819,10 +1821,10 @@
   SDOperand LL, LR, RL, RR, CC0, CC1;
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
-  MVT::ValueType VT = N1.getValueType();
+  MVT VT = N1.getValueType();
   
   // fold vector ops
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SDOperand FoldedVOp = SimplifyVBinOp(N);
     if (FoldedVOp.Val) return FoldedVOp;
   }
@@ -1864,7 +1866,7 @@
     ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
     
     if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
-        MVT::isInteger(LL.getValueType())) {
+        LL.getValueType().isInteger()) {
       // fold (X != 0) | (Y != 0) -> (X|Y != 0)
       // fold (X <  0) | (Y <  0) -> (X|Y < 0)
       if (cast<ConstantSDNode>(LR)->isNullValue() && 
@@ -1888,7 +1890,7 @@
       std::swap(RL, RR);
     }
     if (LL == RL && LR == RR) {
-      bool isInteger = MVT::isInteger(LL.getValueType());
+      bool isInteger = LL.getValueType().isInteger();
       ISD::CondCode Result = ISD::getSetCCOrOperation(Op0, Op1, isInteger);
       if (Result != ISD::SETCC_INVALID)
         return DAG.getSetCC(N0.getValueType(), LL, LR, Result);
@@ -1954,15 +1956,15 @@
 // idioms for rotate, and if the target supports rotation instructions, generate
 // a rot[lr].
 SDNode *DAGCombiner::MatchRotate(SDOperand LHS, SDOperand RHS) {
-  // Must be a legal type.  Expanded an promoted things won't work with rotates.
-  MVT::ValueType VT = LHS.getValueType();
+  // Must be a legal type.  Expanded 'n promoted things won't work with rotates.
+  MVT VT = LHS.getValueType();
   if (!TLI.isTypeLegal(VT)) return 0;
 
   // The target must have at least one rotate flavor.
   bool HasROTL = TLI.isOperationLegal(ISD::ROTL, VT);
   bool HasROTR = TLI.isOperationLegal(ISD::ROTR, VT);
   if (!HasROTL && !HasROTR) return 0;
-  
+
   // Match "(X shl/srl V1) & V2" where V2 may not be present.
   SDOperand LHSShift;   // The shift.
   SDOperand LHSMask;    // AND value if any.
@@ -1987,7 +1989,7 @@
     std::swap(LHSMask , RHSMask );
   }
 
-  unsigned OpSizeInBits = MVT::getSizeInBits(VT);
+  unsigned OpSizeInBits = VT.getSizeInBits();
   SDOperand LHSShiftArg = LHSShift.getOperand(0);
   SDOperand LHSShiftAmt = LHSShift.getOperand(1);
   SDOperand RHSShiftAmt = RHSShift.getOperand(1);
@@ -2111,10 +2113,10 @@
   SDOperand LHS, RHS, CC;
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
-  MVT::ValueType VT = N0.getValueType();
+  MVT VT = N0.getValueType();
   
   // fold vector ops
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SDOperand FoldedVOp = SimplifyVBinOp(N);
     if (FoldedVOp.Val) return FoldedVOp;
   }
@@ -2142,7 +2144,7 @@
     return RXOR;
   // fold !(x cc y) -> (x !cc y)
   if (N1C && N1C->getAPIntValue() == 1 && isSetCCEquivalent(N0, LHS, RHS, CC)) {
-    bool isInt = MVT::isInteger(LHS.getValueType());
+    bool isInt = LHS.getValueType().isInteger();
     ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
                                                isInt);
     if (N0.getOpcode() == ISD::SETCC)
@@ -2201,12 +2203,12 @@
   }
   // fold (xor x, x) -> 0
   if (N0 == N1) {
-    if (!MVT::isVector(VT)) {
+    if (!VT.isVector()) {
       return DAG.getConstant(0, VT);
     } else if (!AfterLegalize || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)) {
       // Produce a vector of zeros.
-      SDOperand El = DAG.getConstant(0, MVT::getVectorElementType(VT));
-      std::vector<SDOperand> Ops(MVT::getVectorNumElements(VT), El);
+      SDOperand El = DAG.getConstant(0, VT.getVectorElementType());
+      std::vector<SDOperand> Ops(VT.getVectorNumElements(), El);
       return DAG.getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size());
     }
   }
@@ -2218,7 +2220,7 @@
   }
   
   // Simplify the expression using non-local knowledge.
-  if (!MVT::isVector(VT) &&
+  if (!VT.isVector() &&
       SimplifyDemandedBits(SDOperand(N, 0)))
     return SDOperand(N, 0);
   
@@ -2270,7 +2272,7 @@
       !isa<ConstantSDNode>(BinOpLHSVal->getOperand(1)))
     return SDOperand();
   
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // If this is a signed shift right, and the high bit is modified
   // by the logical operation, do not perform the transformation.
@@ -2301,8 +2303,8 @@
   SDOperand N1 = N->getOperand(1);
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
-  MVT::ValueType VT = N0.getValueType();
-  unsigned OpSizeInBits = MVT::getSizeInBits(VT);
+  MVT VT = N0.getValueType();
+  unsigned OpSizeInBits = VT.getSizeInBits();
   
   // fold (shl c1, c2) -> c1<<c2
   if (N0C && N1C)
@@ -2318,7 +2320,7 @@
     return N0;
   // if (shl x, c) is known to be zero, return 0
   if (DAG.MaskedValueIsZero(SDOperand(N, 0),
-                            APInt::getAllOnesValue(MVT::getSizeInBits(VT))))
+                            APInt::getAllOnesValue(VT.getSizeInBits())))
     return DAG.getConstant(0, VT);
   if (N1C && SimplifyDemandedBits(SDOperand(N, 0)))
     return SDOperand(N, 0);
@@ -2360,7 +2362,7 @@
   SDOperand N1 = N->getOperand(1);
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
-  MVT::ValueType VT = N0.getValueType();
+  MVT VT = N0.getValueType();
   
   // fold (sra c1, c2) -> c1>>c2
   if (N0C && N1C)
@@ -2372,7 +2374,7 @@
   if (N0C && N0C->isAllOnesValue())
     return N0;
   // fold (sra x, c >= size(x)) -> undef
-  if (N1C && N1C->getValue() >= MVT::getSizeInBits(VT))
+  if (N1C && N1C->getValue() >= VT.getSizeInBits())
     return DAG.getNode(ISD::UNDEF, VT);
   // fold (sra x, 0) -> x
   if (N1C && N1C->isNullValue())
@@ -2380,25 +2382,19 @@
   // fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports
   // sext_inreg.
   if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
-    unsigned LowBits = MVT::getSizeInBits(VT) - (unsigned)N1C->getValue();
-    MVT::ValueType EVT;
-    switch (LowBits) {
-    default: EVT = MVT::Other; break;
-    case  1: EVT = MVT::i1;    break;
-    case  8: EVT = MVT::i8;    break;
-    case 16: EVT = MVT::i16;   break;
-    case 32: EVT = MVT::i32;   break;
-    }
-    if (EVT > MVT::Other && TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, EVT))
+    unsigned LowBits = VT.getSizeInBits() - (unsigned)N1C->getValue();
+    MVT EVT = MVT::getIntegerVT(LowBits);
+    if (EVT.isSimple() && // TODO: remove when apint codegen support lands.
+        (!AfterLegalize || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, EVT)))
       return DAG.getNode(ISD::SIGN_EXTEND_INREG, VT, N0.getOperand(0),
                          DAG.getValueType(EVT));
   }
-  
+
   // fold (sra (sra x, c1), c2) -> (sra x, c1+c2)
   if (N1C && N0.getOpcode() == ISD::SRA) {
     if (ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
       unsigned Sum = N1C->getValue() + C1->getValue();
-      if (Sum >= MVT::getSizeInBits(VT)) Sum = MVT::getSizeInBits(VT)-1;
+      if (Sum >= VT.getSizeInBits()) Sum = VT.getSizeInBits()-1;
       return DAG.getNode(ISD::SRA, VT, N0.getOperand(0),
                          DAG.getConstant(Sum, N1C->getValueType(0)));
     }
@@ -2414,17 +2410,17 @@
     const ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
     if (N01C && N1C) {
       // Determine what the truncate's result bitsize and type would be.
-      unsigned VTValSize = MVT::getSizeInBits(VT);
-      MVT::ValueType TruncVT = MVT::getIntegerType(VTValSize - N1C->getValue());
+      unsigned VTValSize = VT.getSizeInBits();
+      MVT TruncVT =
+        MVT::getIntegerVT(VTValSize - N1C->getValue());
       // Determine the residual right-shift amount.
       unsigned ShiftAmt = N1C->getValue() - N01C->getValue();
-      
+
       // If the shift is not a no-op (in which case this should be just a sign 
       // extend already), the truncated to type is legal, sign_extend is legal 
       // on that type, and the the truncate to that type is both legal and free, 
       // perform the transform.
       if (ShiftAmt && 
-          TLI.isTypeLegal(TruncVT) && 
           TLI.isOperationLegal(ISD::SIGN_EXTEND, TruncVT) &&
           TLI.isOperationLegal(ISD::TRUNCATE, VT) &&
           TLI.isTruncateFree(VT, TruncVT)) {
@@ -2454,8 +2450,8 @@
   SDOperand N1 = N->getOperand(1);
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
-  MVT::ValueType VT = N0.getValueType();
-  unsigned OpSizeInBits = MVT::getSizeInBits(VT);
+  MVT VT = N0.getValueType();
+  unsigned OpSizeInBits = VT.getSizeInBits();
   
   // fold (srl c1, c2) -> c1 >>u c2
   if (N0C && N1C)
@@ -2488,8 +2484,8 @@
   // fold (srl (anyextend x), c) -> (anyextend (srl x, c))
   if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
     // Shifting in all undef bits?
-    MVT::ValueType SmallVT = N0.getOperand(0).getValueType();
-    if (N1C->getValue() >= MVT::getSizeInBits(SmallVT))
+    MVT SmallVT = N0.getOperand(0).getValueType();
+    if (N1C->getValue() >= SmallVT.getSizeInBits())
       return DAG.getNode(ISD::UNDEF, VT);
 
     SDOperand SmallShift = DAG.getNode(ISD::SRL, SmallVT, N0.getOperand(0), N1);
@@ -2499,16 +2495,16 @@
   
   // fold (srl (sra X, Y), 31) -> (srl X, 31).  This srl only looks at the sign
   // bit, which is unmodified by sra.
-  if (N1C && N1C->getValue()+1 == MVT::getSizeInBits(VT)) {
+  if (N1C && N1C->getValue()+1 == VT.getSizeInBits()) {
     if (N0.getOpcode() == ISD::SRA)
       return DAG.getNode(ISD::SRL, VT, N0.getOperand(0), N1);
   }
   
   // fold (srl (ctlz x), "5") -> x  iff x has one bit set (the low bit).
   if (N1C && N0.getOpcode() == ISD::CTLZ && 
-      N1C->getAPIntValue() == Log2_32(MVT::getSizeInBits(VT))) {
+      N1C->getAPIntValue() == Log2_32(VT.getSizeInBits())) {
     APInt KnownZero, KnownOne;
-    APInt Mask = APInt::getAllOnesValue(MVT::getSizeInBits(VT));
+    APInt Mask = APInt::getAllOnesValue(VT.getSizeInBits());
     DAG.ComputeMaskedBits(N0.getOperand(0), Mask, KnownZero, KnownOne);
     
     // If any of the input bits are KnownOne, then the input couldn't be all
@@ -2547,7 +2543,7 @@
 
 SDOperand DAGCombiner::visitCTLZ(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
 
   // fold (ctlz c1) -> c2
   if (isa<ConstantSDNode>(N0))
@@ -2557,7 +2553,7 @@
 
 SDOperand DAGCombiner::visitCTTZ(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // fold (cttz c1) -> c2
   if (isa<ConstantSDNode>(N0))
@@ -2567,7 +2563,7 @@
 
 SDOperand DAGCombiner::visitCTPOP(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // fold (ctpop c1) -> c2
   if (isa<ConstantSDNode>(N0))
@@ -2582,8 +2578,8 @@
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
   ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
-  MVT::ValueType VT = N->getValueType(0);
-  MVT::ValueType VT0 = N0.getValueType();
+  MVT VT = N->getValueType(0);
+  MVT VT0 = N0.getValueType();
 
   // fold select C, X, X -> X
   if (N1 == N2)
@@ -2595,16 +2591,16 @@
   if (N0C && N0C->isNullValue())
     return N2;
   // fold select C, 1, X -> C | X
-  if (MVT::i1 == VT && N1C && N1C->getAPIntValue() == 1)
+  if (VT == MVT::i1 && N1C && N1C->getAPIntValue() == 1)
     return DAG.getNode(ISD::OR, VT, N0, N2);
   // fold select C, 0, 1 -> ~C
-  if (MVT::isInteger(VT) && MVT::isInteger(VT0) &&
+  if (VT.isInteger() && VT0.isInteger() &&
       N1C && N2C && N1C->isNullValue() && N2C->getAPIntValue() == 1) {
     SDOperand XORNode = DAG.getNode(ISD::XOR, VT0, N0, DAG.getConstant(1, VT0));
     if (VT == VT0)
       return XORNode;
     AddToWorkList(XORNode.Val);
-    if (MVT::getSizeInBits(VT) > MVT::getSizeInBits(VT0))
+    if (VT.bitsGT(VT0))
       return DAG.getNode(ISD::ZERO_EXTEND, VT, XORNode);
     return DAG.getNode(ISD::TRUNCATE, VT, XORNode);
   }
@@ -2622,19 +2618,19 @@
   }
   // fold select C, X, 0 -> C & X
   // FIXME: this should check for C type == X type, not i1?
-  if (MVT::i1 == VT && N2C && N2C->isNullValue())
+  if (VT == MVT::i1 && N2C && N2C->isNullValue())
     return DAG.getNode(ISD::AND, VT, N0, N1);
   // fold  X ? X : Y --> X ? 1 : Y --> X | Y
-  if (MVT::i1 == VT && N0 == N1)
+  if (VT == MVT::i1 && N0 == N1)
     return DAG.getNode(ISD::OR, VT, N0, N2);
   // fold X ? Y : X --> X ? Y : 0 --> X & Y
-  if (MVT::i1 == VT && N0 == N2)
+  if (VT == MVT::i1 && N0 == N2)
     return DAG.getNode(ISD::AND, VT, N0, N1);
   
   // If we can fold this based on the true/false value, do so.
   if (SimplifySelectOps(N, N1, N2))
     return SDOperand(N, 0);  // Don't revisit N.
-  
+
   // fold selects based on a setcc into other things, such as min/max/abs
   if (N0.getOpcode() == ISD::SETCC) {
     // FIXME:
@@ -2762,7 +2758,7 @@
 
 SDOperand DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
 
   // fold (sext c1) -> c1
   if (isa<ConstantSDNode>(N0))
@@ -2773,24 +2769,22 @@
   if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
     return DAG.getNode(ISD::SIGN_EXTEND, VT, N0.getOperand(0));
   
-  // fold (sext (truncate (load x))) -> (sext (smaller load x))
-  // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
   if (N0.getOpcode() == ISD::TRUNCATE) {
+    // fold (sext (truncate (load x))) -> (sext (smaller load x))
+    // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
     SDOperand NarrowLoad = ReduceLoadWidth(N0.Val);
     if (NarrowLoad.Val) {
       if (NarrowLoad.Val != N0.Val)
         CombineTo(N0.Val, NarrowLoad);
       return DAG.getNode(ISD::SIGN_EXTEND, VT, NarrowLoad);
     }
-  }
 
-  // See if the value being truncated is already sign extended.  If so, just
-  // eliminate the trunc/sext pair.
-  if (N0.getOpcode() == ISD::TRUNCATE) {
+    // See if the value being truncated is already sign extended.  If so, just
+    // eliminate the trunc/sext pair.
     SDOperand Op = N0.getOperand(0);
-    unsigned OpBits   = MVT::getSizeInBits(Op.getValueType());
-    unsigned MidBits  = MVT::getSizeInBits(N0.getValueType());
-    unsigned DestBits = MVT::getSizeInBits(VT);
+    unsigned OpBits   = Op.getValueType().getSizeInBits();
+    unsigned MidBits  = N0.getValueType().getSizeInBits();
+    unsigned DestBits = VT.getSizeInBits();
     unsigned NumSignBits = DAG.ComputeNumSignBits(Op);
     
     if (OpBits == DestBits) {
@@ -2813,9 +2807,9 @@
     // fold (sext (truncate x)) -> (sextinreg x).
     if (!AfterLegalize || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG,
                                                N0.getValueType())) {
-      if (Op.getValueType() < VT)
+      if (Op.getValueType().bitsLT(VT))
         Op = DAG.getNode(ISD::ANY_EXTEND, VT, Op);
-      else if (Op.getValueType() > VT)
+      else if (Op.getValueType().bitsGT(VT))
         Op = DAG.getNode(ISD::TRUNCATE, VT, Op);
       return DAG.getNode(ISD::SIGN_EXTEND_INREG, VT, Op,
                          DAG.getValueType(N0.getValueType()));
@@ -2824,7 +2818,8 @@
   
   // fold (sext (load x)) -> (sext (truncate (sextload x)))
   if (ISD::isNON_EXTLoad(N0.Val) &&
-      (!AfterLegalize||TLI.isLoadXLegal(ISD::SEXTLOAD, N0.getValueType()))){
+      ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
+       TLI.isLoadXLegal(ISD::SEXTLOAD, N0.getValueType()))) {
     bool DoXform = true;
     SmallVector<SDNode*, 4> SetCCs;
     if (!N0.hasOneUse())
@@ -2864,8 +2859,9 @@
   if ((ISD::isSEXTLoad(N0.Val) || ISD::isEXTLoad(N0.Val)) &&
       ISD::isUNINDEXEDLoad(N0.Val) && N0.hasOneUse()) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
-    MVT::ValueType EVT = LN0->getMemoryVT();
-    if (!AfterLegalize || TLI.isLoadXLegal(ISD::SEXTLOAD, EVT)) {
+    MVT EVT = LN0->getMemoryVT();
+    if ((!AfterLegalize && !LN0->isVolatile()) ||
+        TLI.isLoadXLegal(ISD::SEXTLOAD, EVT)) {
       SDOperand ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(),
                                          LN0->getBasePtr(), LN0->getSrcValue(),
                                          LN0->getSrcValueOffset(), EVT,
@@ -2887,12 +2883,17 @@
     if (SCC.Val) return SCC;
   }
   
+  // fold (sext x) -> (zext x) if the sign bit is known zero.
+  if ((!AfterLegalize || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) &&
+      DAG.SignBitIsZero(N0))
+    return DAG.getNode(ISD::ZERO_EXTEND, VT, N0);
+  
   return SDOperand();
 }
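A quick check of the new (sext x) -> (zext x) fold added just above: when the sign bit of the narrow value is known to be zero, sign- and zero-extension produce the same wide value, so the cheaper/canonical zero extension can be used.

  #include <cassert>
  #include <cstdint>

  int main() {
    int32_t Narrow = 0x1234;                     // sign bit clear
    int64_t  SExt = (int64_t)Narrow;             // sign extension
    uint64_t ZExt = (uint64_t)(uint32_t)Narrow;  // zero extension
    assert((uint64_t)SExt == ZExt);              // identical bit patterns
    return 0;
  }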
 
 SDOperand DAGCombiner::visitZERO_EXTEND(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
 
   // fold (zext c1) -> c1
   if (isa<ConstantSDNode>(N0))
@@ -2917,9 +2918,9 @@
   if (N0.getOpcode() == ISD::TRUNCATE &&
       (!AfterLegalize || TLI.isOperationLegal(ISD::AND, VT))) {
     SDOperand Op = N0.getOperand(0);
-    if (Op.getValueType() < VT) {
+    if (Op.getValueType().bitsLT(VT)) {
       Op = DAG.getNode(ISD::ANY_EXTEND, VT, Op);
-    } else if (Op.getValueType() > VT) {
+    } else if (Op.getValueType().bitsGT(VT)) {
       Op = DAG.getNode(ISD::TRUNCATE, VT, Op);
     }
     return DAG.getZeroExtendInReg(Op, N0.getValueType());
@@ -2930,19 +2931,20 @@
       N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
       N0.getOperand(1).getOpcode() == ISD::Constant) {
     SDOperand X = N0.getOperand(0).getOperand(0);
-    if (X.getValueType() < VT) {
+    if (X.getValueType().bitsLT(VT)) {
       X = DAG.getNode(ISD::ANY_EXTEND, VT, X);
-    } else if (X.getValueType() > VT) {
+    } else if (X.getValueType().bitsGT(VT)) {
       X = DAG.getNode(ISD::TRUNCATE, VT, X);
     }
     APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
-    Mask.zext(MVT::getSizeInBits(VT));
+    Mask.zext(VT.getSizeInBits());
     return DAG.getNode(ISD::AND, VT, X, DAG.getConstant(Mask, VT));
   }
   
   // fold (zext (load x)) -> (zext (truncate (zextload x)))
   if (ISD::isNON_EXTLoad(N0.Val) &&
-      (!AfterLegalize||TLI.isLoadXLegal(ISD::ZEXTLOAD, N0.getValueType()))) {
+      ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
+       TLI.isLoadXLegal(ISD::ZEXTLOAD, N0.getValueType()))) {
     bool DoXform = true;
     SmallVector<SDNode*, 4> SetCCs;
     if (!N0.hasOneUse())
@@ -2982,16 +2984,19 @@
   if ((ISD::isZEXTLoad(N0.Val) || ISD::isEXTLoad(N0.Val)) &&
       ISD::isUNINDEXEDLoad(N0.Val) && N0.hasOneUse()) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
-    MVT::ValueType EVT = LN0->getMemoryVT();
-    SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
-                                       LN0->getBasePtr(), LN0->getSrcValue(),
-                                       LN0->getSrcValueOffset(), EVT,
-                                       LN0->isVolatile(), 
-                                       LN0->getAlignment());
-    CombineTo(N, ExtLoad);
-    CombineTo(N0.Val, DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad),
-              ExtLoad.getValue(1));
-    return SDOperand(N, 0);   // Return N so it doesn't get rechecked!
+    MVT EVT = LN0->getMemoryVT();
+    if ((!AfterLegalize && !LN0->isVolatile()) ||
+        TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT)) {
+      SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
+                                         LN0->getBasePtr(), LN0->getSrcValue(),
+                                         LN0->getSrcValueOffset(), EVT,
+                                         LN0->isVolatile(),
+                                         LN0->getAlignment());
+      CombineTo(N, ExtLoad);
+      CombineTo(N0.Val, DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad),
+                ExtLoad.getValue(1));
+      return SDOperand(N, 0);   // Return N so it doesn't get rechecked!
+    }
   }
   
   // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
@@ -3008,7 +3013,7 @@
 
 SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // fold (aext c1) -> c1
   if (isa<ConstantSDNode>(N0))
@@ -3037,7 +3042,7 @@
     SDOperand TruncOp = N0.getOperand(0);
     if (TruncOp.getValueType() == VT)
       return TruncOp; // x iff x size == zext size.
-    if (TruncOp.getValueType() > VT)
+    if (TruncOp.getValueType().bitsGT(VT))
       return DAG.getNode(ISD::TRUNCATE, VT, TruncOp);
     return DAG.getNode(ISD::ANY_EXTEND, VT, TruncOp);
   }
@@ -3047,19 +3052,20 @@
       N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
       N0.getOperand(1).getOpcode() == ISD::Constant) {
     SDOperand X = N0.getOperand(0).getOperand(0);
-    if (X.getValueType() < VT) {
+    if (X.getValueType().bitsLT(VT)) {
       X = DAG.getNode(ISD::ANY_EXTEND, VT, X);
-    } else if (X.getValueType() > VT) {
+    } else if (X.getValueType().bitsGT(VT)) {
       X = DAG.getNode(ISD::TRUNCATE, VT, X);
     }
     APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
-    Mask.zext(MVT::getSizeInBits(VT));
+    Mask.zext(VT.getSizeInBits());
     return DAG.getNode(ISD::AND, VT, X, DAG.getConstant(Mask, VT));
   }
   
   // fold (aext (load x)) -> (aext (truncate (extload x)))
   if (ISD::isNON_EXTLoad(N0.Val) && N0.hasOneUse() &&
-      (!AfterLegalize||TLI.isLoadXLegal(ISD::EXTLOAD, N0.getValueType()))) {
+      ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
+       TLI.isLoadXLegal(ISD::EXTLOAD, N0.getValueType()))) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     SDOperand ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, LN0->getChain(),
                                        LN0->getBasePtr(), LN0->getSrcValue(),
@@ -3080,7 +3086,7 @@
       !ISD::isNON_EXTLoad(N0.Val) && ISD::isUNINDEXEDLoad(N0.Val) &&
       N0.hasOneUse()) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
-    MVT::ValueType EVT = LN0->getMemoryVT();
+    MVT EVT = LN0->getMemoryVT();
     SDOperand ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), VT,
                                        LN0->getChain(), LN0->getBasePtr(),
                                        LN0->getSrcValue(),
@@ -3147,8 +3153,8 @@
   unsigned Opc = N->getOpcode();
   ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
   SDOperand N0 = N->getOperand(0);
-  MVT::ValueType VT = N->getValueType(0);
-  MVT::ValueType EVT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
+  MVT EVT = N->getValueType(0);
 
   // Special case: SIGN_EXTEND_INREG is basically truncating to EVT then
   // extended to VT.
@@ -3159,7 +3165,7 @@
       return SDOperand();
   }
 
-  unsigned EVTBits = MVT::getSizeInBits(EVT);
+  unsigned EVTBits = EVT.getSizeInBits();
   unsigned ShAmt = 0;
   bool CombineSRL =  false;
   if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
@@ -3168,28 +3174,27 @@
       // Is the shift amount a multiple of size of VT?
       if ((ShAmt & (EVTBits-1)) == 0) {
         N0 = N0.getOperand(0);
-        if (MVT::getSizeInBits(N0.getValueType()) <= EVTBits)
+        if (N0.getValueType().getSizeInBits() <= EVTBits)
           return SDOperand();
         CombineSRL = true;
       }
     }
   }
 
-  if (ISD::isNON_EXTLoad(N0.Val) && N0.hasOneUse() &&
-      // Do not allow folding to i1 here.  i1 is implicitly stored in memory in
-      // zero extended form: by shrinking the load, we lose track of the fact
-      // that it is already zero extended.
-      // FIXME: This should be reevaluated.
-      VT != MVT::i1) {
-    assert(MVT::getSizeInBits(N0.getValueType()) > EVTBits &&
+  // Do not generate loads of non-round integer types since these can
+  // be expensive (and would be wrong if the type is not byte sized).
+  if (ISD::isNON_EXTLoad(N0.Val) && N0.hasOneUse() && VT.isRound() &&
+      // Do not change the width of a volatile load.
+      !cast<LoadSDNode>(N0)->isVolatile()) {
+    assert(N0.getValueType().getSizeInBits() > EVTBits &&
            "Cannot truncate to larger type!");
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
-    MVT::ValueType PtrType = N0.getOperand(1).getValueType();
+    MVT PtrType = N0.getOperand(1).getValueType();
     // For big endian targets, we need to adjust the offset to the pointer to
     // load the correct bytes.
     if (TLI.isBigEndian()) {
-      unsigned LVTStoreBits = MVT::getStoreSizeInBits(N0.getValueType());
-      unsigned EVTStoreBits = MVT::getStoreSizeInBits(EVT);
+      unsigned LVTStoreBits = N0.getValueType().getStoreSizeInBits();
+      unsigned EVTStoreBits = EVT.getStoreSizeInBits();
       ShAmt = LVTStoreBits - EVTStoreBits - ShAmt;
     }
     uint64_t PtrOff =  ShAmt / 8;
@@ -3228,22 +3233,22 @@
 SDOperand DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
   SDOperand N1 = N->getOperand(1);
-  MVT::ValueType VT = N->getValueType(0);
-  MVT::ValueType EVT = cast<VTSDNode>(N1)->getVT();
-  unsigned VTBits = MVT::getSizeInBits(VT);
-  unsigned EVTBits = MVT::getSizeInBits(EVT);
+  MVT VT = N->getValueType(0);
+  MVT EVT = cast<VTSDNode>(N1)->getVT();
+  unsigned VTBits = VT.getSizeInBits();
+  unsigned EVTBits = EVT.getSizeInBits();
   
   // fold (sext_in_reg c1) -> c1
   if (isa<ConstantSDNode>(N0) || N0.getOpcode() == ISD::UNDEF)
     return DAG.getNode(ISD::SIGN_EXTEND_INREG, VT, N0, N1);
   
   // If the input is already sign extended, just drop the extension.
-  if (DAG.ComputeNumSignBits(N0) >= MVT::getSizeInBits(VT)-EVTBits+1)
+  if (DAG.ComputeNumSignBits(N0) >= VT.getSizeInBits()-EVTBits+1)
     return N0;
   
   // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2
   if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
-      EVT < cast<VTSDNode>(N0.getOperand(1))->getVT()) {
+      EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT())) {
     return DAG.getNode(ISD::SIGN_EXTEND_INREG, VT, N0.getOperand(0), N1);
   }
 
@@ -3267,11 +3272,11 @@
   // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above.
   if (N0.getOpcode() == ISD::SRL) {
     if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
-      if (ShAmt->getValue()+EVTBits <= MVT::getSizeInBits(VT)) {
+      if (ShAmt->getValue()+EVTBits <= VT.getSizeInBits()) {
         // We can turn this into an SRA iff the input to the SRL is already sign
         // extended enough.
         unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0));
-        if (MVT::getSizeInBits(VT)-(ShAmt->getValue()+EVTBits) < InSignBits)
+        if (VT.getSizeInBits()-(ShAmt->getValue()+EVTBits) < InSignBits)
           return DAG.getNode(ISD::SRA, VT, N0.getOperand(0), N0.getOperand(1));
       }
   }
@@ -3280,7 +3285,8 @@
   if (ISD::isEXTLoad(N0.Val) && 
       ISD::isUNINDEXEDLoad(N0.Val) &&
       EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
-      (!AfterLegalize || TLI.isLoadXLegal(ISD::SEXTLOAD, EVT))) {
+      ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
+       TLI.isLoadXLegal(ISD::SEXTLOAD, EVT))) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     SDOperand ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(),
                                        LN0->getBasePtr(), LN0->getSrcValue(),
@@ -3295,7 +3301,8 @@
   if (ISD::isZEXTLoad(N0.Val) && ISD::isUNINDEXEDLoad(N0.Val) &&
       N0.hasOneUse() &&
       EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
-      (!AfterLegalize || TLI.isLoadXLegal(ISD::SEXTLOAD, EVT))) {
+      ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
+       TLI.isLoadXLegal(ISD::SEXTLOAD, EVT))) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     SDOperand ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(),
                                        LN0->getBasePtr(), LN0->getSrcValue(),
@@ -3311,7 +3318,7 @@
 
 SDOperand DAGCombiner::visitTRUNCATE(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
 
   // noop truncate
   if (N0.getValueType() == N->getValueType(0))
@@ -3325,10 +3332,10 @@
   // fold (truncate (ext x)) -> (ext x) or (truncate x) or x
   if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::SIGN_EXTEND||
       N0.getOpcode() == ISD::ANY_EXTEND) {
-    if (N0.getOperand(0).getValueType() < VT)
+    if (N0.getOperand(0).getValueType().bitsLT(VT))
       // if the source is smaller than the dest, we still need an extend
       return DAG.getNode(N0.getOpcode(), VT, N0.getOperand(0));
-    else if (N0.getOperand(0).getValueType() > VT)
+    else if (N0.getOperand(0).getValueType().bitsGT(VT))
       // if the source is larger than the dest, than we just need the truncate
       return DAG.getNode(ISD::TRUNCATE, VT, N0.getOperand(0));
     else
@@ -3342,7 +3349,7 @@
   // -> trunc y
   SDOperand Shorter =
     GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(),
-                                             MVT::getSizeInBits(VT)));
+                                             VT.getSizeInBits()));
   if (Shorter.Val)
     return DAG.getNode(ISD::TRUNCATE, VT, Shorter);
 
@@ -3351,9 +3358,47 @@
   return ReduceLoadWidth(N);
 }
 
+static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
+  SDOperand Elt = N->getOperand(i);
+  if (Elt.getOpcode() != ISD::MERGE_VALUES)
+    return Elt.Val;
+  return Elt.getOperand(Elt.ResNo).Val;
+}
+
+/// CombineConsecutiveLoads - build_pair (load, load) -> load
+/// if load locations are consecutive. 
+SDOperand DAGCombiner::CombineConsecutiveLoads(SDNode *N, MVT VT) {
+  assert(N->getOpcode() == ISD::BUILD_PAIR);
+
+  SDNode *LD1 = getBuildPairElt(N, 0);
+  if (!ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse())
+    return SDOperand();
+  MVT LD1VT = LD1->getValueType(0);
+  SDNode *LD2 = getBuildPairElt(N, 1);
+  const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+  if (ISD::isNON_EXTLoad(LD2) &&
+      LD2->hasOneUse() &&
+      // If both are volatile this would reduce the number of volatile loads.
+      // If one is volatile it might be ok, but play conservative and bail out.
+      !cast<LoadSDNode>(LD1)->isVolatile() &&
+      !cast<LoadSDNode>(LD2)->isVolatile() &&
+      TLI.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1, MFI)) {
+    LoadSDNode *LD = cast<LoadSDNode>(LD1);
+    unsigned Align = LD->getAlignment();
+    unsigned NewAlign = TLI.getTargetMachine().getTargetData()->
+      getABITypeAlignment(VT.getTypeForMVT());
+    if (NewAlign <= Align &&
+        (!AfterLegalize || TLI.isOperationLegal(ISD::LOAD, VT)))
+      return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(),
+                         LD->getSrcValue(), LD->getSrcValueOffset(),
+                         false, Align);
+  }
+  return SDOperand();
+}
+
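A sketch of the "consecutive loads" test the new helper above relies on, with plain addresses standing in for SelectionDAG nodes (the names here are illustrative, not LLVM API): two non-volatile loads can be merged into one wider load when the second starts exactly where the first ends and the wider type's ABI alignment is no stricter than the original load's.

  #include <assert.h>
  #include <stdint.h>

  struct LoadInfo {
    uint64_t Addr;       // byte address of the load
    unsigned Bytes;      // size of the loaded value
    unsigned Align;      // alignment of the original load
    bool     Volatile;
  };

  bool canMerge(const LoadInfo &LD1, const LoadInfo &LD2, unsigned WideAlign) {
    return !LD1.Volatile && !LD2.Volatile &&
           LD2.Addr == LD1.Addr + LD1.Bytes &&   // locations are consecutive
           WideAlign <= LD1.Align;               // no stricter alignment needed
  }

  int main() {
    LoadInfo Lo = {0x1000, 4, 8, false}, Hi = {0x1004, 4, 8, false};
    assert(canMerge(Lo, Hi, /*WideAlign=*/8));   // e.g. one i64 load at 0x1000
    Hi.Addr = 0x1008;                            // gap between the two halves
    assert(!canMerge(Lo, Hi, 8));
    return 0;
  }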
 SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
 
   // If the input is a BUILD_VECTOR with all constant elements, fold this now.
   // Only do this before legalize, since afterward the target may be depending
@@ -3361,7 +3406,7 @@
   // First check to see if this is all constant.
   if (!AfterLegalize &&
       N0.getOpcode() == ISD::BUILD_VECTOR && N0.Val->hasOneUse() &&
-      MVT::isVector(VT)) {
+      VT.isVector()) {
     bool isSimple = true;
     for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i)
       if (N0.getOperand(i).getOpcode() != ISD::UNDEF &&
@@ -3371,8 +3416,8 @@
         break;
       }
         
-    MVT::ValueType DestEltVT = MVT::getVectorElementType(N->getValueType(0));
-    assert(!MVT::isVector(DestEltVT) &&
+    MVT DestEltVT = N->getValueType(0).getVectorElementType();
+    assert(!DestEltVT.isVector() &&
            "Element type of vector ValueType must not be vector!");
     if (isSimple) {
       return ConstantFoldBIT_CONVERTofBUILD_VECTOR(N0.Val, DestEltVT);
@@ -3391,31 +3436,33 @@
   // fold (conv (load x)) -> (load (conv*)x)
   // If the resultant load doesn't need a higher alignment than the original!
   if (ISD::isNormalLoad(N0.Val) && N0.hasOneUse() &&
-      TLI.isOperationLegal(ISD::LOAD, VT)) {
+      // Do not change the width of a volatile load.
+      !cast<LoadSDNode>(N0)->isVolatile() &&
+      (!AfterLegalize || TLI.isOperationLegal(ISD::LOAD, VT))) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     unsigned Align = TLI.getTargetMachine().getTargetData()->
-      getABITypeAlignment(MVT::getTypeForValueType(VT));
+      getABITypeAlignment(VT.getTypeForMVT());
     unsigned OrigAlign = LN0->getAlignment();
     if (Align <= OrigAlign) {
       SDOperand Load = DAG.getLoad(VT, LN0->getChain(), LN0->getBasePtr(),
                                    LN0->getSrcValue(), LN0->getSrcValueOffset(),
-                                   LN0->isVolatile(), Align);
+                                   LN0->isVolatile(), OrigAlign);
       AddToWorkList(N);
       CombineTo(N0.Val, DAG.getNode(ISD::BIT_CONVERT, N0.getValueType(), Load),
                 Load.getValue(1));
       return Load;
     }
   }
-  
+
   // Fold bitconvert(fneg(x)) -> xor(bitconvert(x), signbit)
   // Fold bitconvert(fabs(x)) -> and(bitconvert(x), ~signbit)
   // This often reduces constant pool loads.
   if ((N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FABS) &&
-      N0.Val->hasOneUse() && MVT::isInteger(VT) && !MVT::isVector(VT)) {
+      N0.Val->hasOneUse() && VT.isInteger() && !VT.isVector()) {
     SDOperand NewConv = DAG.getNode(ISD::BIT_CONVERT, VT, N0.getOperand(0));
     AddToWorkList(NewConv.Val);
     
-    APInt SignBit = APInt::getSignBit(MVT::getSizeInBits(VT));
+    APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
     if (N0.getOpcode() == ISD::FNEG)
       return DAG.getNode(ISD::XOR, VT, NewConv, DAG.getConstant(SignBit, VT));
     assert(N0.getOpcode() == ISD::FABS);
@@ -3427,14 +3474,15 @@
   // to an fneg or fabs.
   if (N0.getOpcode() == ISD::FCOPYSIGN && N0.Val->hasOneUse() &&
       isa<ConstantFPSDNode>(N0.getOperand(0)) &&
-      MVT::isInteger(VT) && !MVT::isVector(VT)) {
-    unsigned OrigXWidth = MVT::getSizeInBits(N0.getOperand(1).getValueType());
-    SDOperand X = DAG.getNode(ISD::BIT_CONVERT, MVT::getIntegerType(OrigXWidth),
+      VT.isInteger() && !VT.isVector()) {
+    unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
+    SDOperand X = DAG.getNode(ISD::BIT_CONVERT,
+                              MVT::getIntegerVT(OrigXWidth),
                               N0.getOperand(1));
     AddToWorkList(X.Val);
 
     // If X has a different width than the result/lhs, sext it or truncate it.
-    unsigned VTWidth = MVT::getSizeInBits(VT);
+    unsigned VTWidth = VT.getSizeInBits();
     if (OrigXWidth < VTWidth) {
       X = DAG.getNode(ISD::SIGN_EXTEND, VT, X);
       AddToWorkList(X.Val);
@@ -3448,7 +3496,7 @@
       AddToWorkList(X.Val);
     }
     
-    APInt SignBit = APInt::getSignBit(MVT::getSizeInBits(VT));
+    APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
     X = DAG.getNode(ISD::AND, VT, X, DAG.getConstant(SignBit, VT));
     AddToWorkList(X.Val);
 
@@ -3458,22 +3506,34 @@
 
     return DAG.getNode(ISD::OR, VT, X, Cst);
   }
+
+  // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive. 
+  if (N0.getOpcode() == ISD::BUILD_PAIR) {
+    SDOperand CombineLD = CombineConsecutiveLoads(N0.Val, VT);
+    if (CombineLD.Val)
+      return CombineLD;
+  }
   
   return SDOperand();
 }
 
+SDOperand DAGCombiner::visitBUILD_PAIR(SDNode *N) {
+  MVT VT = N->getValueType(0);
+  return CombineConsecutiveLoads(N, VT);
+}
+
 /// ConstantFoldBIT_CONVERTofBUILD_VECTOR - We know that BV is a build_vector
 /// node with Constant, ConstantFP or Undef operands.  DstEltVT indicates the 
 /// destination element value type.
 SDOperand DAGCombiner::
-ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT::ValueType DstEltVT) {
-  MVT::ValueType SrcEltVT = BV->getOperand(0).getValueType();
+ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) {
+  MVT SrcEltVT = BV->getOperand(0).getValueType();
   
   // If this is already the right type, we're done.
   if (SrcEltVT == DstEltVT) return SDOperand(BV, 0);
   
-  unsigned SrcBitSize = MVT::getSizeInBits(SrcEltVT);
-  unsigned DstBitSize = MVT::getSizeInBits(DstEltVT);
+  unsigned SrcBitSize = SrcEltVT.getSizeInBits();
+  unsigned DstBitSize = DstEltVT.getSizeInBits();
   
   // If this is a conversion of N elements of one type to N elements of another
   // type, convert each element.  This handles FP<->INT cases.
@@ -3483,29 +3543,28 @@
       Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, DstEltVT, BV->getOperand(i)));
       AddToWorkList(Ops.back().Val);
     }
-    MVT::ValueType VT =
-      MVT::getVectorType(DstEltVT,
-                         MVT::getVectorNumElements(BV->getValueType(0)));
+    MVT VT = MVT::getVectorVT(DstEltVT,
+                              BV->getValueType(0).getVectorNumElements());
     return DAG.getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size());
   }
   
   // Otherwise, we're growing or shrinking the elements.  To avoid having to
   // handle annoying details of growing/shrinking FP values, we convert them to
   // int first.
-  if (MVT::isFloatingPoint(SrcEltVT)) {
+  if (SrcEltVT.isFloatingPoint()) {
     // Convert the input float vector to an int vector where the elements are
     // the same size.
     assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!");
-    MVT::ValueType IntVT = SrcEltVT == MVT::f32 ? MVT::i32 : MVT::i64;
+    MVT IntVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits());
     BV = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, IntVT).Val;
     SrcEltVT = IntVT;
   }
   
   // Now we know the input is an integer vector.  If the output is a FP type,
   // convert to integer first, then to FP of the right size.
-  if (MVT::isFloatingPoint(DstEltVT)) {
+  if (DstEltVT.isFloatingPoint()) {
     assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!");
-    MVT::ValueType TmpVT = DstEltVT == MVT::f32 ? MVT::i32 : MVT::i64;
+    MVT TmpVT = MVT::getIntegerVT(DstEltVT.getSizeInBits());
     SDNode *Tmp = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, TmpVT).Val;
     
     // Next, convert to FP elements of the same size.
@@ -3514,7 +3573,7 @@
   
   // Okay, we know the src/dst types are both integers of differing types.
   // Handling growing first.
-  assert(MVT::isInteger(SrcEltVT) && MVT::isInteger(DstEltVT));
+  assert(SrcEltVT.isInteger() && DstEltVT.isInteger());
   if (SrcBitSize < DstBitSize) {
     unsigned NumInputsPerOutput = DstBitSize/SrcBitSize;
     
@@ -3541,7 +3600,7 @@
         Ops.push_back(DAG.getConstant(NewBits, DstEltVT));
     }
 
-    MVT::ValueType VT = MVT::getVectorType(DstEltVT, Ops.size()); 
+    MVT VT = MVT::getVectorVT(DstEltVT, Ops.size());
     return DAG.getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size());
   }
   
@@ -3549,8 +3608,7 @@
   // turns into multiple outputs.
   bool isS2V = ISD::isScalarToVector(BV);
   unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
-  MVT::ValueType VT = MVT::getVectorType(DstEltVT,
-                                     NumOutputsPerInput * BV->getNumOperands());
+  MVT VT = MVT::getVectorVT(DstEltVT, NumOutputsPerInput*BV->getNumOperands());
   SmallVector<SDOperand, 8> Ops;
   for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
     if (BV->getOperand(i).getOpcode() == ISD::UNDEF) {
@@ -3582,10 +3640,10 @@
   SDOperand N1 = N->getOperand(1);
   ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
   ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // fold vector ops
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SDOperand FoldedVOp = SimplifyVBinOp(N);
     if (FoldedVOp.Val) return FoldedVOp;
   }
@@ -3619,10 +3677,10 @@
   SDOperand N1 = N->getOperand(1);
   ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
   ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // fold vector ops
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SDOperand FoldedVOp = SimplifyVBinOp(N);
     if (FoldedVOp.Val) return FoldedVOp;
   }
@@ -3649,10 +3707,10 @@
   SDOperand N1 = N->getOperand(1);
   ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
   ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
 
   // fold vector ops
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SDOperand FoldedVOp = SimplifyVBinOp(N);
     if (FoldedVOp.Val) return FoldedVOp;
   }
@@ -3696,10 +3754,10 @@
   SDOperand N1 = N->getOperand(1);
   ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
   ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
 
   // fold vector ops
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SDOperand FoldedVOp = SimplifyVBinOp(N);
     if (FoldedVOp.Val) return FoldedVOp;
   }
@@ -3729,7 +3787,7 @@
   SDOperand N1 = N->getOperand(1);
   ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
   ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
 
   // fold (frem c1, c2) -> fmod(c1,c2)
   if (N0CFP && N1CFP && VT != MVT::ppcf128)
@@ -3743,7 +3801,7 @@
   SDOperand N1 = N->getOperand(1);
   ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
   ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
 
   if (N0CFP && N1CFP && VT != MVT::ppcf128)  // Constant fold
     return DAG.getNode(ISD::FCOPYSIGN, VT, N0, N1);
@@ -3786,29 +3844,52 @@
 SDOperand DAGCombiner::visitSINT_TO_FP(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
-  MVT::ValueType VT = N->getValueType(0);
-  
+  MVT VT = N->getValueType(0);
+  MVT OpVT = N0.getValueType();
+
   // fold (sint_to_fp c1) -> c1fp
-  if (N0C && N0.getValueType() != MVT::ppcf128)
+  if (N0C && OpVT != MVT::ppcf128)
     return DAG.getNode(ISD::SINT_TO_FP, VT, N0);
+  
+  // If the input is a legal type, and SINT_TO_FP is not legal on this target,
+  // but UINT_TO_FP is legal on this target, try to convert.
+  if (!TLI.isOperationLegal(ISD::SINT_TO_FP, OpVT) &&
+      TLI.isOperationLegal(ISD::UINT_TO_FP, OpVT)) {
+    // If the sign bit is known to be zero, we can change this to UINT_TO_FP. 
+    if (DAG.SignBitIsZero(N0))
+      return DAG.getNode(ISD::UINT_TO_FP, VT, N0);
+  }
+  
+  
   return SDOperand();
 }
 
 SDOperand DAGCombiner::visitUINT_TO_FP(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
   ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
+  MVT OpVT = N0.getValueType();
 
   // fold (uint_to_fp c1) -> c1fp
-  if (N0C && N0.getValueType() != MVT::ppcf128)
+  if (N0C && OpVT != MVT::ppcf128)
     return DAG.getNode(ISD::UINT_TO_FP, VT, N0);
+  
+  // If the input is a legal type, and UINT_TO_FP is not legal on this target,
+  // but SINT_TO_FP is legal on this target, try to convert.
+  if (!TLI.isOperationLegal(ISD::UINT_TO_FP, OpVT) &&
+      TLI.isOperationLegal(ISD::SINT_TO_FP, OpVT)) {
+    // If the sign bit is known to be zero, we can change this to SINT_TO_FP. 
+    if (DAG.SignBitIsZero(N0))
+      return DAG.getNode(ISD::SINT_TO_FP, VT, N0);
+  }
+  
   return SDOperand();
 }
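
Both new folds rest on the same observation: once the sign bit of the operand is known to be zero, signed and unsigned integer-to-FP conversion produce the same value, so whichever opcode the target supports can be used. A standalone check:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t u = 0; u <= 0x7fffffffu; u += 0x01000001u) {
    int32_t s = (int32_t)u;           // sign bit clear, so s == u
    assert((double)s == (double)u);   // SINT_TO_FP == UINT_TO_FP
  }
  return 0;
}
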
 
 SDOperand DAGCombiner::visitFP_TO_SINT(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
   ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // fold (fp_to_sint c1fp) -> c1
   if (N0CFP)
@@ -3819,7 +3900,7 @@
 SDOperand DAGCombiner::visitFP_TO_UINT(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
   ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // fold (fp_to_uint c1fp) -> c1
   if (N0CFP && VT != MVT::ppcf128)
@@ -3831,7 +3912,7 @@
   SDOperand N0 = N->getOperand(0);
   SDOperand N1 = N->getOperand(1);
   ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // fold (fp_round c1fp) -> c1fp
   if (N0CFP && N0.getValueType() != MVT::ppcf128)
@@ -3862,8 +3943,8 @@
 
 SDOperand DAGCombiner::visitFP_ROUND_INREG(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
-  MVT::ValueType VT = N->getValueType(0);
-  MVT::ValueType EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
+  MVT VT = N->getValueType(0);
+  MVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
   ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
   
   // fold (fp_round_inreg c1fp) -> c1fp
@@ -3877,7 +3958,7 @@
 SDOperand DAGCombiner::visitFP_EXTEND(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
   ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
   if (N->hasOneUse() && 
@@ -3893,14 +3974,15 @@
   if (N0.getOpcode() == ISD::FP_ROUND && N0.Val->getConstantOperandVal(1) == 1){
     SDOperand In = N0.getOperand(0);
     if (In.getValueType() == VT) return In;
-    if (VT < In.getValueType())
+    if (VT.bitsLT(In.getValueType()))
       return DAG.getNode(ISD::FP_ROUND, VT, In, N0.getOperand(1));
     return DAG.getNode(ISD::FP_EXTEND, VT, In);
   }
       
   // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
   if (ISD::isNON_EXTLoad(N0.Val) && N0.hasOneUse() &&
-      (!AfterLegalize||TLI.isLoadXLegal(ISD::EXTLOAD, N0.getValueType()))) {
+      ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) ||
+       TLI.isLoadXLegal(ISD::EXTLOAD, N0.getValueType()))) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     SDOperand ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, LN0->getChain(),
                                        LN0->getBasePtr(), LN0->getSrcValue(),
@@ -3914,8 +3996,7 @@
               ExtLoad.getValue(1));
     return SDOperand(N, 0);   // Return N so it doesn't get rechecked!
   }
-  
-  
+
   return SDOperand();
 }
 
@@ -3928,13 +4009,13 @@
   // Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading
   // constant pool values.
   if (N0.getOpcode() == ISD::BIT_CONVERT && N0.Val->hasOneUse() &&
-      MVT::isInteger(N0.getOperand(0).getValueType()) &&
-      !MVT::isVector(N0.getOperand(0).getValueType())) {
+      N0.getOperand(0).getValueType().isInteger() &&
+      !N0.getOperand(0).getValueType().isVector()) {
     SDOperand Int = N0.getOperand(0);
-    MVT::ValueType IntVT = Int.getValueType();
-    if (MVT::isInteger(IntVT) && !MVT::isVector(IntVT)) {
+    MVT IntVT = Int.getValueType();
+    if (IntVT.isInteger() && !IntVT.isVector()) {
       Int = DAG.getNode(ISD::XOR, IntVT, Int, 
-                        DAG.getConstant(MVT::getIntVTSignBit(IntVT), IntVT));
+                        DAG.getConstant(IntVT.getIntegerVTSignBit(), IntVT));
       AddToWorkList(Int.Val);
       return DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0), Int);
     }
@@ -3946,7 +4027,7 @@
 SDOperand DAGCombiner::visitFABS(SDNode *N) {
   SDOperand N0 = N->getOperand(0);
   ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // fold (fabs c1) -> fabs(c1)
   if (N0CFP && VT != MVT::ppcf128)
@@ -3962,13 +4043,13 @@
   // Transform fabs(bitconvert(x)) -> bitconvert(x&~sign) to avoid loading
   // constant pool values.
   if (N0.getOpcode() == ISD::BIT_CONVERT && N0.Val->hasOneUse() &&
-      MVT::isInteger(N0.getOperand(0).getValueType()) &&
-      !MVT::isVector(N0.getOperand(0).getValueType())) {
+      N0.getOperand(0).getValueType().isInteger() &&
+      !N0.getOperand(0).getValueType().isVector()) {
     SDOperand Int = N0.getOperand(0);
-    MVT::ValueType IntVT = Int.getValueType();
-    if (MVT::isInteger(IntVT) && !MVT::isVector(IntVT)) {
+    MVT IntVT = Int.getValueType();
+    if (IntVT.isInteger() && !IntVT.isVector()) {
       Int = DAG.getNode(ISD::AND, IntVT, Int, 
-                        DAG.getConstant(~MVT::getIntVTSignBit(IntVT), IntVT));
+                        DAG.getConstant(~IntVT.getIntegerVTSignBit(), IntVT));
       AddToWorkList(Int.Val);
       return DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0), Int);
     }
@@ -4005,7 +4086,7 @@
   CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1));
   SDOperand CondLHS = N->getOperand(2), CondRHS = N->getOperand(3);
   
-  // Use SimplifySetCC  to simplify SETCC's.
+  // Use SimplifySetCC to simplify SETCC's.
   SDOperand Simp = SimplifySetCC(MVT::i1, CondLHS, CondRHS, CC->get(), false);
   if (Simp.Val) AddToWorkList(Simp.Val);
 
@@ -4028,8 +4109,8 @@
 }
 
 
-/// CombineToPreIndexedLoadStore - Try turning a load / store and a
-/// pre-indexed load / store when the base pointer is a add or subtract
+/// CombineToPreIndexedLoadStore - Try turning a load / store into a
+/// pre-indexed load / store when the base pointer is an add or subtract
 /// and it has other uses besides the load / store. After the
 /// transformation, the new indexed load / store has effectively folded
 /// the add / subtract in and all of its other uses are redirected to the
@@ -4040,7 +4121,7 @@
 
   bool isLoad = true;
   SDOperand Ptr;
-  MVT::ValueType VT;
+  MVT VT;
   if (LoadSDNode *LD  = dyn_cast<LoadSDNode>(N)) {
     if (LD->isIndexed())
       return false;
@@ -4150,7 +4231,7 @@
   return true;
 }
 
-/// CombineToPostIndexedLoadStore - Try combine a load / store with a
+/// CombineToPostIndexedLoadStore - Try to combine a load / store with a
 /// add / sub of the base pointer node into a post-indexed load / store.
 /// The transformation folded the add / subtract into the new indexed
 /// load / store effectively and all of its uses are redirected to the
@@ -4161,7 +4242,7 @@
 
   bool isLoad = true;
   SDOperand Ptr;
-  MVT::ValueType VT;
+  MVT VT;
   if (LoadSDNode *LD  = dyn_cast<LoadSDNode>(N)) {
     if (LD->isIndexed())
       return false;
@@ -4454,32 +4535,39 @@
                                  ST->isVolatile(), Align);
     }
   }
-  
+
   // If this is a store of a bit convert, store the input value if the
   // resultant store does not need a higher alignment than the original.
   if (Value.getOpcode() == ISD::BIT_CONVERT && !ST->isTruncatingStore() &&
       ST->isUnindexed()) {
     unsigned Align = ST->getAlignment();
-    MVT::ValueType SVT = Value.getOperand(0).getValueType();
+    MVT SVT = Value.getOperand(0).getValueType();
     unsigned OrigAlign = TLI.getTargetMachine().getTargetData()->
-      getABITypeAlignment(MVT::getTypeForValueType(SVT));
-    if (Align <= OrigAlign && TLI.isOperationLegal(ISD::STORE, SVT))
+      getABITypeAlignment(SVT.getTypeForMVT());
+    if (Align <= OrigAlign &&
+        ((!AfterLegalize && !ST->isVolatile()) ||
+         TLI.isOperationLegal(ISD::STORE, SVT)))
       return DAG.getStore(Chain, Value.getOperand(0), Ptr, ST->getSrcValue(),
-                          ST->getSrcValueOffset(), ST->isVolatile(), Align);
+                          ST->getSrcValueOffset(), ST->isVolatile(), OrigAlign);
   }
-  
+
   // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Value)) {
+    // NOTE: If the original store is volatile, this transform must not increase
+    // the number of stores.  For example, on x86-32 an f64 can be stored in one
+    // processor operation but an i64 (which is not legal) requires two.  So the
+    // transform should not be done in this case.
     if (Value.getOpcode() != ISD::TargetConstantFP) {
       SDOperand Tmp;
-      switch (CFP->getValueType(0)) {
+      switch (CFP->getValueType(0).getSimpleVT()) {
       default: assert(0 && "Unknown FP type");
       case MVT::f80:    // We don't do this for these yet.
       case MVT::f128:
       case MVT::ppcf128:
         break;
       case MVT::f32:
-        if (!AfterLegalize || TLI.isTypeLegal(MVT::i32)) {
+        if ((!AfterLegalize && !ST->isVolatile()) ||
+            TLI.isOperationLegal(ISD::STORE, MVT::i32)) {
           Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
                               convertToAPInt().getZExtValue(), MVT::i32);
           return DAG.getStore(Chain, Tmp, Ptr, ST->getSrcValue(),
@@ -4488,13 +4576,15 @@
         }
         break;
       case MVT::f64:
-        if (!AfterLegalize || TLI.isTypeLegal(MVT::i64)) {
+        if ((!AfterLegalize && !ST->isVolatile()) ||
+            TLI.isOperationLegal(ISD::STORE, MVT::i64)) {
           Tmp = DAG.getConstant(CFP->getValueAPF().convertToAPInt().
                                   getZExtValue(), MVT::i64);
           return DAG.getStore(Chain, Tmp, Ptr, ST->getSrcValue(),
                               ST->getSrcValueOffset(), ST->isVolatile(),
                               ST->getAlignment());
-        } else if (TLI.isTypeLegal(MVT::i32)) {
+        } else if (!ST->isVolatile() &&
+                   TLI.isOperationLegal(ISD::STORE, MVT::i32)) {
           // Many FP stores are not made apparent until after legalize, e.g. for
           // argument passing.  Since this is so common, custom legalize the
           // 64-bit integer store into two 32-bit stores.
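
The "store float 1.0, Ptr" to "store int C, Ptr" rewrite above stores the constant's bit pattern instead of the FP value; the bytes written are identical. A standalone check for f32, whose encoding of 1.0 is 0x3f800000:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  float f = 1.0f;
  uint32_t asInt;
  std::memcpy(&asInt, &f, sizeof f);
  assert(asInt == 0x3f800000u);   // store i32 0x3f800000 <=> store float 1.0
  return 0;
}
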
@@ -4557,14 +4647,14 @@
 
   // FIXME: is there such a thing as a truncating indexed store?
   if (ST->isTruncatingStore() && ST->isUnindexed() &&
-      MVT::isInteger(Value.getValueType())) {
+      Value.getValueType().isInteger()) {
     // See if we can simplify the input to this truncstore with knowledge that
     // only the low bits are being used.  For example:
     // "truncstore (or (shl x, 8), y), i8"  -> "truncstore y, i8"
     SDOperand Shorter = 
       GetDemandedBits(Value,
                  APInt::getLowBitsSet(Value.getValueSizeInBits(),
-                                      MVT::getSizeInBits(ST->getMemoryVT())));
+                                      ST->getMemoryVT().getSizeInBits()));
     AddToWorkList(Value.Val);
     if (Shorter.Val)
       return DAG.getTruncStore(Chain, Shorter, Ptr, ST->getSrcValue(),
@@ -4576,7 +4666,7 @@
     if (SimplifyDemandedBits(Value,
                              APInt::getLowBitsSet(
                                Value.getValueSizeInBits(),
-                               MVT::getSizeInBits(ST->getMemoryVT()))))
+                               ST->getMemoryVT().getSizeInBits())))
       return SDOperand(N, 0);
   }
   
@@ -4592,19 +4682,18 @@
       return Chain;
     }
   }
-  
+
   // If this is an FP_ROUND or TRUNC followed by a store, fold this into a
   // truncating store.  We can do this even if this is already a truncstore.
   if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
-      && TLI.isTypeLegal(Value.getOperand(0).getValueType()) &&
-      Value.Val->hasOneUse() && ST->isUnindexed() &&
+      && Value.Val->hasOneUse() && ST->isUnindexed() &&
       TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
                             ST->getMemoryVT())) {
     return DAG.getTruncStore(Chain, Value.getOperand(0), Ptr, ST->getSrcValue(),
                              ST->getSrcValueOffset(), ST->getMemoryVT(),
                              ST->isVolatile(), ST->getAlignment());
   }
-  
+
   return SDOperand();
 }
 
@@ -4628,49 +4717,80 @@
 }
 
 SDOperand DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
+  // (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size)
+  // (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size)
+  // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr)
+
+  // Perform only after legalization to ensure build_vector / vector_shuffle
+  // optimizations have already been done.
+  if (!AfterLegalize) return SDOperand();
+
   SDOperand InVec = N->getOperand(0);
   SDOperand EltNo = N->getOperand(1);
 
-  // (vextract (v4f32 s2v (f32 load $addr)), 0) -> (f32 load $addr)
-  // (vextract (v4i32 bc (v4f32 s2v (f32 load $addr))), 0) -> (i32 load $addr)
   if (isa<ConstantSDNode>(EltNo)) {
     unsigned Elt = cast<ConstantSDNode>(EltNo)->getValue();
     bool NewLoad = false;
-    if (Elt == 0) {
-      MVT::ValueType VT = InVec.getValueType();
-      MVT::ValueType EVT = MVT::getVectorElementType(VT);
-      MVT::ValueType LVT = EVT;
-      unsigned NumElts = MVT::getVectorNumElements(VT);
-      if (InVec.getOpcode() == ISD::BIT_CONVERT) {
-        MVT::ValueType BCVT = InVec.getOperand(0).getValueType();
-        if (!MVT::isVector(BCVT) ||
-            NumElts != MVT::getVectorNumElements(BCVT))
-          return SDOperand();
+    MVT VT = InVec.getValueType();
+    MVT EVT = VT.getVectorElementType();
+    MVT LVT = EVT;
+    if (InVec.getOpcode() == ISD::BIT_CONVERT) {
+      MVT BCVT = InVec.getOperand(0).getValueType();
+      if (!BCVT.isVector() || EVT.bitsGT(BCVT.getVectorElementType()))
+        return SDOperand();
+      InVec = InVec.getOperand(0);
+      EVT = BCVT.getVectorElementType();
+      NewLoad = true;
+    }
+
+    LoadSDNode *LN0 = NULL;
+    if (ISD::isNormalLoad(InVec.Val))
+      LN0 = cast<LoadSDNode>(InVec);
+    else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR &&
+             InVec.getOperand(0).getValueType() == EVT &&
+             ISD::isNormalLoad(InVec.getOperand(0).Val)) {
+      LN0 = cast<LoadSDNode>(InVec.getOperand(0));
+    } else if (InVec.getOpcode() == ISD::VECTOR_SHUFFLE) {
+      // (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1)
+      // =>
+      // (load $addr+1*size)
+      unsigned Idx = cast<ConstantSDNode>(InVec.getOperand(2).
+                                          getOperand(Elt))->getValue();
+      unsigned NumElems = InVec.getOperand(2).getNumOperands();
+      InVec = (Idx < NumElems) ? InVec.getOperand(0) : InVec.getOperand(1);
+      if (InVec.getOpcode() == ISD::BIT_CONVERT)
         InVec = InVec.getOperand(0);
-        EVT = MVT::getVectorElementType(BCVT);
-        NewLoad = true;
+      if (ISD::isNormalLoad(InVec.Val)) {
+        LN0 = cast<LoadSDNode>(InVec);
+        Elt = (Idx < NumElems) ? Idx : Idx - NumElems;
       }
-      if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR &&
-          InVec.getOperand(0).getValueType() == EVT &&
-          ISD::isNormalLoad(InVec.getOperand(0).Val) &&
-          InVec.getOperand(0).hasOneUse()) {
-        LoadSDNode *LN0 = cast<LoadSDNode>(InVec.getOperand(0));
-        unsigned Align = LN0->getAlignment();
-        if (NewLoad) {
-          // Check the resultant load doesn't need a higher alignment than the
-          // original load.
-          unsigned NewAlign = TLI.getTargetMachine().getTargetData()->
-            getABITypeAlignment(MVT::getTypeForValueType(LVT));
-          if (!TLI.isOperationLegal(ISD::LOAD, LVT) || NewAlign > Align)
-            return SDOperand();
-          Align = NewAlign;
-        }
+    }
+    if (!LN0 || !LN0->hasOneUse() || LN0->isVolatile())
+      return SDOperand();
 
-        return DAG.getLoad(LVT, LN0->getChain(), LN0->getBasePtr(),
-                           LN0->getSrcValue(), LN0->getSrcValueOffset(),
-                           LN0->isVolatile(), Align);
-      }
+    unsigned Align = LN0->getAlignment();
+    if (NewLoad) {
+      // Check the resultant load doesn't need a higher alignment than the
+      // original load.
+      unsigned NewAlign = TLI.getTargetMachine().getTargetData()->
+        getABITypeAlignment(LVT.getTypeForMVT());
+      if (NewAlign > Align || !TLI.isOperationLegal(ISD::LOAD, LVT))
+        return SDOperand();
+      Align = NewAlign;
     }
+
+    SDOperand NewPtr = LN0->getBasePtr();
+    if (Elt) {
+      unsigned PtrOff = LVT.getSizeInBits() * Elt / 8;
+      MVT PtrType = NewPtr.getValueType();
+      if (TLI.isBigEndian())
+        PtrOff = VT.getSizeInBits() / 8 - PtrOff;
+      NewPtr = DAG.getNode(ISD::ADD, PtrType, NewPtr,
+                           DAG.getConstant(PtrOff, PtrType));
+    }
+    return DAG.getLoad(LVT, LN0->getChain(), NewPtr,
+                       LN0->getSrcValue(), LN0->getSrcValueOffset(),
+                       LN0->isVolatile(), Align);
   }
   return SDOperand();
 }
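
The rewritten visitEXTRACT_VECTOR_ELT turns an extract of a loaded vector into a scalar load at the element's offset; the code above also adjusts the offset for big-endian targets. A standalone sketch of the address arithmetic, using an in-memory array of four floats:

#include <cassert>
#include <cstring>

int main() {
  float vec[4] = {1.0f, 2.0f, 3.0f, 4.0f};                // the vector in memory
  const char *addr = reinterpret_cast<const char *>(vec);
  for (unsigned c = 0; c != 4; ++c) {
    float elt;
    std::memcpy(&elt, addr + c * sizeof(float), sizeof(float));
    assert(elt == vec[c]);                                // load $addr + c*size
  }
  return 0;
}
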
@@ -4678,9 +4798,9 @@
 
 SDOperand DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
   unsigned NumInScalars = N->getNumOperands();
-  MVT::ValueType VT = N->getValueType(0);
-  unsigned NumElts = MVT::getVectorNumElements(VT);
-  MVT::ValueType EltType = MVT::getVectorElementType(VT);
+  MVT VT = N->getValueType(0);
+  unsigned NumElts = VT.getVectorNumElements();
+  MVT EltType = VT.getVectorElementType();
 
   // Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
   // operations.  If so, and if the EXTRACT_VECTOR_ELT vector inputs come from
@@ -4744,7 +4864,7 @@
     }
     
     // Add count and size info.
-    MVT::ValueType BuildVecVT = MVT::getVectorType(TLI.getPointerTy(), NumElts);
+    MVT BuildVecVT = MVT::getVectorVT(TLI.getPointerTy(), NumElts);
     
     // Return the new VECTOR_SHUFFLE node.
     SDOperand Ops[5];
@@ -4847,7 +4967,7 @@
     // look through conversions that change things like v4f32 to v2f64.
     if (V->getOpcode() == ISD::BIT_CONVERT) {
       SDOperand ConvInput = V->getOperand(0);
-      if (MVT::getVectorNumElements(ConvInput.getValueType()) == NumElts)
+      if (ConvInput.getValueType().getVectorNumElements() == NumElts)
         V = ConvInput.Val;
     }
 
@@ -4920,7 +5040,7 @@
       std::vector<SDOperand> IdxOps;
       unsigned NumOps = RHS.getNumOperands();
       unsigned NumElts = NumOps;
-      MVT::ValueType EVT = MVT::getVectorElementType(RHS.getValueType());
+      MVT EVT = RHS.getValueType().getVectorElementType();
       for (unsigned i = 0; i != NumElts; ++i) {
         SDOperand Elt = RHS.getOperand(i);
         if (!isa<ConstantSDNode>(Elt))
@@ -4938,7 +5058,7 @@
         return SDOperand();
 
       // Return the new VECTOR_SHUFFLE node.
-      MVT::ValueType VT = MVT::getVectorType(EVT, NumElts);
+      MVT VT = MVT::getVectorVT(EVT, NumElts);
       std::vector<SDOperand> Ops;
       LHS = DAG.getNode(ISD::BIT_CONVERT, VT, LHS);
       Ops.push_back(LHS);
@@ -4966,10 +5086,10 @@
   // things. Simplifying them may result in a loss of legality.
   if (AfterLegalize) return SDOperand();
 
-  MVT::ValueType VT = N->getValueType(0);
-  assert(MVT::isVector(VT) && "SimplifyVBinOp only works on vectors!");
+  MVT VT = N->getValueType(0);
+  assert(VT.isVector() && "SimplifyVBinOp only works on vectors!");
 
-  MVT::ValueType EltType = MVT::getVectorElementType(VT);
+  MVT EltType = VT.getVectorElementType();
   SDOperand LHS = N->getOperand(0);
   SDOperand RHS = N->getOperand(1);
   SDOperand Shuffle = XformToShuffleWithZero(N);
@@ -5009,7 +5129,7 @@
     }
     
     if (Ops.size() == LHS.getNumOperands()) {
-      MVT::ValueType VT = LHS.getValueType();
+      MVT VT = LHS.getValueType();
       return DAG.getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size());
     }
   }
@@ -5059,6 +5179,9 @@
     // This triggers in things like "select bool X, 10.0, 123.0" after the FP
     // constants have been dropped into the constant pool.
     if (LHS.getOpcode() == ISD::LOAD &&
+        // Do not let this transformation reduce the number of volatile loads.
+        !cast<LoadSDNode>(LHS)->isVolatile() &&
+        !cast<LoadSDNode>(RHS)->isVolatile() &&
         // Token chains must be identical.
         LHS.getOperand(0) == RHS.getOperand(0)) {
       LoadSDNode *LLD = cast<LoadSDNode>(LHS);
@@ -5131,7 +5254,7 @@
                                         SDOperand N2, SDOperand N3,
                                         ISD::CondCode CC, bool NotExtCompare) {
   
-  MVT::ValueType VT = N2.getValueType();
+  MVT VT = N2.getValueType();
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val);
   ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.Val);
   ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3.Val);
@@ -5169,32 +5292,32 @@
   // Check to see if we can perform the "gzip trick", transforming
   // select_cc setlt X, 0, A, 0 -> and (sra X, size(X)-1), A
   if (N1C && N3C && N3C->isNullValue() && CC == ISD::SETLT &&
-      MVT::isInteger(N0.getValueType()) && 
-      MVT::isInteger(N2.getValueType()) && 
+      N0.getValueType().isInteger() &&
+      N2.getValueType().isInteger() &&
       (N1C->isNullValue() ||                         // (a < 0) ? b : 0
        (N1C->getAPIntValue() == 1 && N0 == N2))) {   // (a < 1) ? a : 0
-    MVT::ValueType XType = N0.getValueType();
-    MVT::ValueType AType = N2.getValueType();
-    if (XType >= AType) {
+    MVT XType = N0.getValueType();
+    MVT AType = N2.getValueType();
+    if (XType.bitsGE(AType)) {
       // and (sra X, size(X)-1), A -> "and (srl X, C2), A" iff A is a
       // single-bit constant.
       if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue()-1)) == 0)) {
         unsigned ShCtV = N2C->getAPIntValue().logBase2();
-        ShCtV = MVT::getSizeInBits(XType)-ShCtV-1;
+        ShCtV = XType.getSizeInBits()-ShCtV-1;
         SDOperand ShCt = DAG.getConstant(ShCtV, TLI.getShiftAmountTy());
         SDOperand Shift = DAG.getNode(ISD::SRL, XType, N0, ShCt);
         AddToWorkList(Shift.Val);
-        if (XType > AType) {
+        if (XType.bitsGT(AType)) {
           Shift = DAG.getNode(ISD::TRUNCATE, AType, Shift);
           AddToWorkList(Shift.Val);
         }
         return DAG.getNode(ISD::AND, AType, Shift, N2);
       }
       SDOperand Shift = DAG.getNode(ISD::SRA, XType, N0,
-                                    DAG.getConstant(MVT::getSizeInBits(XType)-1,
+                                    DAG.getConstant(XType.getSizeInBits()-1,
                                                     TLI.getShiftAmountTy()));
       AddToWorkList(Shift.Val);
-      if (XType > AType) {
+      if (XType.bitsGT(AType)) {
         Shift = DAG.getNode(ISD::TRUNCATE, AType, Shift);
         AddToWorkList(Shift.Val);
       }
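
A standalone check of the "gzip trick" identity used here, assuming the usual arithmetic right shift for signed integers: for 32-bit x, (x < 0 ? a : 0) equals (x >> 31) & a.

#include <cassert>
#include <cstdint>

int main() {
  int32_t vals[] = {-5, -1, 0, 1, 12345, INT32_MIN, INT32_MAX};
  uint32_t a = 0xdeadbeefu;
  for (int32_t x : vals) {
    uint32_t mask = (uint32_t)(x >> 31);        // sra X, size(X)-1
    assert(((x < 0) ? a : 0u) == (mask & a));   // select_cc setlt X, 0, A, 0
  }
  return 0;
}
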
@@ -5218,7 +5341,7 @@
     // cast from setcc result type to select result type
     if (AfterLegalize) {
       SCC  = DAG.getSetCC(TLI.getSetCCResultType(N0), N0, N1, CC);
-      if (N2.getValueType() < SCC.getValueType())
+      if (N2.getValueType().bitsLT(SCC.getValueType()))
         Temp = DAG.getZeroExtendInReg(SCC, N2.getValueType());
       else
         Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getValueType(), SCC);
@@ -5241,8 +5364,9 @@
   // FIXME: Turn all of these into setcc if setcc is legal;
   // otherwise, go ahead with the folds.
   if (0 && N3C && N3C->isNullValue() && N2C && (N2C->getAPIntValue() == 1ULL)) {
-    MVT::ValueType XType = N0.getValueType();
-    if (TLI.isOperationLegal(ISD::SETCC, TLI.getSetCCResultType(N0))) {
+    MVT XType = N0.getValueType();
+    if (!AfterLegalize ||
+        TLI.isOperationLegal(ISD::SETCC, TLI.getSetCCResultType(N0))) {
       SDOperand Res = DAG.getSetCC(TLI.getSetCCResultType(N0), N0, N1, CC);
       if (Res.getValueType() != VT)
         Res = DAG.getNode(ISD::ZERO_EXTEND, VT, Res);
@@ -5251,10 +5375,11 @@
     
     // seteq X, 0 -> srl (ctlz X, log2(size(X)))
     if (N1C && N1C->isNullValue() && CC == ISD::SETEQ && 
-        TLI.isOperationLegal(ISD::CTLZ, XType)) {
+        (!AfterLegalize ||
+         TLI.isOperationLegal(ISD::CTLZ, XType))) {
       SDOperand Ctlz = DAG.getNode(ISD::CTLZ, XType, N0);
       return DAG.getNode(ISD::SRL, XType, Ctlz, 
-                         DAG.getConstant(Log2_32(MVT::getSizeInBits(XType)),
+                         DAG.getConstant(Log2_32(XType.getSizeInBits()),
                                          TLI.getShiftAmountTy()));
     }
     // setgt X, 0 -> srl (and (-X, ~X), size(X)-1)
@@ -5265,13 +5390,13 @@
                                     DAG.getConstant(~0ULL, XType));
       return DAG.getNode(ISD::SRL, XType, 
                          DAG.getNode(ISD::AND, XType, NegN0, NotN0),
-                         DAG.getConstant(MVT::getSizeInBits(XType)-1,
+                         DAG.getConstant(XType.getSizeInBits()-1,
                                          TLI.getShiftAmountTy()));
     }
     // setgt X, -1 -> xor (srl (X, size(X)-1), 1)
     if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT) {
       SDOperand Sign = DAG.getNode(ISD::SRL, XType, N0,
-                                   DAG.getConstant(MVT::getSizeInBits(XType)-1,
+                                   DAG.getConstant(XType.getSizeInBits()-1,
                                                    TLI.getShiftAmountTy()));
       return DAG.getNode(ISD::XOR, XType, Sign, DAG.getConstant(1, XType));
     }
@@ -5281,10 +5406,10 @@
   // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
   if (N1C && N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE) &&
       N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1) &&
-      N2.getOperand(0) == N1 && MVT::isInteger(N0.getValueType())) {
-    MVT::ValueType XType = N0.getValueType();
+      N2.getOperand(0) == N1 && N0.getValueType().isInteger()) {
+    MVT XType = N0.getValueType();
     SDOperand Shift = DAG.getNode(ISD::SRA, XType, N0,
-                                  DAG.getConstant(MVT::getSizeInBits(XType)-1,
+                                  DAG.getConstant(XType.getSizeInBits()-1,
                                                   TLI.getShiftAmountTy()));
     SDOperand Add = DAG.getNode(ISD::ADD, XType, N0, Shift);
     AddToWorkList(Shift.Val);
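
The fold here builds the classic branch-free absolute value: Y = sra(X, size(X)-1), then (X + Y) ^ Y. A standalone check, again assuming arithmetic right shift and excluding INT32_MIN (whose negation overflows, exactly as it does for abs()):

#include <cassert>
#include <cstdint>

int main() {
  int32_t vals[] = {-7, -1, 0, 1, 42, INT32_MAX, INT32_MIN + 1};
  for (int32_t x : vals) {
    int32_t y = x >> 31;                          // Y = sra(X, 31)
    assert(((x + y) ^ y) == (x < 0 ? -x : x));    // |X|
  }
  return 0;
}
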
@@ -5296,10 +5421,10 @@
   if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT &&
       N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1)) {
     if (ConstantSDNode *SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0))) {
-      MVT::ValueType XType = N0.getValueType();
-      if (SubC->isNullValue() && MVT::isInteger(XType)) {
+      MVT XType = N0.getValueType();
+      if (SubC->isNullValue() && XType.isInteger()) {
         SDOperand Shift = DAG.getNode(ISD::SRA, XType, N0,
-                                    DAG.getConstant(MVT::getSizeInBits(XType)-1,
+                                      DAG.getConstant(XType.getSizeInBits()-1,
                                                       TLI.getShiftAmountTy()));
         SDOperand Add = DAG.getNode(ISD::ADD, XType, N0, Shift);
         AddToWorkList(Shift.Val);
@@ -5313,7 +5438,7 @@
 }
 
 /// SimplifySetCC - This is a stub for TargetLowering::SimplifySetCC.
-SDOperand DAGCombiner::SimplifySetCC(MVT::ValueType VT, SDOperand N0,
+SDOperand DAGCombiner::SimplifySetCC(MVT VT, SDOperand N0,
                                      SDOperand N1, ISD::CondCode Cond,
                                      bool foldBooleans) {
   TargetLowering::DAGCombinerInfo 
@@ -5416,13 +5541,13 @@
                         const Value *&SrcValue, int &SrcValueOffset) {
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
     Ptr = LD->getBasePtr();
-    Size = MVT::getSizeInBits(LD->getMemoryVT()) >> 3;
+    Size = LD->getMemoryVT().getSizeInBits() >> 3;
     SrcValue = LD->getSrcValue();
     SrcValueOffset = LD->getSrcValueOffset();
     return true;
   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     Ptr = ST->getBasePtr();
-    Size = MVT::getSizeInBits(ST->getMemoryVT()) >> 3;
+    Size = ST->getMemoryVT().getSizeInBits() >> 3;
     SrcValue = ST->getSrcValue();
     SrcValueOffset = ST->getSrcValueOffset();
   } else {

Modified: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp Sun Jul  6 15:45:41 2008
@@ -128,13 +128,13 @@
   /// getTypeAction - Return how we should legalize values of this type, either
   /// it is already legal or we need to expand it into multiple registers of
   /// smaller integer type, or we need to promote it to a larger type.
-  LegalizeAction getTypeAction(MVT::ValueType VT) const {
+  LegalizeAction getTypeAction(MVT VT) const {
     return (LegalizeAction)ValueTypeActions.getTypeAction(VT);
   }
 
   /// isTypeLegal - Return true if this type is legal on this target.
   ///
-  bool isTypeLegal(MVT::ValueType VT) const {
+  bool isTypeLegal(MVT VT) const {
     return getTypeAction(VT) == Legal;
   }
 
@@ -155,6 +155,13 @@
   /// no way of lowering.  "Unroll" the vector, splitting out the scalars and
   /// operating on each element individually.
   SDOperand UnrollVectorOp(SDOperand O);
+  
+  /// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
+  /// insertion index for the INSERT_VECTOR_ELT instruction.  In this case, it
+  /// is necessary to spill the vector being inserted into to memory, perform
+  /// the insert there, and then read the result back.
+  SDOperand PerformInsertVectorEltInMemory(SDOperand Vec, SDOperand Val,
+                                           SDOperand Idx);
 
   /// PromoteOp - Given an operation that produces a value in an invalid type,
   /// promote it to compute the value into a larger type.  The produced value
@@ -189,7 +196,7 @@
   ///
   /// If this is a legal shuffle, this method returns the (possibly promoted)
   /// build_vector Mask.  If it's not a legal shuffle, it returns null.
-  SDNode *isShuffleLegal(MVT::ValueType VT, SDOperand Mask) const;
+  SDNode *isShuffleLegal(MVT VT, SDOperand Mask) const;
   
   bool LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
                                     SmallPtrSet<SDNode*, 32> &NodesLeadingTo);
@@ -198,20 +205,14 @@
     
   SDOperand ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned,
                           SDOperand &Hi);
-  SDOperand ExpandIntToFP(bool isSigned, MVT::ValueType DestTy,
-                          SDOperand Source);
+  SDOperand ExpandIntToFP(bool isSigned, MVT DestTy, SDOperand Source);
 
-  SDOperand EmitStackConvert(SDOperand SrcOp, MVT::ValueType SlotVT, 
-                             MVT::ValueType DestVT);
+  SDOperand EmitStackConvert(SDOperand SrcOp, MVT SlotVT, MVT DestVT);
   SDOperand ExpandBUILD_VECTOR(SDNode *Node);
   SDOperand ExpandSCALAR_TO_VECTOR(SDNode *Node);
-  SDOperand ExpandLegalINT_TO_FP(bool isSigned,
-                                 SDOperand LegalOp,
-                                 MVT::ValueType DestVT);
-  SDOperand PromoteLegalINT_TO_FP(SDOperand LegalOp, MVT::ValueType DestVT,
-                                  bool isSigned);
-  SDOperand PromoteLegalFP_TO_INT(SDOperand LegalOp, MVT::ValueType DestVT,
-                                  bool isSigned);
+  SDOperand ExpandLegalINT_TO_FP(bool isSigned, SDOperand LegalOp, MVT DestVT);
+  SDOperand PromoteLegalINT_TO_FP(SDOperand LegalOp, MVT DestVT, bool isSigned);
+  SDOperand PromoteLegalFP_TO_INT(SDOperand LegalOp, MVT DestVT, bool isSigned);
 
   SDOperand ExpandBSWAP(SDOperand Op);
   SDOperand ExpandBitCount(unsigned Opc, SDOperand Op);
@@ -231,8 +232,7 @@
 ///
 /// Note that this will also return true for shuffles that are promoted to a
 /// different type.
-SDNode *SelectionDAGLegalize::isShuffleLegal(MVT::ValueType VT, 
-                                             SDOperand Mask) const {
+SDNode *SelectionDAGLegalize::isShuffleLegal(MVT VT, SDOperand Mask) const {
   switch (TLI.getOperationAction(ISD::VECTOR_SHUFFLE, VT)) {
   default: return 0;
   case TargetLowering::Legal:
@@ -241,11 +241,11 @@
   case TargetLowering::Promote: {
     // If this is promoted to a different type, convert the shuffle mask and
     // ask if it is legal in the promoted type!
-    MVT::ValueType NVT = TLI.getTypeToPromoteTo(ISD::VECTOR_SHUFFLE, VT);
+    MVT NVT = TLI.getTypeToPromoteTo(ISD::VECTOR_SHUFFLE, VT);
 
     // If we changed # elements, change the shuffle mask.
     unsigned NumEltsGrowth =
-      MVT::getVectorNumElements(NVT) / MVT::getVectorNumElements(VT);
+      NVT.getVectorNumElements() / VT.getVectorNumElements();
     assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!");
     if (NumEltsGrowth > 1) {
       // Renumber the elements.
@@ -313,8 +313,7 @@
   }
 
   assert(Order.size() == Visited.size() &&
-         Order.size() == 
-         (unsigned)std::distance(DAG.allnodes_begin(), DAG.allnodes_end()) &&
+         Order.size() == DAG.allnodes_size() &&
          "Error: DAG is cyclic!");
 }
 
@@ -451,20 +450,20 @@
 /// HandleOp - Legalize, Promote, or Expand the specified operand as
 /// appropriate for its type.
 void SelectionDAGLegalize::HandleOp(SDOperand Op) {
-  MVT::ValueType VT = Op.getValueType();
+  MVT VT = Op.getValueType();
   switch (getTypeAction(VT)) {
   default: assert(0 && "Bad type action!");
   case Legal:   (void)LegalizeOp(Op); break;
   case Promote: (void)PromoteOp(Op); break;
   case Expand:
-    if (!MVT::isVector(VT)) {
+    if (!VT.isVector()) {
       // If this is an illegal scalar, expand it into its two component
       // pieces.
       SDOperand X, Y;
       if (Op.getOpcode() == ISD::TargetConstant)
         break;  // Allow illegal target nodes.
       ExpandOp(Op, X, Y);
-    } else if (MVT::getVectorNumElements(VT) == 1) {
+    } else if (VT.getVectorNumElements() == 1) {
       // If this is an illegal single element vector, convert it to a
       // scalar operation.
       (void)ScalarizeVectorOp(Op);
@@ -490,7 +489,7 @@
   // double.  This shrinks FP constants and canonicalizes them for targets where
   // an FP extending load is the same cost as a normal load (such as on the x87
   // fp stack or PPC FP unit).
-  MVT::ValueType VT = CFP->getValueType(0);
+  MVT VT = CFP->getValueType(0);
   ConstantFP *LLVMC = ConstantFP::get(CFP->getValueAPF());
   if (!UseCP) {
     if (VT!=MVT::f64 && VT!=MVT::f32)
@@ -499,16 +498,16 @@
                            (VT == MVT::f64) ? MVT::i64 : MVT::i32);
   }
 
-  MVT::ValueType OrigVT = VT;
-  MVT::ValueType SVT = VT;
+  MVT OrigVT = VT;
+  MVT SVT = VT;
   while (SVT != MVT::f32) {
-    SVT = (unsigned)SVT - 1;
+    SVT = (MVT::SimpleValueType)(SVT.getSimpleVT() - 1);
     if (CFP->isValueValidForType(SVT, CFP->getValueAPF()) &&
         // Only do this if the target has a native EXTLOAD instruction from
         // smaller type.
         TLI.isLoadXLegal(ISD::EXTLOAD, SVT) &&
         TLI.ShouldShrinkFPConstant(OrigVT)) {
-      const Type *SType = MVT::getTypeForValueType(SVT);
+      const Type *SType = SVT.getTypeForMVT();
       LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
       VT = SVT;
       Extend = true;
@@ -528,13 +527,13 @@
 /// ExpandFCOPYSIGNToBitwiseOps - Expands fcopysign to a series of bitwise
 /// operations.
 static
-SDOperand ExpandFCOPYSIGNToBitwiseOps(SDNode *Node, MVT::ValueType NVT,
+SDOperand ExpandFCOPYSIGNToBitwiseOps(SDNode *Node, MVT NVT,
                                       SelectionDAG &DAG, TargetLowering &TLI) {
-  MVT::ValueType VT = Node->getValueType(0);
-  MVT::ValueType SrcVT = Node->getOperand(1).getValueType();
+  MVT VT = Node->getValueType(0);
+  MVT SrcVT = Node->getOperand(1).getValueType();
   assert((SrcVT == MVT::f32 || SrcVT == MVT::f64) &&
          "fcopysign expansion only supported for f32 and f64");
-  MVT::ValueType SrcNVT = (SrcVT == MVT::f64) ? MVT::i64 : MVT::i32;
+  MVT SrcNVT = (SrcVT == MVT::f64) ? MVT::i64 : MVT::i32;
 
   // First get the sign bit of second operand.
   SDOperand Mask1 = (SrcVT == MVT::f64)
@@ -544,7 +543,7 @@
   SDOperand SignBit= DAG.getNode(ISD::BIT_CONVERT, SrcNVT, Node->getOperand(1));
   SignBit = DAG.getNode(ISD::AND, SrcNVT, SignBit, Mask1);
   // Shift right or sign-extend it if the two operands have different types.
-  int SizeDiff = MVT::getSizeInBits(SrcNVT) - MVT::getSizeInBits(NVT);
+  int SizeDiff = SrcNVT.getSizeInBits() - NVT.getSizeInBits();
   if (SizeDiff > 0) {
     SignBit = DAG.getNode(ISD::SRL, SrcNVT, SignBit,
                           DAG.getConstant(SizeDiff, TLI.getShiftAmountTy()));
@@ -572,17 +571,17 @@
   SDOperand Chain = ST->getChain();
   SDOperand Ptr = ST->getBasePtr();
   SDOperand Val = ST->getValue();
-  MVT::ValueType VT = Val.getValueType();
+  MVT VT = Val.getValueType();
   int Alignment = ST->getAlignment();
   int SVOffset = ST->getSrcValueOffset();
-  if (MVT::isFloatingPoint(ST->getMemoryVT()) || 
-      MVT::isVector(ST->getMemoryVT())) {
+  if (ST->getMemoryVT().isFloatingPoint() ||
+      ST->getMemoryVT().isVector()) {
     // Expand to a bitconvert of the value to the integer type of the 
     // same size, then a (misaligned) int store.
-    MVT::ValueType intVT;
-    if (MVT::is128BitVector(VT) || VT == MVT::ppcf128 || VT == MVT::f128)
+    MVT intVT;
+    if (VT.is128BitVector() || VT == MVT::ppcf128 || VT == MVT::f128)
       intVT = MVT::i128;
-    else if (MVT::is64BitVector(VT) || VT==MVT::f64)
+    else if (VT.is64BitVector() || VT==MVT::f64)
       intVT = MVT::i64;
     else if (VT==MVT::f32)
       intVT = MVT::i32;
@@ -593,12 +592,13 @@
     return DAG.getStore(Chain, Result, Ptr, ST->getSrcValue(),
                         SVOffset, ST->isVolatile(), Alignment);
   }
-  assert(MVT::isInteger(ST->getMemoryVT()) &&
-         !MVT::isVector(ST->getMemoryVT()) &&
+  assert(ST->getMemoryVT().isInteger() &&
+         !ST->getMemoryVT().isVector() &&
          "Unaligned store of unknown type.");
   // Get the half-size VT
-  MVT::ValueType NewStoredVT = ST->getMemoryVT() - 1;
-  int NumBits = MVT::getSizeInBits(NewStoredVT);
+  MVT NewStoredVT =
+    (MVT::SimpleValueType)(ST->getMemoryVT().getSimpleVT() - 1);
+  int NumBits = NewStoredVT.getSizeInBits();
   int IncrementSize = NumBits / 8;
 
   // Divide the stored value in two parts.
@@ -628,16 +628,16 @@
   int SVOffset = LD->getSrcValueOffset();
   SDOperand Chain = LD->getChain();
   SDOperand Ptr = LD->getBasePtr();
-  MVT::ValueType VT = LD->getValueType(0);
-  MVT::ValueType LoadedVT = LD->getMemoryVT();
-  if (MVT::isFloatingPoint(VT) || MVT::isVector(VT)) {
+  MVT VT = LD->getValueType(0);
+  MVT LoadedVT = LD->getMemoryVT();
+  if (VT.isFloatingPoint() || VT.isVector()) {
     // Expand to a (misaligned) integer load of the same size,
     // then bitconvert to floating point or vector.
-    MVT::ValueType intVT;
-    if (MVT::is128BitVector(LoadedVT) || 
+    MVT intVT;
+    if (LoadedVT.is128BitVector() ||
          LoadedVT == MVT::ppcf128 || LoadedVT == MVT::f128)
       intVT = MVT::i128;
-    else if (MVT::is64BitVector(LoadedVT) || LoadedVT == MVT::f64)
+    else if (LoadedVT.is64BitVector() || LoadedVT == MVT::f64)
       intVT = MVT::i64;
     else if (LoadedVT == MVT::f32)
       intVT = MVT::i32;
@@ -648,21 +648,20 @@
                                     SVOffset, LD->isVolatile(), 
                                     LD->getAlignment());
     SDOperand Result = DAG.getNode(ISD::BIT_CONVERT, LoadedVT, newLoad);
-    if (MVT::isFloatingPoint(VT) && LoadedVT != VT)
+    if (VT.isFloatingPoint() && LoadedVT != VT)
       Result = DAG.getNode(ISD::FP_EXTEND, VT, Result);
 
     SDOperand Ops[] = { Result, Chain };
-    return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, MVT::Other), 
-                       Ops, 2);
+    return DAG.getMergeValues(Ops, 2);
   }
-  assert(MVT::isInteger(LoadedVT) && !MVT::isVector(LoadedVT) &&
+  assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
          "Unaligned load of unsupported type.");
 
   // Compute the new VT that is half the size of the old one.  This is an
   // integer MVT.
-  unsigned NumBits = MVT::getSizeInBits(LoadedVT);
-  MVT::ValueType NewLoadedVT;
-  NewLoadedVT = MVT::getIntegerType(NumBits/2);
+  unsigned NumBits = LoadedVT.getSizeInBits();
+  MVT NewLoadedVT;
+  NewLoadedVT = MVT::getIntegerVT(NumBits/2);
   NumBits >>= 1;
   
   unsigned Alignment = LD->getAlignment();
@@ -702,7 +701,7 @@
                              Hi.getValue(1));
 
   SDOperand Ops[] = { Result, TF };
-  return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, MVT::Other), Ops, 2);
+  return DAG.getMergeValues(Ops, 2);
 }
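
ExpandUnalignedStore and ExpandUnalignedLoad split a misaligned access into two half-width accesses at adjacent addresses. A standalone sketch of the byte-level equivalence for a 32-bit store split into two 16-bit stores, little-endian order assumed:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  unsigned char buf[4];
  uint32_t v  = 0x12345678u;
  uint16_t lo = (uint16_t)v;            // low half, stored at ptr
  uint16_t hi = (uint16_t)(v >> 16);    // high half, stored at ptr + 2
  std::memcpy(buf,     &lo, 2);
  std::memcpy(buf + 2, &hi, 2);
  uint32_t roundTrip;
  std::memcpy(&roundTrip, buf, 4);
  assert(roundTrip == v);               // same bytes as one 32-bit store
  return 0;
}
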
 
 /// UnrollVectorOp - We know that the given vector has a legal type, however
@@ -710,23 +709,23 @@
 /// no way of lowering.  "Unroll" the vector, splitting out the scalars and
 /// operating on each element individually.
 SDOperand SelectionDAGLegalize::UnrollVectorOp(SDOperand Op) {
-  MVT::ValueType VT = Op.getValueType();
+  MVT VT = Op.getValueType();
   assert(isTypeLegal(VT) &&
          "Caller should expand or promote operands that are not legal!");
   assert(Op.Val->getNumValues() == 1 &&
          "Can't unroll a vector with multiple results!");
-  unsigned NE = MVT::getVectorNumElements(VT);
-  MVT::ValueType EltVT = MVT::getVectorElementType(VT);
+  unsigned NE = VT.getVectorNumElements();
+  MVT EltVT = VT.getVectorElementType();
 
   SmallVector<SDOperand, 8> Scalars;
   SmallVector<SDOperand, 4> Operands(Op.getNumOperands());
   for (unsigned i = 0; i != NE; ++i) {
     for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
       SDOperand Operand = Op.getOperand(j);
-      MVT::ValueType OperandVT = Operand.getValueType();
-      if (MVT::isVector(OperandVT)) {
+      MVT OperandVT = Operand.getValueType();
+      if (OperandVT.isVector()) {
         // A vector operand; extract a single element.
-        MVT::ValueType OperandEltVT = MVT::getVectorElementType(OperandVT);
+        MVT OperandEltVT = OperandVT.getVectorElementType();
         Operands[j] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT,
                                   OperandEltVT,
                                   Operand,
@@ -744,7 +743,7 @@
 }
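
UnrollVectorOp scalarizes an operation the target cannot perform on whole vectors: extract each element, apply the scalar operation, and rebuild the result vector. The same idea in a trivial standalone form:

#include <cassert>

int main() {
  float a[4] = {1, 2, 3, 4}, b[4] = {10, 20, 30, 40}, r[4];
  for (unsigned i = 0; i != 4; ++i)   // one scalar op per element
    r[i] = a[i] + b[i];
  assert(r[0] == 11 && r[3] == 44);
  return 0;
}
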
 
 /// GetFPLibCall - Return the right libcall for the given floating point type.
-static RTLIB::Libcall GetFPLibCall(MVT::ValueType VT,
+static RTLIB::Libcall GetFPLibCall(MVT VT,
                                    RTLIB::Libcall Call_F32,
                                    RTLIB::Libcall Call_F64,
                                    RTLIB::Libcall Call_F80,
@@ -757,6 +756,50 @@
     RTLIB::UNKNOWN_LIBCALL;
 }
 
+/// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
+/// insertion index for the INSERT_VECTOR_ELT instruction.  In this case, it
+/// is necessary to spill the vector being inserted into to memory, perform
+/// the insert there, and then read the result back.
+SDOperand SelectionDAGLegalize::
+PerformInsertVectorEltInMemory(SDOperand Vec, SDOperand Val, SDOperand Idx) {
+  SDOperand Tmp1 = Vec;
+  SDOperand Tmp2 = Val;
+  SDOperand Tmp3 = Idx;
+  
+  // If the target doesn't support this, we have to spill the input vector
+  // to a temporary stack slot, update the element, then reload it.  This is
+  // badness.  We could also load the value into a vector register (either
+  // with a "move to register" or "extload into register" instruction, then
+  // permute it into place, if the idx is a constant and if the idx is
+  // supported by the target.
+  MVT VT    = Tmp1.getValueType();
+  MVT EltVT = VT.getVectorElementType();
+  MVT IdxVT = Tmp3.getValueType();
+  MVT PtrVT = TLI.getPointerTy();
+  SDOperand StackPtr = DAG.CreateStackTemporary(VT);
+
+  FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr.Val);
+  int SPFI = StackPtrFI->getIndex();
+
+  // Store the vector.
+  SDOperand Ch = DAG.getStore(DAG.getEntryNode(), Tmp1, StackPtr,
+                              PseudoSourceValue::getFixedStack(),
+                              SPFI);
+
+  // Truncate or zero extend offset to target pointer type.
+  unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
+  Tmp3 = DAG.getNode(CastOpc, PtrVT, Tmp3);
+  // Add the offset to the index.
+  unsigned EltSize = EltVT.getSizeInBits()/8;
+  Tmp3 = DAG.getNode(ISD::MUL, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT));
+  SDOperand StackPtr2 = DAG.getNode(ISD::ADD, IdxVT, Tmp3, StackPtr);
+  // Store the scalar value.
+  Ch = DAG.getTruncStore(Ch, Tmp2, StackPtr2,
+                         PseudoSourceValue::getFixedStack(), SPFI, EltVT);
+  // Load the updated vector.
+  return DAG.getLoad(VT, Ch, StackPtr, PseudoSourceValue::getFixedStack(),SPFI);
+}
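
At the C level, the sequence built above amounts to copying the vector into a stack slot, writing the scalar at the variable index, and loading the whole vector back. A standalone sketch:

#include <cassert>
#include <cstring>

int main() {
  float vec[4] = {1, 2, 3, 4};     // incoming vector value
  unsigned idx = 2;                // variable insertion index
  float val    = 9.0f;

  float slot[4];                         // stack temporary
  std::memcpy(slot, vec, sizeof slot);   // store the vector
  slot[idx] = val;                       // store the scalar at idx
  std::memcpy(vec, slot, sizeof vec);    // reload the updated vector

  assert(vec[2] == 9.0f && vec[0] == 1.0f && vec[3] == 4.0f);
  return 0;
}
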
+
 /// LegalizeOp - We know that the specified value has a legal type, and
 /// that its operands are legal.  Now ensure that the operation itself
 /// is legal, recursively ensuring that the operands' operations remain
@@ -806,7 +849,6 @@
   case ISD::VALUETYPE:
   case ISD::SRCVALUE:
   case ISD::MEMOPERAND:
-  case ISD::STRING:
   case ISD::CONDCODE:
   case ISD::ARG_FLAGS:
     // Primitives must all be legal.
@@ -860,7 +902,7 @@
       Result = DAG.getConstant(0, TLI.getPointerTy());
     break;
   case ISD::FRAME_TO_ARGS_OFFSET: {
-    MVT::ValueType VT = Node->getValueType(0);
+    MVT VT = Node->getValueType(0);
     switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
     default: assert(0 && "This action is not supported yet!");
     case TargetLowering::Custom:
@@ -875,7 +917,7 @@
     break;
   case ISD::EXCEPTIONADDR: {
     Tmp1 = LegalizeOp(Node->getOperand(0));
-    MVT::ValueType VT = Node->getValueType(0);
+    MVT VT = Node->getValueType(0);
     switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
     default: assert(0 && "This action is not supported yet!");
     case TargetLowering::Expand: {
@@ -889,8 +931,7 @@
       // Fall Thru
     case TargetLowering::Legal: {
       SDOperand Ops[] = { DAG.getConstant(0, VT), Tmp1 };
-      Result = DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, MVT::Other),
-                           Ops, 2);
+      Result = DAG.getMergeValues(Ops, 2);
       break;
     }
     }
@@ -910,7 +951,7 @@
   case ISD::EHSELECTION: {
     Tmp1 = LegalizeOp(Node->getOperand(0));
     Tmp2 = LegalizeOp(Node->getOperand(1));
-    MVT::ValueType VT = Node->getValueType(0);
+    MVT VT = Node->getValueType(0);
     switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
     default: assert(0 && "This action is not supported yet!");
     case TargetLowering::Expand: {
@@ -924,8 +965,7 @@
       // Fall Thru
     case TargetLowering::Legal: {
       SDOperand Ops[] = { DAG.getConstant(0, VT), Tmp2 };
-      Result = DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, MVT::Other),
-                           Ops, 2);
+      Result = DAG.getMergeValues(Ops, 2);
       break;
     }
     }
@@ -943,7 +983,7 @@
     AddLegalizedOperand(Op.getValue(1), Tmp2);
     return Op.ResNo ? Tmp2 : Tmp1;
   case ISD::EH_RETURN: {
-    MVT::ValueType VT = Node->getValueType(0);
+    MVT VT = Node->getValueType(0);
     // The only "good" option for this node is to custom lower it.
     switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
     default: assert(0 && "This action is not supported at all!");
@@ -988,14 +1028,14 @@
     AddLegalizedOperand(Op.getValue(1), Result.getValue(1));
     return Result.getValue(Op.ResNo);
   case ISD::UNDEF: {
-    MVT::ValueType VT = Op.getValueType();
+    MVT VT = Op.getValueType();
     switch (TLI.getOperationAction(ISD::UNDEF, VT)) {
     default: assert(0 && "This action is not supported yet!");
     case TargetLowering::Expand:
-      if (MVT::isInteger(VT))
+      if (VT.isInteger())
         Result = DAG.getConstant(0, VT);
-      else if (MVT::isFloatingPoint(VT))
-        Result = DAG.getConstantFP(APFloat(APInt(MVT::getSizeInBits(VT), 0)),
+      else if (VT.isFloatingPoint())
+        Result = DAG.getConstantFP(APFloat(APInt(VT.getSizeInBits(), 0)),
                                    VT);
       else
         assert(0 && "Unknown value type!");
@@ -1034,42 +1074,36 @@
     return Result.getValue(Op.ResNo);
   }    
 
-  case ISD::LOCATION:
-    assert(Node->getNumOperands() == 5 && "Invalid LOCATION node!");
+  case ISD::DBG_STOPPOINT:
+    assert(Node->getNumOperands() == 1 && "Invalid DBG_STOPPOINT node!");
     Tmp1 = LegalizeOp(Node->getOperand(0));  // Legalize the input chain.
     
-    switch (TLI.getOperationAction(ISD::LOCATION, MVT::Other)) {
+    switch (TLI.getOperationAction(ISD::DBG_STOPPOINT, MVT::Other)) {
     case TargetLowering::Promote:
     default: assert(0 && "This action is not supported yet!");
     case TargetLowering::Expand: {
       MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
       bool useDEBUG_LOC = TLI.isOperationLegal(ISD::DEBUG_LOC, MVT::Other);
-      bool useLABEL = TLI.isOperationLegal(ISD::LABEL, MVT::Other);
+      bool useLABEL = TLI.isOperationLegal(ISD::DBG_LABEL, MVT::Other);
       
+      const DbgStopPointSDNode *DSP = cast<DbgStopPointSDNode>(Node);
       if (MMI && (useDEBUG_LOC || useLABEL)) {
-        const std::string &FName =
-          cast<StringSDNode>(Node->getOperand(3))->getValue();
-        const std::string &DirName = 
-          cast<StringSDNode>(Node->getOperand(4))->getValue();
-        unsigned SrcFile = MMI->RecordSource(DirName, FName);
+        const CompileUnitDesc *CompileUnit = DSP->getCompileUnit();
+        unsigned SrcFile = MMI->RecordSource(CompileUnit);
 
-        SmallVector<SDOperand, 8> Ops;
-        Ops.push_back(Tmp1);  // chain
-        SDOperand LineOp = Node->getOperand(1);
-        SDOperand ColOp = Node->getOperand(2);
+        unsigned Line = DSP->getLine();
+        unsigned Col = DSP->getColumn();
         
         if (useDEBUG_LOC) {
-          Ops.push_back(LineOp);  // line #
-          Ops.push_back(ColOp);  // col #
+          SmallVector<SDOperand, 8> Ops;
+          Ops.push_back(Tmp1);  // chain
+          Ops.push_back(DAG.getConstant(Line, MVT::i32));  // line #
+          Ops.push_back(DAG.getConstant(Col, MVT::i32));   // col #
           Ops.push_back(DAG.getConstant(SrcFile, MVT::i32));  // source file id
           Result = DAG.getNode(ISD::DEBUG_LOC, MVT::Other, &Ops[0], Ops.size());
         } else {
-          unsigned Line = cast<ConstantSDNode>(LineOp)->getValue();
-          unsigned Col = cast<ConstantSDNode>(ColOp)->getValue();
           unsigned ID = MMI->RecordSourceLine(Line, Col, SrcFile);
-          Ops.push_back(DAG.getConstant(ID, MVT::i32));
-          Ops.push_back(DAG.getConstant(0, MVT::i32)); // a debug label
-          Result = DAG.getNode(ISD::LABEL, MVT::Other, &Ops[0], Ops.size());
+          Result = DAG.getLabel(ISD::DBG_LABEL, Tmp1, ID);
         }
       } else {
         Result = Tmp1;  // chain
@@ -1127,15 +1161,14 @@
     }
     break;    
 
-  case ISD::LABEL:
-    assert(Node->getNumOperands() == 3 && "Invalid LABEL node!");
-    switch (TLI.getOperationAction(ISD::LABEL, MVT::Other)) {
+  case ISD::DBG_LABEL:
+  case ISD::EH_LABEL:
+    assert(Node->getNumOperands() == 1 && "Invalid LABEL node!");
+    switch (TLI.getOperationAction(Node->getOpcode(), MVT::Other)) {
     default: assert(0 && "This action is not supported yet!");
     case TargetLowering::Legal:
       Tmp1 = LegalizeOp(Node->getOperand(0));  // Legalize the chain.
-      Tmp2 = LegalizeOp(Node->getOperand(1));  // Legalize the label id.
-      Tmp3 = LegalizeOp(Node->getOperand(2));  // Legalize the "flavor" operand.
-      Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, Tmp3);
+      Result = DAG.UpdateNodeOperands(Result, Tmp1);
       break;
     case TargetLowering::Expand:
       Result = LegalizeOp(Node->getOperand(0));
@@ -1184,24 +1217,52 @@
     break;
   }
 
-  case ISD::ATOMIC_LCS:
-  case ISD::ATOMIC_LAS:
-  case ISD::ATOMIC_SWAP: {
-    assert(((Node->getNumOperands() == 4 && Node->getOpcode() == ISD::ATOMIC_LCS) ||
-            (Node->getNumOperands() == 3 && Node->getOpcode() == ISD::ATOMIC_LAS) ||
-            (Node->getNumOperands() == 3 && Node->getOpcode() == ISD::ATOMIC_SWAP)) &&
-           "Invalid Atomic node!");
-    int num = Node->getOpcode() == ISD::ATOMIC_LCS ? 4 : 3;
+  case ISD::ATOMIC_CMP_SWAP: {
+    unsigned int num_operands = 4;
+    assert(Node->getNumOperands() == num_operands && "Invalid Atomic node!");
     SDOperand Ops[4];
-    for (int x = 0; x < num; ++x)
+    for (unsigned int x = 0; x < num_operands; ++x)
       Ops[x] = LegalizeOp(Node->getOperand(x));
-    Result = DAG.UpdateNodeOperands(Result, &Ops[0], num);
+    Result = DAG.UpdateNodeOperands(Result, &Ops[0], num_operands);
     
     switch (TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0))) {
+      default: assert(0 && "This action is not supported yet!");
+      case TargetLowering::Custom:
+        Result = TLI.LowerOperation(Result, DAG);
+        break;
+      case TargetLowering::Legal:
+        break;
+    }
+    AddLegalizedOperand(SDOperand(Node, 0), Result.getValue(0));
+    AddLegalizedOperand(SDOperand(Node, 1), Result.getValue(1));
+    return Result.getValue(Op.ResNo);
+  }
+  case ISD::ATOMIC_LOAD_ADD:
+  case ISD::ATOMIC_LOAD_SUB:
+  case ISD::ATOMIC_LOAD_AND:
+  case ISD::ATOMIC_LOAD_OR:
+  case ISD::ATOMIC_LOAD_XOR:
+  case ISD::ATOMIC_LOAD_NAND:
+  case ISD::ATOMIC_LOAD_MIN:
+  case ISD::ATOMIC_LOAD_MAX:
+  case ISD::ATOMIC_LOAD_UMIN:
+  case ISD::ATOMIC_LOAD_UMAX:
+  case ISD::ATOMIC_SWAP: {
+    unsigned int num_operands = 3;
+    assert(Node->getNumOperands() == num_operands && "Invalid Atomic node!");
+    SDOperand Ops[3];
+    for (unsigned int x = 0; x < num_operands; ++x)
+      Ops[x] = LegalizeOp(Node->getOperand(x));
+    Result = DAG.UpdateNodeOperands(Result, &Ops[0], num_operands);
+
+    switch (TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0))) {
     default: assert(0 && "This action is not supported yet!");
     case TargetLowering::Custom:
       Result = TLI.LowerOperation(Result, DAG);
       break;
+    case TargetLowering::Expand:
+      Result = SDOperand(TLI.ReplaceNodeResults(Op.Val, DAG),0);
+      break;
     case TargetLowering::Legal:
       break;
     }
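The two atomic blocks above differ only in operand count: ATOMIC_CMP_SWAP carries chain, pointer, comparand and replacement value (four operands), while the ATOMIC_LOAD_* family and ATOMIC_SWAP carry chain, pointer and one value (three). The same split, sketched as a plain C++11 std::atomic analogy rather than the SelectionDAG interface:

    #include <atomic>
    #include <cassert>

    int main() {
      std::atomic<int> v(5);

      // ATOMIC_CMP_SWAP analogue: needs both the expected and the new value.
      int expected = 5;
      bool ok = v.compare_exchange_strong(expected, 7);
      assert(ok && v.load() == 7);

      // ATOMIC_LOAD_ADD analogue: a single value operand; returns the old value.
      int old = v.fetch_add(3);
      assert(old == 7 && v.load() == 10);
      return 0;
    }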
@@ -1209,7 +1270,6 @@
     AddLegalizedOperand(SDOperand(Node, 1), Result.getValue(1));
     return Result.getValue(Op.ResNo);
   }
-
   case ISD::Constant: {
     ConstantSDNode *CN = cast<ConstantSDNode>(Node);
     unsigned opAction =
@@ -1377,13 +1437,14 @@
         // SCALAR_TO_VECTOR requires that the type of the value being inserted
         // match the element type of the vector being created.
         if (Tmp2.getValueType() == 
-            MVT::getVectorElementType(Op.getValueType())) {
+            Op.getValueType().getVectorElementType()) {
           SDOperand ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, 
                                         Tmp1.getValueType(), Tmp2);
           
-          unsigned NumElts = MVT::getVectorNumElements(Tmp1.getValueType());
-          MVT::ValueType ShufMaskVT = MVT::getIntVectorWithNumElements(NumElts);
-          MVT::ValueType ShufMaskEltVT = MVT::getVectorElementType(ShufMaskVT);
+          unsigned NumElts = Tmp1.getValueType().getVectorNumElements();
+          MVT ShufMaskVT =
+            MVT::getIntVectorWithNumElements(NumElts);
+          MVT ShufMaskEltVT = ShufMaskVT.getVectorElementType();
           
           // We generate a shuffle of InVec and ScVec, so the shuffle mask
           // should be 0,1,2,3,4,5... with the appropriate element replaced with
@@ -1404,40 +1465,7 @@
           break;
         }
       }
-      
-      // If the target doesn't support this, we have to spill the input vector
-      // to a temporary stack slot, update the element, then reload it.  This is
-      // badness.  We could also load the value into a vector register (either
-      // with a "move to register" or "extload into register" instruction, then
-      // permute it into place, if the idx is a constant and if the idx is
-      // supported by the target.
-      MVT::ValueType VT    = Tmp1.getValueType();
-      MVT::ValueType EltVT = MVT::getVectorElementType(VT);
-      MVT::ValueType IdxVT = Tmp3.getValueType();
-      MVT::ValueType PtrVT = TLI.getPointerTy();
-      SDOperand StackPtr = DAG.CreateStackTemporary(VT);
-
-      FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr.Val);
-      int SPFI = StackPtrFI->getIndex();
-
-      // Store the vector.
-      SDOperand Ch = DAG.getStore(DAG.getEntryNode(), Tmp1, StackPtr,
-                                  PseudoSourceValue::getFixedStack(),
-                                  SPFI);
-
-      // Truncate or zero extend offset to target pointer type.
-      unsigned CastOpc = (IdxVT > PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
-      Tmp3 = DAG.getNode(CastOpc, PtrVT, Tmp3);
-      // Add the offset to the index.
-      unsigned EltSize = MVT::getSizeInBits(EltVT)/8;
-      Tmp3 = DAG.getNode(ISD::MUL, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT));
-      SDOperand StackPtr2 = DAG.getNode(ISD::ADD, IdxVT, Tmp3, StackPtr);
-      // Store the scalar value.
-      Ch = DAG.getTruncStore(Ch, Tmp2, StackPtr2,
-                             PseudoSourceValue::getFixedStack(), SPFI, EltVT);
-      // Load the updated vector.
-      Result = DAG.getLoad(VT, Ch, StackPtr,
-                           PseudoSourceValue::getFixedStack(), SPFI);
+      Result = PerformInsertVectorEltInMemory(Tmp1, Tmp2, Tmp3);
       break;
     }
     }
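The block removed here (now factored out as PerformInsertVectorEltInMemory) spills the vector to a stack slot, stores the scalar at base plus index times the element size, and reloads the whole vector. A minimal standalone sketch of that store/patch/reload idea on an ordinary array, illustrative only and not the DAG code:

    #include <array>
    #include <cassert>
    #include <cstddef>
    #include <cstring>

    // Spill, overwrite one element, reload: copy the vector into a temporary
    // buffer, write the scalar at byte offset idx * sizeof(element), then copy
    // the buffer back out as the result.
    std::array<float, 4> insertElt(std::array<float, 4> vec, float val, std::size_t idx) {
      unsigned char slot[sizeof(vec)];
      std::memcpy(slot, vec.data(), sizeof(vec));                    // store the vector
      std::memcpy(slot + idx * sizeof(float), &val, sizeof(float));  // store the scalar
      std::array<float, 4> out;
      std::memcpy(out.data(), slot, sizeof(out));                    // reload the vector
      return out;
    }

    int main() {
      std::array<float, 4> r = insertElt({{1, 2, 3, 4}}, 9.0f, 2);
      assert(r[2] == 9.0f && r[0] == 1.0f && r[3] == 4.0f);
    }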
@@ -1487,9 +1515,9 @@
       }
       // FALLTHROUGH
     case TargetLowering::Expand: {
-      MVT::ValueType VT = Node->getValueType(0);
-      MVT::ValueType EltVT = MVT::getVectorElementType(VT);
-      MVT::ValueType PtrVT = TLI.getPointerTy();
+      MVT VT = Node->getValueType(0);
+      MVT EltVT = VT.getVectorElementType();
+      MVT PtrVT = TLI.getPointerTy();
       SDOperand Mask = Node->getOperand(2);
       unsigned NumElems = Mask.getNumOperands();
       SmallVector<SDOperand,8> Ops;
@@ -1513,8 +1541,8 @@
     }
     case TargetLowering::Promote: {
       // Change base type to a different vector type.
-      MVT::ValueType OVT = Node->getValueType(0);
-      MVT::ValueType NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
+      MVT OVT = Node->getValueType(0);
+      MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
 
       // Cast the two input vectors.
       Tmp1 = DAG.getNode(ISD::BIT_CONVERT, NVT, Tmp1);
@@ -1584,7 +1612,6 @@
     // process, no libcalls can/will be inserted, guaranteeing that no calls
     // can overlap.
     assert(!IsLegalizingCall && "Inconsistent sequentialization of calls!");
-    SDOperand InCallSEQ = LastCALLSEQ_END;
     // Note that we are selecting this call!
     LastCALLSEQ_END = SDOperand(CallEnd, 0);
     IsLegalizingCall = true;
@@ -1636,7 +1663,7 @@
       AddLegalizedOperand(SDOperand(Node, 1), Result.getValue(1));
     return Result.getValue(Op.ResNo);
   case ISD::DYNAMIC_STACKALLOC: {
-    MVT::ValueType VT = Node->getValueType(0);
+    MVT VT = Node->getValueType(0);
     Tmp1 = LegalizeOp(Node->getOperand(0));  // Legalize the chain.
     Tmp2 = LegalizeOp(Node->getOperand(1));  // Legalize the size.
     Tmp3 = LegalizeOp(Node->getOperand(2));  // Legalize the alignment.
@@ -1777,7 +1804,7 @@
       SDOperand Table = Result.getOperand(1);
       SDOperand Index = Result.getOperand(2);
 
-      MVT::ValueType PTy = TLI.getPointerTy();
+      MVT PTy = TLI.getPointerTy();
       MachineFunction &MF = DAG.getMachineFunction();
       unsigned EntrySize = MF.getJumpTableInfo()->getEntrySize();
       Index= DAG.getNode(ISD::MUL, PTy, Index, DAG.getConstant(EntrySize, PTy));
@@ -1896,7 +1923,7 @@
 
     ISD::LoadExtType ExtType = LD->getExtensionType();
     if (ExtType == ISD::NON_EXTLOAD) {
-      MVT::ValueType VT = Node->getValueType(0);
+      MVT VT = Node->getValueType(0);
       Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
       Tmp3 = Result.getValue(0);
       Tmp4 = Result.getValue(1);
@@ -1908,7 +1935,7 @@
         // expand it.
         if (!TLI.allowsUnalignedMemoryAccesses()) {
           unsigned ABIAlignment = TLI.getTargetData()->
-            getABITypeAlignment(MVT::getTypeForValueType(LD->getMemoryVT()));
+            getABITypeAlignment(LD->getMemoryVT().getTypeForMVT());
           if (LD->getAlignment() < ABIAlignment){
             Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.Val), DAG,
                                          TLI);
@@ -1928,9 +1955,9 @@
         break;
       case TargetLowering::Promote: {
         // Only promote a load of vector type to another.
-        assert(MVT::isVector(VT) && "Cannot promote this load!");
+        assert(VT.isVector() && "Cannot promote this load!");
         // Change base type to a different vector type.
-        MVT::ValueType NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
+        MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
 
         Tmp1 = DAG.getLoad(NVT, Tmp1, Tmp2, LD->getSrcValue(),
                            LD->getSrcValueOffset(),
@@ -1946,13 +1973,13 @@
       AddLegalizedOperand(SDOperand(Node, 1), Tmp4);
       return Op.ResNo ? Tmp4 : Tmp3;
     } else {
-      MVT::ValueType SrcVT = LD->getMemoryVT();
-      unsigned SrcWidth = MVT::getSizeInBits(SrcVT);
+      MVT SrcVT = LD->getMemoryVT();
+      unsigned SrcWidth = SrcVT.getSizeInBits();
       int SVOffset = LD->getSrcValueOffset();
       unsigned Alignment = LD->getAlignment();
       bool isVolatile = LD->isVolatile();
 
-      if (SrcWidth != MVT::getStoreSizeInBits(SrcVT) &&
+      if (SrcWidth != SrcVT.getStoreSizeInBits() &&
           // Some targets pretend to have an i1 loading operation, and actually
           // load an i8.  This trick is correct for ZEXTLOAD because the top 7
           // bits are guaranteed to be zero; it helps the optimizers understand
@@ -1964,8 +1991,8 @@
            TLI.getLoadXAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
         // Promote to a byte-sized load if not loading an integral number of
         // bytes.  For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
-        unsigned NewWidth = MVT::getStoreSizeInBits(SrcVT);
-        MVT::ValueType NVT = MVT::getIntegerType(NewWidth);
+        unsigned NewWidth = SrcVT.getStoreSizeInBits();
+        MVT NVT = MVT::getIntegerVT(NewWidth);
         SDOperand Ch;
 
         // The extra bits are guaranteed to be zero, since we stored them that
@@ -1993,7 +2020,7 @@
         Tmp2 = LegalizeOp(Ch);
       } else if (SrcWidth & (SrcWidth - 1)) {
         // If not loading a power-of-2 number of bits, expand as two loads.
-        assert(MVT::isExtendedVT(SrcVT) && !MVT::isVector(SrcVT) &&
+        assert(SrcVT.isExtended() && !SrcVT.isVector() &&
                "Unsupported extload!");
         unsigned RoundWidth = 1 << Log2_32(SrcWidth);
         assert(RoundWidth < SrcWidth);
@@ -2001,8 +2028,8 @@
         assert(ExtraWidth < RoundWidth);
         assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
                "Load size not an integral number of bytes!");
-        MVT::ValueType RoundVT = MVT::getIntegerType(RoundWidth);
-        MVT::ValueType ExtraVT = MVT::getIntegerType(ExtraWidth);
+        MVT RoundVT = MVT::getIntegerVT(RoundWidth);
+        MVT ExtraVT = MVT::getIntegerVT(ExtraWidth);
         SDOperand Lo, Hi, Ch;
         unsigned IncrementSize;
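For an extending load whose memory width is not a power of two, the code above splits it into a power-of-two load of RoundWidth bits plus a load of the remaining ExtraWidth bits, then merges the halves with a shift and an OR. For a zero-extending i24 load on a little-endian target that means a 16-bit load plus an 8-bit load; a self-contained sketch of the arithmetic (hypothetical helper, not the legalizer itself):

    #include <cassert>
    #include <cstdint>

    // ZEXTLOAD of a 24-bit value: one 16-bit load plus one 8-bit load,
    // combined as lo | (hi << 16).  RoundWidth = 16, ExtraWidth = 8.
    uint32_t loadI24ZExt(const unsigned char *p) {
      uint32_t lo = static_cast<uint32_t>(p[0]) |
                    (static_cast<uint32_t>(p[1]) << 8);   // the 16-bit piece
      uint32_t hi = static_cast<uint32_t>(p[2]);          // the 8-bit piece
      return lo | (hi << 16);
    }

    int main() {
      const unsigned char bytes[3] = {0x78, 0x56, 0x34};
      assert(loadI24ZExt(bytes) == 0x345678u);
    }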
 
@@ -2087,7 +2114,7 @@
             // expand it.
             if (!TLI.allowsUnalignedMemoryAccesses()) {
               unsigned ABIAlignment = TLI.getTargetData()->
-                getABITypeAlignment(MVT::getTypeForValueType(LD->getMemoryVT()));
+                getABITypeAlignment(LD->getMemoryVT().getTypeForMVT());
               if (LD->getAlignment() < ABIAlignment){
                 Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.Val), DAG,
                                              TLI);
@@ -2137,14 +2164,14 @@
     }
   }
   case ISD::EXTRACT_ELEMENT: {
-    MVT::ValueType OpTy = Node->getOperand(0).getValueType();
+    MVT OpTy = Node->getOperand(0).getValueType();
     switch (getTypeAction(OpTy)) {
     default: assert(0 && "EXTRACT_ELEMENT action for type unimplemented!");
     case Legal:
       if (cast<ConstantSDNode>(Node->getOperand(1))->getValue()) {
         // 1 -> Hi
         Result = DAG.getNode(ISD::SRL, OpTy, Node->getOperand(0),
-                             DAG.getConstant(MVT::getSizeInBits(OpTy)/2, 
+                             DAG.getConstant(OpTy.getSizeInBits()/2,
                                              TLI.getShiftAmountTy()));
         Result = DAG.getNode(ISD::TRUNCATE, Node->getValueType(0), Result);
       } else {
@@ -2209,7 +2236,7 @@
         Result = DAG.UpdateNodeOperands(Result, Tmp1, LegalizeOp(Tmp2), Tmp3);
         break;
       case Expand:
-        if (!MVT::isVector(Tmp2.getValueType())) {
+        if (!Tmp2.getValueType().isVector()) {
           SDOperand Lo, Hi;
           ExpandOp(Tmp2, Lo, Hi);
 
@@ -2225,12 +2252,12 @@
         } else {
           SDNode *InVal = Tmp2.Val;
           int InIx = Tmp2.ResNo;
-          unsigned NumElems = MVT::getVectorNumElements(InVal->getValueType(InIx));
-          MVT::ValueType EVT = MVT::getVectorElementType(InVal->getValueType(InIx));
+          unsigned NumElems = InVal->getValueType(InIx).getVectorNumElements();
+          MVT EVT = InVal->getValueType(InIx).getVectorElementType();
           
           // Figure out if there is a simple type corresponding to this Vector
           // type.  If so, convert to the vector type.
-          MVT::ValueType TVT = MVT::getVectorType(EVT, NumElems);
+          MVT TVT = MVT::getVectorVT(EVT, NumElems);
           if (TLI.isTypeLegal(TVT)) {
             // Turn this into a return of the vector type.
             Tmp2 = LegalizeOp(Tmp2);
@@ -2278,7 +2305,7 @@
           break;
         case Expand: {
           SDOperand Lo, Hi;
-          assert(!MVT::isExtendedVT(Node->getOperand(i).getValueType()) &&
+          assert(!Node->getOperand(i).getValueType().isExtended() &&
                  "FIXME: TODO: implement returning non-legal vector types!");
           ExpandOp(Node->getOperand(i), Lo, Hi);
           NewValues.push_back(Lo);
@@ -2346,7 +2373,7 @@
             Result = DAG.getStore(Tmp1, Tmp3, Tmp2, ST->getSrcValue(),
                                   SVOffset, isVolatile, Alignment);
             break;
-          } else if (getTypeAction(MVT::i32) == Legal) {
+          } else if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) {
             // Otherwise, if the target supports 32-bit registers, use 2 32-bit
             // stores.  If the target supports neither 32- nor 64-bits, this
             // xform is certainly not worth it.
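The added !ST->isVolatile() check above matters because splitting a volatile access changes its observable behaviour, so the two-32-bit-store path is now taken only for ordinary stores. The expansion itself just writes the two halves of the f64 bit pattern, with the halves swapped on a big-endian target; a standalone sketch of the word split, illustrative only:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Store a double as two 32-bit words: the low word at offset 0 and the
    // high word at offset 4.  A big-endian target would swap the two stores.
    void storeF64AsTwoI32(double d, unsigned char *p) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      uint32_t lo = static_cast<uint32_t>(bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      std::memcpy(p, &lo, 4);
      std::memcpy(p + 4, &hi, 4);
    }

    int main() {
      unsigned char buf[8];
      storeF64AsTwoI32(1.5, buf);
      uint32_t lo, hi;
      std::memcpy(&lo, buf, 4);
      std::memcpy(&hi, buf + 4, 4);
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double back;
      std::memcpy(&back, &bits, sizeof(back));
      assert(back == 1.5);
    }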
@@ -2374,7 +2401,7 @@
         Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2, 
                                         ST->getOffset());
 
-        MVT::ValueType VT = Tmp3.getValueType();
+        MVT VT = Tmp3.getValueType();
         switch (TLI.getOperationAction(ISD::STORE, VT)) {
         default: assert(0 && "This action is not supported yet!");
         case TargetLowering::Legal:
@@ -2382,7 +2409,7 @@
           // expand it.
           if (!TLI.allowsUnalignedMemoryAccesses()) {
             unsigned ABIAlignment = TLI.getTargetData()->
-              getABITypeAlignment(MVT::getTypeForValueType(ST->getMemoryVT()));
+              getABITypeAlignment(ST->getMemoryVT().getTypeForMVT());
             if (ST->getAlignment() < ABIAlignment)
               Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.Val), DAG,
                                             TLI);
@@ -2393,7 +2420,7 @@
           if (Tmp1.Val) Result = Tmp1;
           break;
         case TargetLowering::Promote:
-          assert(MVT::isVector(VT) && "Unknown legal promote case!");
+          assert(VT.isVector() && "Unknown legal promote case!");
           Tmp3 = DAG.getNode(ISD::BIT_CONVERT, 
                              TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3);
           Result = DAG.getStore(Tmp1, Tmp3, Tmp2,
@@ -2418,16 +2445,16 @@
         // If this is a vector type, then we have to calculate the increment as
         // the product of the element size in bytes, and the number of elements
         // in the high half of the vector.
-        if (MVT::isVector(ST->getValue().getValueType())) {
+        if (ST->getValue().getValueType().isVector()) {
           SDNode *InVal = ST->getValue().Val;
           int InIx = ST->getValue().ResNo;
-          MVT::ValueType InVT = InVal->getValueType(InIx);
-          unsigned NumElems = MVT::getVectorNumElements(InVT);
-          MVT::ValueType EVT = MVT::getVectorElementType(InVT);
+          MVT InVT = InVal->getValueType(InIx);
+          unsigned NumElems = InVT.getVectorNumElements();
+          MVT EVT = InVT.getVectorElementType();
 
           // Figure out if there is a simple type corresponding to this Vector
           // type.  If so, convert to the vector type.
-          MVT::ValueType TVT = MVT::getVectorType(EVT, NumElems);
+          MVT TVT = MVT::getVectorVT(EVT, NumElems);
           if (TLI.isTypeLegal(TVT)) {
             // Turn this into a normal store of the vector type.
             Tmp3 = LegalizeOp(ST->getValue());
@@ -2446,12 +2473,12 @@
             break;
           } else {
             SplitVectorOp(ST->getValue(), Lo, Hi);
-            IncrementSize = MVT::getVectorNumElements(Lo.Val->getValueType(0)) * 
-                            MVT::getSizeInBits(EVT)/8;
+            IncrementSize = Lo.Val->getValueType(0).getVectorNumElements() *
+                            EVT.getSizeInBits()/8;
           }
         } else {
           ExpandOp(ST->getValue(), Lo, Hi);
-          IncrementSize = Hi.Val ? MVT::getSizeInBits(Hi.getValueType())/8 : 0;
+          IncrementSize = Hi.Val ? Hi.getValueType().getSizeInBits()/8 : 0;
 
           if (TLI.isBigEndian())
             std::swap(Lo, Hi);
@@ -2494,20 +2521,20 @@
                                  SVOffset, MVT::i8, isVolatile, Alignment);
       }
 
-      MVT::ValueType StVT = ST->getMemoryVT();
-      unsigned StWidth = MVT::getSizeInBits(StVT);
+      MVT StVT = ST->getMemoryVT();
+      unsigned StWidth = StVT.getSizeInBits();
 
-      if (StWidth != MVT::getStoreSizeInBits(StVT)) {
+      if (StWidth != StVT.getStoreSizeInBits()) {
         // Promote to a byte-sized store with upper bits zero if not
         // storing an integral number of bytes.  For example, promote
         // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
-        MVT::ValueType NVT = MVT::getIntegerType(MVT::getStoreSizeInBits(StVT));
+        MVT NVT = MVT::getIntegerVT(StVT.getStoreSizeInBits());
         Tmp3 = DAG.getZeroExtendInReg(Tmp3, StVT);
         Result = DAG.getTruncStore(Tmp1, Tmp3, Tmp2, ST->getSrcValue(),
                                    SVOffset, NVT, isVolatile, Alignment);
       } else if (StWidth & (StWidth - 1)) {
         // If not storing a power-of-2 number of bits, expand as two stores.
-        assert(MVT::isExtendedVT(StVT) && !MVT::isVector(StVT) &&
+        assert(StVT.isExtended() && !StVT.isVector() &&
                "Unsupported truncstore!");
         unsigned RoundWidth = 1 << Log2_32(StWidth);
         assert(RoundWidth < StWidth);
@@ -2515,8 +2542,8 @@
         assert(ExtraWidth < RoundWidth);
         assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
                "Store size not an integral number of bytes!");
-        MVT::ValueType RoundVT = MVT::getIntegerType(RoundWidth);
-        MVT::ValueType ExtraVT = MVT::getIntegerType(ExtraWidth);
+        MVT RoundVT = MVT::getIntegerVT(RoundWidth);
+        MVT ExtraVT = MVT::getIntegerVT(ExtraWidth);
         SDOperand Lo, Hi;
         unsigned IncrementSize;
 
@@ -2569,7 +2596,7 @@
           // expand it.
           if (!TLI.allowsUnalignedMemoryAccesses()) {
             unsigned ABIAlignment = TLI.getTargetData()->
-              getABITypeAlignment(MVT::getTypeForValueType(ST->getMemoryVT()));
+              getABITypeAlignment(ST->getMemoryVT().getTypeForMVT());
             if (ST->getAlignment() < ABIAlignment)
               Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.Val), DAG,
                                             TLI);
@@ -2718,13 +2745,13 @@
       }
       break;
     case TargetLowering::Promote: {
-      MVT::ValueType NVT =
+      MVT NVT =
         TLI.getTypeToPromoteTo(ISD::SELECT, Tmp2.getValueType());
       unsigned ExtOp, TruncOp;
-      if (MVT::isVector(Tmp2.getValueType())) {
+      if (Tmp2.getValueType().isVector()) {
         ExtOp   = ISD::BIT_CONVERT;
         TruncOp = ISD::BIT_CONVERT;
-      } else if (MVT::isInteger(Tmp2.getValueType())) {
+      } else if (Tmp2.getValueType().isInteger()) {
         ExtOp   = ISD::ANY_EXTEND;
         TruncOp = ISD::TRUNCATE;
       } else {
@@ -2804,23 +2831,23 @@
       // First step, figure out the appropriate operation to use.
       // Allow SETCC to not be supported for all legal data types
       // Mostly this targets FP
-      MVT::ValueType NewInTy = Node->getOperand(0).getValueType();
-      MVT::ValueType OldVT = NewInTy; OldVT = OldVT;
+      MVT NewInTy = Node->getOperand(0).getValueType();
+      MVT OldVT = NewInTy; OldVT = OldVT;
 
       // Scan for the appropriate larger type to use.
       while (1) {
-        NewInTy = (MVT::ValueType)(NewInTy+1);
+        NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT()+1);
 
-        assert(MVT::isInteger(NewInTy) == MVT::isInteger(OldVT) &&
+        assert(NewInTy.isInteger() == OldVT.isInteger() &&
                "Fell off of the edge of the integer world");
-        assert(MVT::isFloatingPoint(NewInTy) == MVT::isFloatingPoint(OldVT) &&
+        assert(NewInTy.isFloatingPoint() == OldVT.isFloatingPoint() &&
                "Fell off of the edge of the floating point world");
           
         // If the target supports SETCC of this type, use it.
         if (TLI.isOperationLegal(ISD::SETCC, NewInTy))
           break;
       }
-      if (MVT::isInteger(NewInTy))
+      if (NewInTy.isInteger())
         assert(0 && "Cannot promote Legal Integer SETCC yet");
       else {
         Tmp1 = DAG.getNode(ISD::FP_EXTEND, NewInTy, Tmp1);
@@ -2835,13 +2862,31 @@
     case TargetLowering::Expand:
       // Expand a setcc node into a select_cc of the same condition, lhs, and
       // rhs that selects between const 1 (true) and const 0 (false).
-      MVT::ValueType VT = Node->getValueType(0);
+      MVT VT = Node->getValueType(0);
       Result = DAG.getNode(ISD::SELECT_CC, VT, Tmp1, Tmp2, 
                            DAG.getConstant(1, VT), DAG.getConstant(0, VT),
                            Tmp3);
       break;
     }
     break;
+  case ISD::VSETCC: {
+    Tmp1 = LegalizeOp(Node->getOperand(0));   // LHS
+    Tmp2 = LegalizeOp(Node->getOperand(1));   // RHS
+    SDOperand CC = Node->getOperand(2);
+    
+    Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, CC);
+
+    // Everything is legal, see if we should expand this op or something.
+    switch (TLI.getOperationAction(ISD::VSETCC, Tmp1.getValueType())) {
+    default: assert(0 && "This action is not supported yet!");
+    case TargetLowering::Legal: break;
+    case TargetLowering::Custom:
+      Tmp1 = TLI.LowerOperation(Result, DAG);
+      if (Tmp1.Val) Result = Tmp1;
+      break;
+    }
+    break;
+  }
 
   case ISD::SHL_PARTS:
   case ISD::SRA_PARTS:
@@ -2922,7 +2967,7 @@
       if (Tmp1.Val) Result = Tmp1;
       break;
     case TargetLowering::Expand: {
-      MVT::ValueType VT = Op.getValueType();
+      MVT VT = Op.getValueType();
  
       // See if multiply or divide can be lowered using two-result operations.
       SDVTList VTs = DAG.getVTList(VT, VT);
@@ -2995,7 +3040,7 @@
         break;
       }
 
-      assert(MVT::isVector(Node->getValueType(0)) &&
+      assert(Node->getValueType(0).isVector() &&
              "Cannot expand this binary operator!");
       // Expand the operation into a bunch of nasty scalar code.
       Result = LegalizeOp(UnrollVectorOp(Op));
@@ -3007,9 +3052,9 @@
       case ISD::AND:
       case ISD::OR:
       case ISD::XOR: {
-        MVT::ValueType OVT = Node->getValueType(0);
-        MVT::ValueType NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
-        assert(MVT::isVector(OVT) && "Cannot promote this BinOp!");
+        MVT OVT = Node->getValueType(0);
+        MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
+        assert(OVT.isVector() && "Cannot promote this BinOp!");
         // Bit convert each of the values to the new type.
         Tmp1 = DAG.getNode(ISD::BIT_CONVERT, NVT, Tmp1);
         Tmp2 = DAG.getNode(ISD::BIT_CONVERT, NVT, Tmp2);
@@ -3067,7 +3112,7 @@
           TLI.getOperationAction(ISD::FNEG, Tmp1.getValueType()) ==
           TargetLowering::Legal) {
         // Get the sign bit of the RHS.
-        MVT::ValueType IVT = 
+        MVT IVT =
           Tmp2.getValueType() == MVT::f32 ? MVT::i32 : MVT::i64;
         SDOperand SignBit = DAG.getNode(ISD::BIT_CONVERT, IVT, Tmp2);
         SignBit = DAG.getSetCC(TLI.getSetCCResultType(SignBit),
@@ -3085,7 +3130,7 @@
       }
       
       // Otherwise, do bitwise ops!
-      MVT::ValueType NVT = 
+      MVT NVT =
         Node->getValueType(0) == MVT::f32 ? MVT::i32 : MVT::i64;
       Result = ExpandFCOPYSIGNToBitwiseOps(Node, NVT, DAG, TLI);
       Result = DAG.getNode(ISD::BIT_CONVERT, Node->getValueType(0), Result);
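When FNEG and FABS are not both legal, the path above bit-converts the operands to the integer type of the same width (i32 for f32, i64 for f64) and assembles the result with masks. A self-contained sketch of copysign on f64 bit patterns, an illustrative helper rather than ExpandFCOPYSIGNToBitwiseOps itself:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // copysign(x, y): keep the magnitude bits of x, take the sign bit of y.
    double copySignBits(double x, double y) {
      uint64_t xi, yi;
      std::memcpy(&xi, &x, sizeof(xi));
      std::memcpy(&yi, &y, sizeof(yi));
      const uint64_t SignBit = 1ULL << 63;
      uint64_t ri = (xi & ~SignBit) | (yi & SignBit);
      double r;
      std::memcpy(&r, &ri, sizeof(r));
      return r;
    }

    int main() {
      assert(copySignBits(3.0, -1.0) == -3.0);
      assert(copySignBits(-3.0, 1.0) == 3.0);
    }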
@@ -3119,7 +3164,7 @@
     return Result;
     
   case ISD::BUILD_PAIR: {
-    MVT::ValueType PairTy = Node->getValueType(0);
+    MVT PairTy = Node->getValueType(0);
     // TODO: handle the case where the Lo and Hi operands are not of legal type
     Tmp1 = LegalizeOp(Node->getOperand(0));   // Lo
     Tmp2 = LegalizeOp(Node->getOperand(1));   // Hi
@@ -3135,7 +3180,7 @@
       Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, PairTy, Tmp1);
       Tmp2 = DAG.getNode(ISD::ANY_EXTEND, PairTy, Tmp2);
       Tmp2 = DAG.getNode(ISD::SHL, PairTy, Tmp2,
-                         DAG.getConstant(MVT::getSizeInBits(PairTy)/2, 
+                         DAG.getConstant(PairTy.getSizeInBits()/2,
                                          TLI.getShiftAmountTy()));
       Result = DAG.getNode(ISD::OR, PairTy, Tmp1, Tmp2);
       break;
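The Expand path above assembles the wide value from its halves: zero-extend the low half, any-extend the high half, shift the high half left by half the pair width, and OR the two together. The same arithmetic on plain integers, as a small sketch:

    #include <cassert>
    #include <cstdint>

    // BUILD_PAIR of an i64 from (lo, hi): zext(lo) | (ext(hi) << 32).
    uint64_t buildPair(uint32_t lo, uint32_t hi) {
      return static_cast<uint64_t>(lo) | (static_cast<uint64_t>(hi) << 32);
    }

    int main() {
      assert(buildPair(0xDDCCBBAAu, 0x11223344u) == 0x11223344DDCCBBAAULL);
    }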
@@ -3164,7 +3209,7 @@
     case TargetLowering::Expand: {
       unsigned DivOpc= (Node->getOpcode() == ISD::UREM) ? ISD::UDIV : ISD::SDIV;
       bool isSigned = DivOpc == ISD::SDIV;
-      MVT::ValueType VT = Node->getValueType(0);
+      MVT VT = Node->getValueType(0);
  
       // See if remainder can be lowered using two-result operations.
       SDVTList VTs = DAG.getVTList(VT, VT);
@@ -3179,14 +3224,14 @@
         break;
       }
 
-      if (MVT::isInteger(VT)) {
+      if (VT.isInteger()) {
         if (TLI.getOperationAction(DivOpc, VT) ==
             TargetLowering::Legal) {
           // X % Y -> X-X/Y*Y
           Result = DAG.getNode(DivOpc, VT, Tmp1, Tmp2);
           Result = DAG.getNode(ISD::MUL, VT, Result, Tmp2);
           Result = DAG.getNode(ISD::SUB, VT, Tmp1, Result);
-        } else if (MVT::isVector(VT)) {
+        } else if (VT.isVector()) {
           Result = LegalizeOp(UnrollVectorOp(Op));
         } else {
           assert(VT == MVT::i32 &&
@@ -3197,9 +3242,9 @@
           Result = ExpandLibCall(LC, Node, isSigned, Dummy);
         }
       } else {
-        assert(MVT::isFloatingPoint(VT) &&
+        assert(VT.isFloatingPoint() &&
                "remainder op must have integer or floating-point type");
-        if (MVT::isVector(VT)) {
+        if (VT.isVector()) {
           Result = LegalizeOp(UnrollVectorOp(Op));
         } else {
           // Floating point mod -> fmod libcall.
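When REM is not legal but the matching DIV is, the expansion above rewrites X % Y as X - (X / Y) * Y, and floating-point remainders fall through to the fmod libcall instead. The identity on plain values, sketch only:

    #include <cassert>
    #include <cmath>

    // Integer remainder via divide, multiply, subtract: X % Y == X - (X / Y) * Y.
    int expandRem(int x, int y) { return x - (x / y) * y; }

    int main() {
      assert(expandRem(17, 5) == 17 % 5);    // 2
      assert(expandRem(-17, 5) == -17 % 5);  // -2 with truncating division
      // Floating-point remainder is a libcall rather than this identity.
      assert(std::fmod(5.5, 2.0) == 1.5);
    }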
@@ -3217,7 +3262,7 @@
     Tmp1 = LegalizeOp(Node->getOperand(0));  // Legalize the chain.
     Tmp2 = LegalizeOp(Node->getOperand(1));  // Legalize the pointer.
 
-    MVT::ValueType VT = Node->getValueType(0);
+    MVT VT = Node->getValueType(0);
     switch (TLI.getOperationAction(Node->getOpcode(), MVT::Other)) {
     default: assert(0 && "This action is not supported yet!");
     case TargetLowering::Custom:
@@ -3241,7 +3286,7 @@
       SDOperand VAList = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp2, V, 0);
       // Increment the pointer, VAList, to the next vaarg
       Tmp3 = DAG.getNode(ISD::ADD, TLI.getPointerTy(), VAList, 
-                         DAG.getConstant(MVT::getSizeInBits(VT)/8, 
+                         DAG.getConstant(VT.getSizeInBits()/8,
                                          TLI.getPointerTy()));
       // Store the incremented VAList to the legalized pointer
       Tmp3 = DAG.getStore(VAList.getValue(1), Tmp3, Tmp2, V, 0);
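The VAARG expansion above is a pointer bump: load the current va_list cursor, advance it by the argument size in bytes, store the new cursor back, and load the argument from the old cursor. A standalone model of that sequence with hypothetical names; the real code also has to honour the target pointer type and alignment:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // *list is the cursor into the argument area.  Read the value at the
    // cursor, then advance the cursor past it (the "increment and store back").
    int32_t vaargI32(unsigned char **list) {
      unsigned char *cur = *list;           // load the VAList cursor
      *list = cur + sizeof(int32_t);        // store the incremented cursor
      int32_t val;
      std::memcpy(&val, cur, sizeof(val));  // load the argument itself
      return val;
    }

    int main() {
      int32_t args[2] = {42, 7};
      unsigned char *cursor = reinterpret_cast<unsigned char *>(args);
      assert(vaargI32(&cursor) == 42);
      assert(vaargI32(&cursor) == 7);
    }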
@@ -3359,9 +3404,9 @@
       Result = DAG.UpdateNodeOperands(Result, Tmp1);
       break;
     case TargetLowering::Promote: {
-      MVT::ValueType OVT = Tmp1.getValueType();
-      MVT::ValueType NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
-      unsigned DiffBits = MVT::getSizeInBits(NVT) - MVT::getSizeInBits(OVT);
+      MVT OVT = Tmp1.getValueType();
+      MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
+      unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
 
       Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, NVT, Tmp1);
       Tmp1 = DAG.getNode(ISD::BSWAP, NVT, Tmp1);
@@ -3392,8 +3437,8 @@
       }
       break;
     case TargetLowering::Promote: {
-      MVT::ValueType OVT = Tmp1.getValueType();
-      MVT::ValueType NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
+      MVT OVT = Tmp1.getValueType();
+      MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
 
       // Zero extend the argument.
       Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, NVT, Tmp1);
@@ -3406,16 +3451,16 @@
       case ISD::CTTZ:
         //if Tmp1 == sizeinbits(NVT) then Tmp1 = sizeinbits(Old VT)
         Tmp2 = DAG.getSetCC(TLI.getSetCCResultType(Tmp1), Tmp1,
-                            DAG.getConstant(MVT::getSizeInBits(NVT), NVT),
+                            DAG.getConstant(NVT.getSizeInBits(), NVT),
                             ISD::SETEQ);
         Result = DAG.getNode(ISD::SELECT, NVT, Tmp2,
-                             DAG.getConstant(MVT::getSizeInBits(OVT),NVT), Tmp1);
+                             DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1);
         break;
       case ISD::CTLZ:
         // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
         Result = DAG.getNode(ISD::SUB, NVT, Tmp1,
-                             DAG.getConstant(MVT::getSizeInBits(NVT) -
-                                             MVT::getSizeInBits(OVT), NVT));
+                             DAG.getConstant(NVT.getSizeInBits() -
+                                             OVT.getSizeInBits(), NVT));
         break;
       }
       break;
@@ -3455,7 +3500,7 @@
         break;
       case ISD::FABS: {
         // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X).
-        MVT::ValueType VT = Node->getValueType(0);
+        MVT VT = Node->getValueType(0);
         Tmp2 = DAG.getConstantFP(0.0, VT);
         Tmp2 = DAG.getSetCC(TLI.getSetCCResultType(Tmp1), Tmp1, Tmp2,
                             ISD::SETUGT);
@@ -3466,10 +3511,10 @@
       case ISD::FSQRT:
       case ISD::FSIN:
       case ISD::FCOS: {
-        MVT::ValueType VT = Node->getValueType(0);
+        MVT VT = Node->getValueType(0);
 
         // Expand unsupported unary vector operators by unrolling them.
-        if (MVT::isVector(VT)) {
+        if (VT.isVector()) {
           Result = LegalizeOp(UnrollVectorOp(Op));
           break;
         }
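The FABS expansion above materializes Y = (X >u 0.0) ? X : fneg(X): an unordered-greater compare against +0.0 feeding a select. A plain-C++ sketch of the same select; like the DAG form it can return -0.0 for a zero input, and NaN inputs simply come back as NaNs:

    #include <cassert>

    // fabs as a compare plus select: (x > 0.0) ? x : -x.
    double fabsExpand(double x) { return (x > 0.0) ? x : -x; }

    int main() {
      assert(fabsExpand(-2.5) == 2.5);
      assert(fabsExpand(3.0) == 3.0);
    }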
@@ -3499,10 +3544,10 @@
     }
     break;
   case ISD::FPOWI: {
-    MVT::ValueType VT = Node->getValueType(0);
+    MVT VT = Node->getValueType(0);
 
     // Expand unsupported unary vector operators by unrolling them.
-    if (MVT::isVector(VT)) {
+    if (VT.isVector()) {
       Result = LegalizeOp(UnrollVectorOp(Op));
       break;
     }
@@ -3518,17 +3563,17 @@
     if (!isTypeLegal(Node->getOperand(0).getValueType())) {
       Result = EmitStackConvert(Node->getOperand(0), Node->getValueType(0),
                                 Node->getValueType(0));
-    } else if (MVT::isVector(Op.getOperand(0).getValueType())) {
+    } else if (Op.getOperand(0).getValueType().isVector()) {
       // The input has to be a vector type, we have to either scalarize it, pack
       // it, or convert it based on whether the input vector type is legal.
       SDNode *InVal = Node->getOperand(0).Val;
       int InIx = Node->getOperand(0).ResNo;
-      unsigned NumElems = MVT::getVectorNumElements(InVal->getValueType(InIx));
-      MVT::ValueType EVT = MVT::getVectorElementType(InVal->getValueType(InIx));
+      unsigned NumElems = InVal->getValueType(InIx).getVectorNumElements();
+      MVT EVT = InVal->getValueType(InIx).getVectorElementType();
     
       // Figure out if there is a simple type corresponding to this Vector
       // type.  If so, convert to the vector type.
-      MVT::ValueType TVT = MVT::getVectorType(EVT, NumElems);
+      MVT TVT = MVT::getVectorVT(EVT, NumElems);
       if (TLI.isTypeLegal(TVT)) {
         // Turn this into a bit convert of the vector input.
         Result = DAG.getNode(ISD::BIT_CONVERT, Node->getValueType(0), 
@@ -3655,11 +3700,11 @@
       case TargetLowering::Expand:
         if (Node->getOpcode() == ISD::FP_TO_UINT) {
           SDOperand True, False;
-          MVT::ValueType VT =  Node->getOperand(0).getValueType();
-          MVT::ValueType NVT = Node->getValueType(0);
+          MVT VT =  Node->getOperand(0).getValueType();
+          MVT NVT = Node->getValueType(0);
           const uint64_t zero[] = {0, 0};
-          APFloat apf = APFloat(APInt(MVT::getSizeInBits(VT), 2, zero));
-          APInt x = APInt::getSignBit(MVT::getSizeInBits(NVT));
+          APFloat apf = APFloat(APInt(VT.getSizeInBits(), 2, zero));
+          APInt x = APInt::getSignBit(NVT.getSizeInBits());
           (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven);
           Tmp2 = DAG.getConstantFP(apf, VT);
           Tmp3 = DAG.getSetCC(TLI.getSetCCResultType(Node->getOperand(0)),
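The FP_TO_UINT expansion above selects between two signed conversions: inputs below 2^(N-1) convert directly, larger inputs are biased down by 2^(N-1), converted, and have the sign bit XORed back in. A standalone double to uint64_t sketch of the same idea, illustrative only:

    #include <cassert>
    #include <cstdint>

    // FP_TO_UINT via FP_TO_SINT: values below 2^63 convert directly; larger
    // values are reduced by 2^63 first and the top bit is restored afterwards.
    uint64_t fpToUint64(double x) {
      const double Bias = 9223372036854775808.0;  // 2^63, exactly representable
      if (x < Bias)
        return static_cast<uint64_t>(static_cast<int64_t>(x));
      return static_cast<uint64_t>(static_cast<int64_t>(x - Bias)) ^ (1ULL << 63);
    }

    int main() {
      assert(fpToUint64(1.0) == 1);
      assert(fpToUint64(9223372036854775808.0) == (1ULL << 63));
      assert(fpToUint64(1e19) == 10000000000000000000ULL);
    }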
@@ -3679,8 +3724,8 @@
       }
       break;
     case Expand: {
-      MVT::ValueType VT = Op.getValueType();
-      MVT::ValueType OVT = Node->getOperand(0).getValueType();
+      MVT VT = Op.getValueType();
+      MVT OVT = Node->getOperand(0).getValueType();
       // Convert ppcf128 to i32
       if (OVT == MVT::ppcf128 && VT == MVT::i32) {
         if (Node->getOpcode() == ISD::FP_TO_SINT) {
@@ -3797,8 +3842,8 @@
     break;
 
   case ISD::FP_EXTEND: {
-    MVT::ValueType DstVT = Op.getValueType();
-    MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
+    MVT DstVT = Op.getValueType();
+    MVT SrcVT = Op.getOperand(0).getValueType();
     if (TLI.getConvertAction(SrcVT, DstVT) == TargetLowering::Expand) {
       // The only other way we can lower this is to turn it into a STORE,
      // LOAD pair, targeting a temporary location (a stack slot).
@@ -3819,8 +3864,8 @@
     break;
   }
   case ISD::FP_ROUND: {
-    MVT::ValueType DstVT = Op.getValueType();
-    MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
+    MVT DstVT = Op.getValueType();
+    MVT SrcVT = Op.getOperand(0).getValueType();
     if (TLI.getConvertAction(SrcVT, DstVT) == TargetLowering::Expand) {
       if (SrcVT == MVT::ppcf128) {
         SDOperand Lo;
@@ -3856,14 +3901,12 @@
     case Expand: assert(0 && "Shouldn't need to expand other operators here!");
     case Legal:
       Tmp1 = LegalizeOp(Node->getOperand(0));
+      Result = DAG.UpdateNodeOperands(Result, Tmp1);
       if (TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)) ==
           TargetLowering::Custom) {
-        Tmp2 = TLI.LowerOperation(Result, DAG);
-        if (Tmp2.Val) {
-          Tmp1 = Tmp2;
-        }
+        Tmp1 = TLI.LowerOperation(Result, DAG);
+        if (Tmp1.Val) Result = Tmp1;
       }
-      Result = DAG.UpdateNodeOperands(Result, Tmp1);
       break;
     case Promote:
       switch (Node->getOpcode()) {
@@ -3890,7 +3933,7 @@
   case ISD::FP_ROUND_INREG:
   case ISD::SIGN_EXTEND_INREG: {
     Tmp1 = LegalizeOp(Node->getOperand(0));
-    MVT::ValueType ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
+    MVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
 
     // If this operation is not supported, convert it to a shl/shr or load/store
     // pair.
@@ -3904,8 +3947,8 @@
       if (Node->getOpcode() == ISD::SIGN_EXTEND_INREG) {
         // NOTE: we could fall back on load/store here too for targets without
         // SAR.  However, it is doubtful that any exist.
-        unsigned BitsDiff = MVT::getSizeInBits(Node->getValueType(0)) -
-                            MVT::getSizeInBits(ExtraVT);
+        unsigned BitsDiff = Node->getValueType(0).getSizeInBits() -
+                            ExtraVT.getSizeInBits();
         SDOperand ShiftCst = DAG.getConstant(BitsDiff, TLI.getShiftAmountTy());
         Result = DAG.getNode(ISD::SHL, Node->getValueType(0),
                              Node->getOperand(0), ShiftCst);
@@ -3944,8 +3987,8 @@
     AddLegalizedOperand(SDOperand(Node, 1), Tmp1);
     return Op.ResNo ? Tmp1 : Result;
   }
-   case ISD::FLT_ROUNDS_: {
-    MVT::ValueType VT = Node->getValueType(0);
+  case ISD::FLT_ROUNDS_: {
+    MVT VT = Node->getValueType(0);
     switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
     default: assert(0 && "This action not supported for this op yet!");
     case TargetLowering::Custom:
@@ -3957,9 +4000,10 @@
       Result = DAG.getConstant(1, VT);
       break;
     }
+    break;
   }
   case ISD::TRAP: {
-    MVT::ValueType VT = Node->getValueType(0);
+    MVT VT = Node->getValueType(0);
     switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
     default: assert(0 && "This action not supported for this op yet!");
     case TargetLowering::Legal:
@@ -4004,11 +4048,11 @@
 /// have the correct bits for the low portion of the register, but no guarantee
 /// is made about the top bits: it may be zero, sign-extended, or garbage.
 SDOperand SelectionDAGLegalize::PromoteOp(SDOperand Op) {
-  MVT::ValueType VT = Op.getValueType();
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(VT);
+  MVT VT = Op.getValueType();
+  MVT NVT = TLI.getTypeToTransformTo(VT);
   assert(getTypeAction(VT) == Promote &&
          "Caller should expand or legalize operands that are not promotable!");
-  assert(NVT > VT && MVT::isInteger(NVT) == MVT::isInteger(VT) &&
+  assert(NVT.bitsGT(VT) && NVT.isInteger() == VT.isInteger() &&
          "Cannot promote to smaller type!");
 
   SDOperand Tmp1, Tmp2, Tmp3;
@@ -4055,9 +4099,9 @@
     switch (getTypeAction(Node->getOperand(0).getValueType())) {
     case Legal:
       Result = LegalizeOp(Node->getOperand(0));
-      assert(Result.getValueType() >= NVT &&
+      assert(Result.getValueType().bitsGE(NVT) &&
              "This truncation doesn't make sense!");
-      if (Result.getValueType() > NVT)    // Truncate to NVT instead of VT
+      if (Result.getValueType().bitsGT(NVT))    // Truncate to NVT instead of VT
         Result = DAG.getNode(ISD::TRUNCATE, NVT, Result);
       break;
     case Promote:
@@ -4215,22 +4259,35 @@
     break;
   }
     
-  case ISD::ATOMIC_LCS: {
+  case ISD::ATOMIC_CMP_SWAP: {
+    AtomicSDNode* AtomNode = cast<AtomicSDNode>(Node);
     Tmp2 = PromoteOp(Node->getOperand(2));
     Tmp3 = PromoteOp(Node->getOperand(3));
-    Result = DAG.getAtomic(Node->getOpcode(), Node->getOperand(0), 
-                           Node->getOperand(1), Tmp2, Tmp3,
-                           cast<AtomicSDNode>(Node)->getVT());
+    Result = DAG.getAtomic(Node->getOpcode(), AtomNode->getChain(), 
+                           AtomNode->getBasePtr(), Tmp2, Tmp3,
+                           AtomNode->getSrcValue(),
+                           AtomNode->getAlignment());
     // Remember that we legalized the chain.
     AddLegalizedOperand(Op.getValue(1), LegalizeOp(Result.getValue(1)));
     break;
   }
-  case ISD::ATOMIC_LAS:
+  case ISD::ATOMIC_LOAD_ADD:
+  case ISD::ATOMIC_LOAD_SUB:
+  case ISD::ATOMIC_LOAD_AND:
+  case ISD::ATOMIC_LOAD_OR:
+  case ISD::ATOMIC_LOAD_XOR:
+  case ISD::ATOMIC_LOAD_NAND:
+  case ISD::ATOMIC_LOAD_MIN:
+  case ISD::ATOMIC_LOAD_MAX:
+  case ISD::ATOMIC_LOAD_UMIN:
+  case ISD::ATOMIC_LOAD_UMAX:
   case ISD::ATOMIC_SWAP: {
+    AtomicSDNode* AtomNode = cast<AtomicSDNode>(Node);
     Tmp2 = PromoteOp(Node->getOperand(2));
-    Result = DAG.getAtomic(Node->getOpcode(), Node->getOperand(0), 
-                           Node->getOperand(1), Tmp2,
-                           cast<AtomicSDNode>(Node)->getVT());
+    Result = DAG.getAtomic(Node->getOpcode(), AtomNode->getChain(), 
+                           AtomNode->getBasePtr(), Tmp2,
+                           AtomNode->getSrcValue(),
+                           AtomNode->getAlignment());
     // Remember that we legalized the chain.
     AddLegalizedOperand(Op.getValue(1), LegalizeOp(Result.getValue(1)));
     break;
@@ -4273,7 +4330,7 @@
     // These operators require that their input be sign extended.
     Tmp1 = PromoteOp(Node->getOperand(0));
     Tmp2 = PromoteOp(Node->getOperand(1));
-    if (MVT::isInteger(NVT)) {
+    if (NVT.isInteger()) {
       Tmp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, NVT, Tmp1,
                          DAG.getValueType(VT));
       Tmp2 = DAG.getNode(ISD::SIGN_EXTEND_INREG, NVT, Tmp2,
@@ -4282,7 +4339,7 @@
     Result = DAG.getNode(Node->getOpcode(), NVT, Tmp1, Tmp2);
 
     // Perform FP_ROUND: this is probably overly pessimistic.
-    if (MVT::isFloatingPoint(NVT) && NoExcessFPPrecision)
+    if (NVT.isFloatingPoint() && NoExcessFPPrecision)
       Result = DAG.getNode(ISD::FP_ROUND_INREG, NVT, Result,
                            DAG.getValueType(VT));
     break;
@@ -4313,7 +4370,7 @@
     // These operators require that their input be zero extended.
     Tmp1 = PromoteOp(Node->getOperand(0));
     Tmp2 = PromoteOp(Node->getOperand(1));
-    assert(MVT::isInteger(NVT) && "Operators don't apply to FP!");
+    assert(NVT.isInteger() && "Operators don't apply to FP!");
     Tmp1 = DAG.getZeroExtendInReg(Tmp1, VT);
     Tmp2 = DAG.getZeroExtendInReg(Tmp2, VT);
     Result = DAG.getNode(Node->getOpcode(), NVT, Tmp1, Tmp2);
@@ -4342,13 +4399,13 @@
     Tmp2 = Node->getOperand(1);   // Get the pointer.
     if (TLI.getOperationAction(ISD::VAARG, VT) == TargetLowering::Custom) {
       Tmp3 = DAG.getVAArg(VT, Tmp1, Tmp2, Node->getOperand(2));
-      Result = TLI.CustomPromoteOperation(Tmp3, DAG);
+      Result = TLI.LowerOperation(Tmp3, DAG);
     } else {
       const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
       SDOperand VAList = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp2, V, 0);
       // Increment the pointer, VAList, to the next vaarg
       Tmp3 = DAG.getNode(ISD::ADD, TLI.getPointerTy(), VAList, 
-                         DAG.getConstant(MVT::getSizeInBits(VT)/8, 
+                         DAG.getConstant(VT.getSizeInBits()/8,
                                          TLI.getPointerTy()));
       // Store the incremented VAList to the legalized pointer
       Tmp3 = DAG.getStore(VAList.getValue(1), Tmp3, Tmp2, V, 0);
@@ -4373,11 +4430,19 @@
     AddLegalizedOperand(Op.getValue(1), LegalizeOp(Result.getValue(1)));
     break;
   }
-  case ISD::SELECT:
+  case ISD::SELECT: {
     Tmp2 = PromoteOp(Node->getOperand(1));   // Legalize the op0
     Tmp3 = PromoteOp(Node->getOperand(2));   // Legalize the op1
-    Result = DAG.getNode(ISD::SELECT, NVT, Node->getOperand(0), Tmp2, Tmp3);
+
+    MVT VT2 = Tmp2.getValueType();
+    assert(VT2 == Tmp3.getValueType()
+           && "PromoteOp SELECT: Operands 2 and 3 ValueTypes don't match");
+    // Ensure that the resulting node is at least the same size as the operands'
+    // value types, because we cannot assume that TLI.getSetCCValueType() is
+    // constant.
+    Result = DAG.getNode(ISD::SELECT, VT2, Node->getOperand(0), Tmp2, Tmp3);
     break;
+  }
   case ISD::SELECT_CC:
     Tmp2 = PromoteOp(Node->getOperand(2));   // True
     Tmp3 = PromoteOp(Node->getOperand(3));   // False
@@ -4389,8 +4454,8 @@
     Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, NVT, Tmp1);
     Tmp1 = DAG.getNode(ISD::BSWAP, NVT, Tmp1);
     Result = DAG.getNode(ISD::SRL, NVT, Tmp1,
-                         DAG.getConstant(MVT::getSizeInBits(NVT) -
-                                         MVT::getSizeInBits(VT),
+                         DAG.getConstant(NVT.getSizeInBits() -
+                                         VT.getSizeInBits(),
                                          TLI.getShiftAmountTy()));
     break;
   case ISD::CTPOP:
@@ -4407,16 +4472,16 @@
     case ISD::CTTZ:
       // if Tmp1 == sizeinbits(NVT) then Tmp1 = sizeinbits(Old VT)
       Tmp2 = DAG.getSetCC(TLI.getSetCCResultType(Tmp1), Tmp1,
-                          DAG.getConstant(MVT::getSizeInBits(NVT), NVT),
+                          DAG.getConstant(NVT.getSizeInBits(), NVT),
                           ISD::SETEQ);
       Result = DAG.getNode(ISD::SELECT, NVT, Tmp2,
-                           DAG.getConstant(MVT::getSizeInBits(VT), NVT), Tmp1);
+                           DAG.getConstant(VT.getSizeInBits(), NVT), Tmp1);
       break;
     case ISD::CTLZ:
       //Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
       Result = DAG.getNode(ISD::SUB, NVT, Tmp1,
-                           DAG.getConstant(MVT::getSizeInBits(NVT) -
-                                           MVT::getSizeInBits(VT), NVT));
+                           DAG.getConstant(NVT.getSizeInBits() -
+                                           VT.getSizeInBits(), NVT));
       break;
     }
     break;
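After an operand has been promoted to a wider register, the results above need fixups: a promoted BSWAP must be shifted right by the width difference, CTLZ must subtract that difference, and CTTZ must remap the "no bits set" answer from the wide width back to the original one. Worked for i16 promoted to i32, using GCC/Clang builtins purely as a stand-in for the equivalent DAG nodes:

    #include <cassert>
    #include <cstdint>

    // i16 operations carried out in an i32 register, with the promotion fixups.
    uint16_t bswap16_via32(uint16_t x) {
      return static_cast<uint16_t>(__builtin_bswap32(x) >> 16);         // SRL by 32 - 16
    }
    unsigned ctlz16_via32(uint16_t x) {          // x must be nonzero for the builtin
      return __builtin_clz(static_cast<uint32_t>(x)) - 16;              // SUB (32 - 16)
    }
    unsigned cttz16_via32(uint16_t x) {
      unsigned wide = x ? __builtin_ctz(static_cast<uint32_t>(x)) : 32; // cttz in i32
      return wide == 32 ? 16 : wide;                                    // remap "none set"
    }

    int main() {
      assert(bswap16_via32(0x1234) == 0x3412);
      assert(ctlz16_via32(0x0001) == 15);
      assert(cttz16_via32(0x0000) == 16);
      assert(cttz16_via32(0x0100) == 8);
    }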
@@ -4449,8 +4514,8 @@
   SDOperand Vec = Op.getOperand(0);
   SDOperand Idx = Op.getOperand(1);
   
-  MVT::ValueType TVT = Vec.getValueType();
-  unsigned NumElems = MVT::getVectorNumElements(TVT);
+  MVT TVT = Vec.getValueType();
+  unsigned NumElems = TVT.getVectorNumElements();
   
   switch (TLI.getOperationAction(ISD::EXTRACT_VECTOR_ELT, TVT)) {
   default: assert(0 && "This action is not supported yet!");
@@ -4499,12 +4564,11 @@
     SDOperand Ch = DAG.getStore(DAG.getEntryNode(), Vec, StackPtr, NULL, 0);
 
     // Add the offset to the index.
-    unsigned EltSize = MVT::getSizeInBits(Op.getValueType())/8;
+    unsigned EltSize = Op.getValueType().getSizeInBits()/8;
     Idx = DAG.getNode(ISD::MUL, Idx.getValueType(), Idx,
                       DAG.getConstant(EltSize, Idx.getValueType()));
 
-    if (MVT::getSizeInBits(Idx.getValueType()) >
-        MVT::getSizeInBits(TLI.getPointerTy()))
+    if (Idx.getValueType().bitsGT(TLI.getPointerTy()))
       Idx = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), Idx);
     else
       Idx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), Idx);
@@ -4524,9 +4588,9 @@
   SDOperand Vec = Op.getOperand(0);
   SDOperand Idx = LegalizeOp(Op.getOperand(1));
   
-  unsigned NumElems = MVT::getVectorNumElements(Vec.getValueType());
+  unsigned NumElems = Vec.getValueType().getVectorNumElements();
   
-  if (NumElems == MVT::getVectorNumElements(Op.getValueType())) {
+  if (NumElems == Op.getValueType().getVectorNumElements()) {
     // This must be an access of the desired vector length.  Return it.
     return Vec;
   }
@@ -4567,9 +4631,9 @@
     Tmp2 = PromoteOp(RHS);   // RHS
 
     // If this is an FP compare, the operands have already been extended.
-    if (MVT::isInteger(LHS.getValueType())) {
-      MVT::ValueType VT = LHS.getValueType();
-      MVT::ValueType NVT = TLI.getTypeToTransformTo(VT);
+    if (LHS.getValueType().isInteger()) {
+      MVT VT = LHS.getValueType();
+      MVT NVT = TLI.getTypeToTransformTo(VT);
 
       // Otherwise, we have to insert explicit sign or zero extends.  Note
       // that we could insert sign extends for ALL conditions, but zero extend
@@ -4602,10 +4666,10 @@
     }
     break;
   case Expand: {
-    MVT::ValueType VT = LHS.getValueType();
+    MVT VT = LHS.getValueType();
     if (VT == MVT::f32 || VT == MVT::f64) {
       // Expand into one or more soft-fp libcall(s).
-      RTLIB::Libcall LC1, LC2 = RTLIB::UNKNOWN_LIBCALL;
+      RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
       switch (cast<CondCodeSDNode>(CC)->get()) {
       case ISD::SETEQ:
       case ISD::SETOEQ:
@@ -4662,18 +4726,17 @@
         default: assert(0 && "Unsupported FP setcc!");
         }
       }
-      
+
       SDOperand Dummy;
-      Tmp1 = ExpandLibCall(LC1,
-                           DAG.getNode(ISD::MERGE_VALUES, VT, LHS, RHS).Val, 
+      SDOperand Ops[2] = { LHS, RHS };
+      Tmp1 = ExpandLibCall(LC1, DAG.getMergeValues(Ops, 2).Val,
                            false /*sign irrelevant*/, Dummy);
       Tmp2 = DAG.getConstant(0, MVT::i32);
       CC = DAG.getCondCode(TLI.getCmpLibcallCC(LC1));
       if (LC2 != RTLIB::UNKNOWN_LIBCALL) {
         Tmp1 = DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(Tmp1), Tmp1, Tmp2,
                            CC);
-        LHS = ExpandLibCall(LC2,
-                            DAG.getNode(ISD::MERGE_VALUES, VT, LHS, RHS).Val,
+        LHS = ExpandLibCall(LC2, DAG.getMergeValues(Ops, 2).Val,
                             false /*sign irrelevant*/, Dummy);
         Tmp2 = DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(LHS), LHS, Tmp2,
                            DAG.getCondCode(TLI.getCmpLibcallCC(LC2)));
@@ -4805,38 +4868,44 @@
 /// a load from the stack slot to DestVT, extending it if needed.
 /// The resultant code need not be legal.
 SDOperand SelectionDAGLegalize::EmitStackConvert(SDOperand SrcOp,
-                                                 MVT::ValueType SlotVT, 
-                                                 MVT::ValueType DestVT) {
+                                                 MVT SlotVT,
+                                                 MVT DestVT) {
   // Create the stack frame object.
-  SDOperand FIPtr = DAG.CreateStackTemporary(SlotVT);
-
+  unsigned SrcAlign = TLI.getTargetData()->getPrefTypeAlignment(
+                                          SrcOp.getValueType().getTypeForMVT());
+  SDOperand FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);
+  
   FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
   int SPFI = StackPtrFI->getIndex();
-
-  unsigned SrcSize = MVT::getSizeInBits(SrcOp.getValueType());
-  unsigned SlotSize = MVT::getSizeInBits(SlotVT);
-  unsigned DestSize = MVT::getSizeInBits(DestVT);
+  
+  unsigned SrcSize = SrcOp.getValueType().getSizeInBits();
+  unsigned SlotSize = SlotVT.getSizeInBits();
+  unsigned DestSize = DestVT.getSizeInBits();
+  unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(
+                                                        DestVT.getTypeForMVT());
   
   // Emit a store to the stack slot.  Use a truncstore if the input value is
   // larger than DestVT.
   SDOperand Store;
+  
   if (SrcSize > SlotSize)
     Store = DAG.getTruncStore(DAG.getEntryNode(), SrcOp, FIPtr,
-                              PseudoSourceValue::getFixedStack(),
-                              SPFI, SlotVT);
+                              PseudoSourceValue::getFixedStack(), SPFI, SlotVT, 
+                              false, SrcAlign);
   else {
     assert(SrcSize == SlotSize && "Invalid store");
     Store = DAG.getStore(DAG.getEntryNode(), SrcOp, FIPtr,
-                         PseudoSourceValue::getFixedStack(),
-                         SPFI, SlotVT);
+                         PseudoSourceValue::getFixedStack(), SPFI,
+                         false, SrcAlign);
   }
   
   // Result is a load from the stack slot.
   if (SlotSize == DestSize)
-    return DAG.getLoad(DestVT, Store, FIPtr, NULL, 0);
+    return DAG.getLoad(DestVT, Store, FIPtr, NULL, 0, false, DestAlign);
   
   assert(SlotSize < DestSize && "Unknown extension!");
-  return DAG.getExtLoad(ISD::EXTLOAD, DestVT, Store, FIPtr, NULL, 0, SlotVT);
+  return DAG.getExtLoad(ISD::EXTLOAD, DestVT, Store, FIPtr, NULL, 0, SlotVT,
+                        false, DestAlign);
 }
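EmitStackConvert, reworked above to thread explicit source and destination alignments through to the memory operations, converts a value by storing it to a stack temporary in SlotVT (truncating if the source is wider) and loading it back as DestVT (extending if the slot is narrower). A standalone model of the simplest case, SlotVT == DestVT, which is just a bit-convert through memory; illustrative only:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // f32 -> i32 through a stack slot: store the float, reload the same bytes
    // as an integer (no truncation or extension needed when the sizes match).
    uint32_t bitcastF32ToI32(float f) {
      unsigned char slot[sizeof(uint32_t)];  // the stack temporary
      std::memcpy(slot, &f, sizeof(f));      // store SrcOp into the slot
      uint32_t i;
      std::memcpy(&i, slot, sizeof(i));      // load it back as DestVT
      return i;
    }

    int main() {
      assert(bitcastF32ToI32(1.0f) == 0x3F800000u);
    }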
 
 SDOperand SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) {
@@ -4899,7 +4968,7 @@
   
   // If all elements are constants, create a load from the constant pool.
   if (isConstant) {
-    MVT::ValueType VT = Node->getValueType(0);
+    MVT VT = Node->getValueType(0);
     std::vector<Constant*> CV;
     for (unsigned i = 0, e = NumElems; i != e; ++i) {
       if (ConstantFPSDNode *V = 
@@ -4911,7 +4980,7 @@
       } else {
         assert(Node->getOperand(i).getOpcode() == ISD::UNDEF);
         const Type *OpNTy = 
-          MVT::getTypeForValueType(Node->getOperand(0).getValueType());
+          Node->getOperand(0).getValueType().getTypeForMVT();
         CV.push_back(UndefValue::get(OpNTy));
       }
     }
@@ -4923,9 +4992,8 @@
   
   if (SplatValue.Val) {   // Splat of one value?
     // Build the shuffle constant vector: <0, 0, 0, 0>
-    MVT::ValueType MaskVT = 
-      MVT::getIntVectorWithNumElements(NumElems);
-    SDOperand Zero = DAG.getConstant(0, MVT::getVectorElementType(MaskVT));
+    MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+    SDOperand Zero = DAG.getConstant(0, MaskVT.getVectorElementType());
     std::vector<SDOperand> ZeroVec(NumElems, Zero);
     SDOperand SplatMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                                       &ZeroVec[0], ZeroVec.size());
@@ -4961,8 +5029,8 @@
       std::swap(Val1, Val2);
     
     // Build the shuffle constant vector: e.g. <0, 4, 0, 4>
-    MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
-    MVT::ValueType MaskEltVT = MVT::getVectorElementType(MaskVT);
+    MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+    MVT MaskEltVT = MaskVT.getVectorElementType();
     std::vector<SDOperand> MaskVec(NumElems);
 
     // Set elements of the shuffle mask for Val1.
@@ -4996,14 +5064,13 @@
   // Otherwise, we can't handle this case efficiently.  Allocate a sufficiently
   // aligned object on the stack, store each element into it, then load
   // the result as a vector.
-  MVT::ValueType VT = Node->getValueType(0);
+  MVT VT = Node->getValueType(0);
   // Create the stack frame object.
   SDOperand FIPtr = DAG.CreateStackTemporary(VT);
   
   // Emit a store of each element to the stack slot.
   SmallVector<SDOperand, 8> Stores;
-  unsigned TypeByteSize = 
-    MVT::getSizeInBits(Node->getOperand(0).getValueType())/8;
+  unsigned TypeByteSize = Node->getOperand(0).getValueType().getSizeInBits()/8;
   // Store (in the right endianness) the elements to memory.
   for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
     // Ignore undef elements.
@@ -5037,7 +5104,7 @@
   ExpandOp(Op, LHSL, LHSH);
 
   SDOperand Ops[] = { LHSL, LHSH, Amt };
-  MVT::ValueType VT = LHSL.getValueType();
+  MVT VT = LHSL.getValueType();
   Lo = DAG.getNode(NodeOp, DAG.getNodeValueTypes(VT, VT), 2, Ops, 3);
   Hi = Lo.getValue(1);
 }
@@ -5052,12 +5119,12 @@
   assert((Opc == ISD::SHL || Opc == ISD::SRA || Opc == ISD::SRL) &&
          "This is not a shift!");
 
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(Op.getValueType());
+  MVT NVT = TLI.getTypeToTransformTo(Op.getValueType());
   SDOperand ShAmt = LegalizeOp(Amt);
-  MVT::ValueType ShTy = ShAmt.getValueType();
-  unsigned ShBits = MVT::getSizeInBits(ShTy);
-  unsigned VTBits = MVT::getSizeInBits(Op.getValueType());
-  unsigned NVTBits = MVT::getSizeInBits(NVT);
+  MVT ShTy = ShAmt.getValueType();
+  unsigned ShBits = ShTy.getSizeInBits();
+  unsigned VTBits = Op.getValueType().getSizeInBits();
+  unsigned NVTBits = NVT.getSizeInBits();
 
   // Handle the case when Amt is an immediate.
   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Amt.Val)) {
@@ -5208,8 +5275,8 @@
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
   for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
-    MVT::ValueType ArgVT = Node->getOperand(i).getValueType();
-    const Type *ArgTy = MVT::getTypeForValueType(ArgVT);
+    MVT ArgVT = Node->getOperand(i).getValueType();
+    const Type *ArgTy = ArgVT.getTypeForMVT();
     Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy; 
     Entry.isSExt = isSigned;
     Entry.isZExt = !isSigned;
@@ -5219,7 +5286,7 @@
                                            TLI.getPointerTy());
 
   // Splice the libcall in wherever FindInputOutputChains tells us to.
-  const Type *RetTy = MVT::getTypeForValueType(Node->getValueType(0));
+  const Type *RetTy = Node->getValueType(0).getTypeForMVT();
   std::pair<SDOperand,SDOperand> CallInfo =
     TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, CallingConv::C,
                     false, Callee, Args, DAG);
@@ -5245,8 +5312,8 @@
 /// ExpandIntToFP - Expand a [US]INT_TO_FP operation.
 ///
 SDOperand SelectionDAGLegalize::
-ExpandIntToFP(bool isSigned, MVT::ValueType DestTy, SDOperand Source) {
-  MVT::ValueType SourceVT = Source.getValueType();
+ExpandIntToFP(bool isSigned, MVT DestTy, SDOperand Source) {
+  MVT SourceVT = Source.getValueType();
   bool ExpandSource = getTypeAction(SourceVT) == Expand;
 
   // Special case for i32 source to take advantage of UINTTOFP_I32_F32, etc.
@@ -5284,7 +5351,7 @@
     if (DestTy == MVT::f32)
       FudgeInReg = DAG.getLoad(MVT::f32, DAG.getEntryNode(), CPIdx,
                                PseudoSourceValue::getConstantPool(), 0);
-    else if (MVT::getSizeInBits(DestTy) > MVT::getSizeInBits(MVT::f32))
+    else if (DestTy.bitsGT(MVT::f32))
       // FIXME: Avoid the extend by constructing the right constantpool?
       FudgeInReg = DAG.getExtLoad(ISD::EXTLOAD, DestTy, DAG.getEntryNode(),
                                   CPIdx,
@@ -5293,12 +5360,12 @@
     else 
       assert(0 && "Unexpected conversion");
 
-    MVT::ValueType SCVT = SignedConv.getValueType();
+    MVT SCVT = SignedConv.getValueType();
     if (SCVT != DestTy) {
       // Destination type needs to be expanded as well. The FADD now we are
       // constructing will be expanded into a libcall.
-      if (MVT::getSizeInBits(SCVT) != MVT::getSizeInBits(DestTy)) {
-        assert(MVT::getSizeInBits(SCVT) * 2 == MVT::getSizeInBits(DestTy));
+      if (SCVT.getSizeInBits() != DestTy.getSizeInBits()) {
+        assert(SCVT.getSizeInBits() * 2 == DestTy.getSizeInBits());
         SignedConv = DAG.getNode(ISD::BUILD_PAIR, DestTy,
                                  SignedConv, SignedConv.getValue(1));
       }
@@ -5379,7 +5446,7 @@
 /// legal for the target.
 SDOperand SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
                                                      SDOperand Op0,
-                                                     MVT::ValueType DestVT) {
+                                                     MVT DestVT) {
   if (Op0.getValueType() == MVT::i32) {
     // simple 32-bit [signed|unsigned] integer to float/double expansion
     
@@ -5425,10 +5492,10 @@
     if (DestVT == MVT::f64) {
       // do nothing
       Result = Sub;
-    } else if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(MVT::f64)) {
+    } else if (DestVT.bitsLT(MVT::f64)) {
       Result = DAG.getNode(ISD::FP_ROUND, DestVT, Sub,
                            DAG.getIntPtrConstant(0));
-    } else if (MVT::getSizeInBits(DestVT) > MVT::getSizeInBits(MVT::f64)) {
+    } else if (DestVT.bitsGT(MVT::f64)) {
       Result = DAG.getNode(ISD::FP_EXTEND, DestVT, Sub);
     }
     return Result;
@@ -5447,7 +5514,7 @@
   // as a negative number.  To counteract this, the dynamic code adds an
   // offset depending on the data type.
   uint64_t FF;
-  switch (Op0.getValueType()) {
+  switch (Op0.getValueType().getSimpleVT()) {
   default: assert(0 && "Unsupported integer type!");
   case MVT::i8 : FF = 0x43800000ULL; break;  // 2^8  (as a float)
   case MVT::i16: FF = 0x47800000ULL; break;  // 2^16 (as a float)
@@ -5480,17 +5547,17 @@
 /// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP
 /// operation that takes a larger input.
 SDOperand SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDOperand LegalOp,
-                                                      MVT::ValueType DestVT,
+                                                      MVT DestVT,
                                                       bool isSigned) {
   // First step, figure out the appropriate *INT_TO_FP operation to use.
-  MVT::ValueType NewInTy = LegalOp.getValueType();
+  MVT NewInTy = LegalOp.getValueType();
 
   unsigned OpToUse = 0;
 
   // Scan for the appropriate larger type to use.
   while (1) {
-    NewInTy = (MVT::ValueType)(NewInTy+1);
-    assert(MVT::isInteger(NewInTy) && "Ran out of possibilities!");
+    NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT()+1);
+    assert(NewInTy.isInteger() && "Ran out of possibilities!");
 
     // If the target supports SINT_TO_FP of this type, use it.
     switch (TLI.getOperationAction(ISD::SINT_TO_FP, NewInTy)) {
@@ -5535,17 +5602,17 @@
 /// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT
 /// operation that returns a larger result.
 SDOperand SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDOperand LegalOp,
-                                                      MVT::ValueType DestVT,
+                                                      MVT DestVT,
                                                       bool isSigned) {
   // First step, figure out the appropriate FP_TO*INT operation to use.
-  MVT::ValueType NewOutTy = DestVT;
+  MVT NewOutTy = DestVT;
 
   unsigned OpToUse = 0;
 
   // Scan for the appropriate larger type to use.
   while (1) {
-    NewOutTy = (MVT::ValueType)(NewOutTy+1);
-    assert(MVT::isInteger(NewOutTy) && "Ran out of possibilities!");
+    NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT()+1);
+    assert(NewOutTy.isInteger() && "Ran out of possibilities!");
 
     // If the target supports FP_TO_SINT returning this type, use it.
     switch (TLI.getOperationAction(ISD::FP_TO_SINT, NewOutTy)) {
@@ -5579,15 +5646,15 @@
   
   // Okay, we found the operation and type to use.
   SDOperand Operation = DAG.getNode(OpToUse, NewOutTy, LegalOp);
-  
+
   // If the operation produces an invalid type, it must be custom lowered.  Use
   // the target lowering hooks to expand it.  Just keep the low part of the
   // expanded operation, we know that we're truncating anyway.
   if (getTypeAction(NewOutTy) == Expand) {
-    Operation = SDOperand(TLI.ExpandOperationResult(Operation.Val, DAG), 0);
+    Operation = SDOperand(TLI.ReplaceNodeResults(Operation.Val, DAG), 0);
     assert(Operation.Val && "Didn't return anything");
   }
-  
+
   // Truncate the result of the extended FP_TO_*INT operation to the desired
   // size.
   return DAG.getNode(ISD::TRUNCATE, DestVT, Operation);
@@ -5596,10 +5663,10 @@
 /// ExpandBSWAP - Open code the operations for BSWAP of the specified operation.
 ///
 SDOperand SelectionDAGLegalize::ExpandBSWAP(SDOperand Op) {
-  MVT::ValueType VT = Op.getValueType();
-  MVT::ValueType SHVT = TLI.getShiftAmountTy();
+  MVT VT = Op.getValueType();
+  MVT SHVT = TLI.getShiftAmountTy();
   SDOperand Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
-  switch (VT) {
+  switch (VT.getSimpleVT()) {
   default: assert(0 && "Unhandled Expand type in BSWAP!"); abort();
   case MVT::i16:
     Tmp2 = DAG.getNode(ISD::SHL, VT, Op, DAG.getConstant(8, SHVT));
@@ -5651,9 +5718,9 @@
       0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL,
       0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL
     };
-    MVT::ValueType VT = Op.getValueType();
-    MVT::ValueType ShVT = TLI.getShiftAmountTy();
-    unsigned len = MVT::getSizeInBits(VT);
+    MVT VT = Op.getValueType();
+    MVT ShVT = TLI.getShiftAmountTy();
+    unsigned len = VT.getSizeInBits();
     for (unsigned i = 0; (1U << i) <= (len / 2); ++i) {
       //x = (x & mask[i][len/8]) + (x >> (1 << i) & mask[i][len/8])
       SDOperand Tmp2 = DAG.getConstant(mask[i], VT);
@@ -5674,9 +5741,9 @@
     // return popcount(~x);
     //
     // but see also: http://www.hackersdelight.org/HDcode/nlz.cc
-    MVT::ValueType VT = Op.getValueType();
-    MVT::ValueType ShVT = TLI.getShiftAmountTy();
-    unsigned len = MVT::getSizeInBits(VT);
+    MVT VT = Op.getValueType();
+    MVT ShVT = TLI.getShiftAmountTy();
+    unsigned len = VT.getSizeInBits();
     for (unsigned i = 0; (1U << i) <= (len / 2); ++i) {
       SDOperand Tmp3 = DAG.getConstant(1ULL << i, ShVT);
       Op = DAG.getNode(ISD::OR, VT, Op, DAG.getNode(ISD::SRL, VT, Op, Tmp3));
@@ -5689,7 +5756,7 @@
     // unless the target has ctlz but not ctpop, in which case we use:
     // { return 32 - nlz(~x & (x-1)); }
     // see also http://www.hackersdelight.org/HDcode/ntz.cc
-    MVT::ValueType VT = Op.getValueType();
+    MVT VT = Op.getValueType();
     SDOperand Tmp2 = DAG.getConstant(~0ULL, VT);
     SDOperand Tmp3 = DAG.getNode(ISD::AND, VT,
                        DAG.getNode(ISD::XOR, VT, Op, Tmp2),
@@ -5698,7 +5765,7 @@
     if (!TLI.isOperationLegal(ISD::CTPOP, VT) &&
         TLI.isOperationLegal(ISD::CTLZ, VT))
       return DAG.getNode(ISD::SUB, VT,
-                         DAG.getConstant(MVT::getSizeInBits(VT), VT),
+                         DAG.getConstant(VT.getSizeInBits(), VT),
                          DAG.getNode(ISD::CTLZ, VT, Tmp3));
     return DAG.getNode(ISD::CTPOP, VT, Tmp3);
   }
@@ -5711,13 +5778,12 @@
 /// ExpandedNodes map is filled in for any results that are expanded, and the
 /// Lo/Hi values are returned.
 void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){
-  MVT::ValueType VT = Op.getValueType();
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(VT);
+  MVT VT = Op.getValueType();
+  MVT NVT = TLI.getTypeToTransformTo(VT);
   SDNode *Node = Op.Val;
   assert(getTypeAction(VT) == Expand && "Not an expanded type!");
-  assert(((MVT::isInteger(NVT) && NVT < VT) || MVT::isFloatingPoint(VT) ||
-         MVT::isVector(VT)) &&
-         "Cannot expand to FP value or to larger int value!");
+  assert(((NVT.isInteger() && NVT.bitsLT(VT)) || VT.isFloatingPoint() ||
+         VT.isVector()) && "Cannot expand to FP value or to larger int value!");
 
   // See if we already expanded it.
   DenseMap<SDOperand, std::pair<SDOperand, SDOperand> >::iterator I
@@ -5763,12 +5829,11 @@
     Lo  = ExpandEXTRACT_VECTOR_ELT(Op);
     return ExpandOp(Lo, Lo, Hi);
   case ISD::UNDEF:
-    NVT = TLI.getTypeToExpandTo(VT);
     Lo = DAG.getNode(ISD::UNDEF, NVT);
     Hi = DAG.getNode(ISD::UNDEF, NVT);
     break;
   case ISD::Constant: {
-    unsigned NVTBits = MVT::getSizeInBits(NVT);
+    unsigned NVTBits = NVT.getSizeInBits();
     const APInt &Cst = cast<ConstantSDNode>(Node)->getAPIntValue();
     Lo = DAG.getConstant(APInt(Cst).trunc(NVTBits), NVT);
     Hi = DAG.getConstant(Cst.lshr(NVTBits).trunc(NVTBits), NVT);
@@ -5817,7 +5882,7 @@
     // The high part gets the sign extension from the lo-part.  This handles
     // things like sextinreg V:i64 from i8.
     Hi = DAG.getNode(ISD::SRA, NVT, Lo,
-                     DAG.getConstant(MVT::getSizeInBits(NVT)-1,
+                     DAG.getConstant(NVT.getSizeInBits()-1,
                                      TLI.getShiftAmountTy()));
     break;
 
@@ -5840,7 +5905,7 @@
   case ISD::CTLZ: {
     // ctlz (HL) -> ctlz(H) != 32 ? ctlz(H) : (ctlz(L)+32)
     ExpandOp(Node->getOperand(0), Lo, Hi);
-    SDOperand BitsC = DAG.getConstant(MVT::getSizeInBits(NVT), NVT);
+    SDOperand BitsC = DAG.getConstant(NVT.getSizeInBits(), NVT);
     SDOperand HLZ = DAG.getNode(ISD::CTLZ, NVT, Hi);
     SDOperand TopNotZero = DAG.getSetCC(TLI.getSetCCResultType(HLZ), HLZ, BitsC,
                                         ISD::SETNE);
@@ -5855,7 +5920,7 @@
   case ISD::CTTZ: {
     // cttz (HL) -> cttz(L) != 32 ? cttz(L) : (cttz(H)+32)
     ExpandOp(Node->getOperand(0), Lo, Hi);
-    SDOperand BitsC = DAG.getConstant(MVT::getSizeInBits(NVT), NVT);
+    SDOperand BitsC = DAG.getConstant(NVT.getSizeInBits(), NVT);
     SDOperand LTZ = DAG.getNode(ISD::CTTZ, NVT, Lo);
     SDOperand BotNotZero = DAG.getSetCC(TLI.getSetCCResultType(LTZ), LTZ, BitsC,
                                         ISD::SETNE);
@@ -5904,7 +5969,7 @@
       }
 
       // Increment the pointer to the other half.
-      unsigned IncrementSize = MVT::getSizeInBits(Lo.getValueType())/8;
+      unsigned IncrementSize = Lo.getValueType().getSizeInBits()/8;
       Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
                         DAG.getIntPtrConstant(IncrementSize));
       SVOffset += IncrementSize;
@@ -5922,7 +5987,7 @@
       if (TLI.isBigEndian())
         std::swap(Lo, Hi);
     } else {
-      MVT::ValueType EVT = LD->getMemoryVT();
+      MVT EVT = LD->getMemoryVT();
 
       if ((VT == MVT::f64 && EVT == MVT::f32) ||
           (VT == MVT::ppcf128 && (EVT==MVT::f64 || EVT==MVT::f32))) {
@@ -5949,7 +6014,7 @@
       if (ExtType == ISD::SEXTLOAD) {
         // The high part is obtained by SRA'ing all but one of the bits of the
         // lo part.
-        unsigned LoSize = MVT::getSizeInBits(Lo.getValueType());
+        unsigned LoSize = Lo.getValueType().getSizeInBits();
         Hi = DAG.getNode(ISD::SRA, NVT, Lo,
                          DAG.getConstant(LoSize-1, TLI.getShiftAmountTy()));
       } else if (ExtType == ISD::ZEXTLOAD) {
@@ -6009,7 +6074,7 @@
 
     // The high part is obtained by SRA'ing all but one of the bits of the lo
     // part.
-    unsigned LoSize = MVT::getSizeInBits(Lo.getValueType());
+    unsigned LoSize = Lo.getValueType().getSizeInBits();
     Hi = DAG.getNode(ISD::SRA, NVT, Lo,
                      DAG.getConstant(LoSize-1, TLI.getShiftAmountTy()));
     break;
@@ -6058,7 +6123,7 @@
 
     // If source operand will be expanded to the same type as VT, i.e.
     // i64 <- f64, i32 <- f32, expand the source operand instead.
-    MVT::ValueType VT0 = Node->getOperand(0).getValueType();
+    MVT VT0 = Node->getOperand(0).getValueType();
     if (getTypeAction(VT0) == Expand && TLI.getTypeToTransformTo(VT0) == VT) {
       ExpandOp(Node->getOperand(0), Lo, Hi);
       break;
@@ -6084,7 +6149,7 @@
     break;
   }
 
-  case ISD::ATOMIC_LCS: {
+  case ISD::ATOMIC_CMP_SWAP: {
     SDOperand Tmp = TLI.LowerOperation(Op, DAG);
     assert(Tmp.Val && "Node must be custom expanded!");
     ExpandOp(Tmp.getValue(0), Lo, Hi);
@@ -6307,9 +6372,9 @@
     // If the target wants to custom expand this, let them.
     if (TLI.getOperationAction(Node->getOpcode(), VT) ==
             TargetLowering::Custom) {
-      Op = TLI.LowerOperation(Op, DAG);
-      if (Op.Val) {
-        ExpandOp(Op, Lo, Hi);
+      SDOperand Result = TLI.LowerOperation(Op, DAG);
+      if (Result.Val) {
+        ExpandOp(Result, Lo, Hi);
         break;
       }
     }
@@ -6592,7 +6657,7 @@
   case ISD::SINT_TO_FP:
   case ISD::UINT_TO_FP: {
     bool isSigned = Node->getOpcode() == ISD::SINT_TO_FP;
-    MVT::ValueType SrcVT = Node->getOperand(0).getValueType();
+    MVT SrcVT = Node->getOperand(0).getValueType();
 
     // Promote the operand if needed.  Do this before checking for
     // ppcf128 so conversions of i16 and i8 work.
@@ -6677,18 +6742,18 @@
 /// two smaller values, still of vector type.
 void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo,
                                          SDOperand &Hi) {
-  assert(MVT::isVector(Op.getValueType()) && "Cannot split non-vector type!");
+  assert(Op.getValueType().isVector() && "Cannot split non-vector type!");
   SDNode *Node = Op.Val;
-  unsigned NumElements = MVT::getVectorNumElements(Op.getValueType());
+  unsigned NumElements = Op.getValueType().getVectorNumElements();
   assert(NumElements > 1 && "Cannot split a single element vector!");
 
-  MVT::ValueType NewEltVT = MVT::getVectorElementType(Op.getValueType());
+  MVT NewEltVT = Op.getValueType().getVectorElementType();
 
   unsigned NewNumElts_Lo = 1 << Log2_32(NumElements-1);
   unsigned NewNumElts_Hi = NumElements - NewNumElts_Lo;
 
-  MVT::ValueType NewVT_Lo = MVT::getVectorType(NewEltVT, NewNumElts_Lo);
-  MVT::ValueType NewVT_Hi = MVT::getVectorType(NewEltVT, NewNumElts_Hi);
+  MVT NewVT_Lo = MVT::getVectorVT(NewEltVT, NewNumElts_Lo);
+  MVT NewVT_Hi = MVT::getVectorVT(NewEltVT, NewNumElts_Hi);
 
   // See if we already split it.
   std::map<SDOperand, std::pair<SDOperand, SDOperand> >::iterator I
@@ -6714,22 +6779,29 @@
     Hi = Node->getOperand(1);
     break;
   case ISD::INSERT_VECTOR_ELT: {
-    SplitVectorOp(Node->getOperand(0), Lo, Hi);
-    unsigned Index = cast<ConstantSDNode>(Node->getOperand(2))->getValue();
-    SDOperand ScalarOp = Node->getOperand(1);
-    if (Index < NewNumElts_Lo)
-      Lo = DAG.getNode(ISD::INSERT_VECTOR_ELT, NewVT_Lo, Lo, ScalarOp,
-                       DAG.getIntPtrConstant(Index));
-    else
-      Hi = DAG.getNode(ISD::INSERT_VECTOR_ELT, NewVT_Hi, Hi, ScalarOp,
-                       DAG.getIntPtrConstant(Index - NewNumElts_Lo));
+    if (ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
+      SplitVectorOp(Node->getOperand(0), Lo, Hi);
+      unsigned Index = Idx->getValue();
+      SDOperand ScalarOp = Node->getOperand(1);
+      if (Index < NewNumElts_Lo)
+        Lo = DAG.getNode(ISD::INSERT_VECTOR_ELT, NewVT_Lo, Lo, ScalarOp,
+                         DAG.getIntPtrConstant(Index));
+      else
+        Hi = DAG.getNode(ISD::INSERT_VECTOR_ELT, NewVT_Hi, Hi, ScalarOp,
+                         DAG.getIntPtrConstant(Index - NewNumElts_Lo));
+      break;
+    }
+    SDOperand Tmp = PerformInsertVectorEltInMemory(Node->getOperand(0),
+                                                   Node->getOperand(1),
+                                                   Node->getOperand(2));
+    SplitVectorOp(Tmp, Lo, Hi);
     break;
   }
   case ISD::VECTOR_SHUFFLE: {
     // Build the low part.
     SDOperand Mask = Node->getOperand(2);
     SmallVector<SDOperand, 8> Ops;
-    MVT::ValueType PtrVT = TLI.getPointerTy();
+    MVT PtrVT = TLI.getPointerTy();
     
     // Insert all of the elements from the input that are needed.  We use 
     // buildvector of extractelement here because the input vectors will have
@@ -6804,7 +6876,7 @@
     SplitVectorOp(Node->getOperand(1), LL, LH);
     SplitVectorOp(Node->getOperand(2), RL, RH);
 
-    if (MVT::isVector(Cond.getValueType())) {
+    if (Cond.getValueType().isVector()) {
       // Handle a vector merge.
       SDOperand CL, CH;
       SplitVectorOp(Cond, CL, CH);
@@ -6817,6 +6889,30 @@
     }
     break;
   }
+  case ISD::SELECT_CC: {
+    SDOperand CondLHS = Node->getOperand(0);
+    SDOperand CondRHS = Node->getOperand(1);
+    SDOperand CondCode = Node->getOperand(4);
+    
+    SDOperand LL, LH, RL, RH;
+    SplitVectorOp(Node->getOperand(2), LL, LH);
+    SplitVectorOp(Node->getOperand(3), RL, RH);
+    
+    // Handle a simple select with vector operands.
+    Lo = DAG.getNode(ISD::SELECT_CC, NewVT_Lo, CondLHS, CondRHS,
+                     LL, RL, CondCode);
+    Hi = DAG.getNode(ISD::SELECT_CC, NewVT_Hi, CondLHS, CondRHS, 
+                     LH, RH, CondCode);
+    break;
+  }
+  case ISD::VSETCC: {
+    SDOperand LL, LH, RL, RH;
+    SplitVectorOp(Node->getOperand(0), LL, LH);
+    SplitVectorOp(Node->getOperand(1), RL, RH);
+    Lo = DAG.getNode(ISD::VSETCC, NewVT_Lo, LL, RL, Node->getOperand(2));
+    Hi = DAG.getNode(ISD::VSETCC, NewVT_Hi, LH, RH, Node->getOperand(2));
+    break;
+  }
   case ISD::ADD:
   case ISD::SUB:
   case ISD::MUL:
@@ -6878,7 +6974,7 @@
     bool isVolatile = LD->isVolatile();
 
     Lo = DAG.getLoad(NewVT_Lo, Ch, Ptr, SV, SVOffset, isVolatile, Alignment);
-    unsigned IncrementSize = NewNumElts_Lo * MVT::getSizeInBits(NewEltVT)/8;
+    unsigned IncrementSize = NewNumElts_Lo * NewEltVT.getSizeInBits()/8;
     Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
                       DAG.getIntPtrConstant(IncrementSize));
     SVOffset += IncrementSize;
@@ -6898,8 +6994,8 @@
     // We know the result is a vector.  The input may be either a vector or a
     // scalar value.
     SDOperand InOp = Node->getOperand(0);
-    if (!MVT::isVector(InOp.getValueType()) ||
-        MVT::getVectorNumElements(InOp.getValueType()) == 1) {
+    if (!InOp.getValueType().isVector() ||
+        InOp.getValueType().getVectorNumElements() == 1) {
       // The input is a scalar or single-element vector.
       // Lower to a store/load so that it can be split.
       // FIXME: this could be improved probably.
@@ -6933,11 +7029,10 @@
 /// (e.g. v1f32), convert it into the equivalent operation that returns a
 /// scalar (e.g. f32) value.
 SDOperand SelectionDAGLegalize::ScalarizeVectorOp(SDOperand Op) {
-  assert(MVT::isVector(Op.getValueType()) &&
-         "Bad ScalarizeVectorOp invocation!");
+  assert(Op.getValueType().isVector() && "Bad ScalarizeVectorOp invocation!");
   SDNode *Node = Op.Val;
-  MVT::ValueType NewVT = MVT::getVectorElementType(Op.getValueType());
-  assert(MVT::getVectorNumElements(Op.getValueType()) == 1);
+  MVT NewVT = Op.getValueType().getVectorElementType();
+  assert(Op.getValueType().getVectorNumElements() == 1);
   
   // See if we already scalarized it.
   std::map<SDOperand, SDOperand>::iterator I = ScalarizedNodes.find(Op);
@@ -7025,14 +7120,35 @@
     Result = Node->getOperand(0);
     assert(Result.getValueType() == NewVT);
     break;
-  case ISD::BIT_CONVERT:
-    Result = DAG.getNode(ISD::BIT_CONVERT, NewVT, Op.getOperand(0));
+  case ISD::BIT_CONVERT: {
+    SDOperand Op0 = Op.getOperand(0);
+    if (Op0.getValueType().getVectorNumElements() == 1)
+      Op0 = ScalarizeVectorOp(Op0);
+    Result = DAG.getNode(ISD::BIT_CONVERT, NewVT, Op0);
     break;
+  }
   case ISD::SELECT:
     Result = DAG.getNode(ISD::SELECT, NewVT, Op.getOperand(0),
                          ScalarizeVectorOp(Op.getOperand(1)),
                          ScalarizeVectorOp(Op.getOperand(2)));
     break;
+  case ISD::SELECT_CC:
+    Result = DAG.getNode(ISD::SELECT_CC, NewVT, Node->getOperand(0), 
+                         Node->getOperand(1),
+                         ScalarizeVectorOp(Op.getOperand(2)),
+                         ScalarizeVectorOp(Op.getOperand(3)),
+                         Node->getOperand(4));
+    break;
+  case ISD::VSETCC: {
+    SDOperand Op0 = ScalarizeVectorOp(Op.getOperand(0));
+    SDOperand Op1 = ScalarizeVectorOp(Op.getOperand(1));
+    Result = DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(Op0), Op0, Op1,
+                         Op.getOperand(2));
+    Result = DAG.getNode(ISD::SELECT, NewVT, Result,
+                         DAG.getConstant(-1ULL, NewVT),
+                         DAG.getConstant(0ULL, NewVT));
+    break;
+  }
   }
 
   if (TLI.isTypeLegal(NewVT))

Added: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp?rev=53163&view=auto

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp (added)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp Sun Jul  6 15:45:41 2008
@@ -0,0 +1,943 @@
+//===-------- LegalizeFloatTypes.cpp - Legalization of float types --------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements float type expansion and softening for LegalizeTypes.
+// Softening is the act of turning a computation in an illegal floating point
+// type into a computation in an integer type of the same size; also known as
+// "soft float".  For example, turning f32 arithmetic into operations using i32.
+// The resulting integer value is the same as what you would get by performing
+// the floating point operation and bitcasting the result to the integer type.
+// Expansion is the act of changing a computation in an illegal type to be a
+// computation in two identical registers of a smaller type.  For example,
+// implementing ppcf128 arithmetic in two f64 registers.
+//
+//===----------------------------------------------------------------------===//
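As a rough standalone illustration of the softening invariant described in the header above (plain C++ sketch with hypothetical helper names, not part of this patch): a softened f32 FADD takes the i32 bit patterns of its operands and must return the i32 bit pattern of the f32 sum.

    #include <cstdint>
    #include <cstring>

    // Bit-cast helpers standing in for the bit-convert steps in the legalizer.
    static uint32_t BitsOfFloat(float F) {
      uint32_t Bits;
      std::memcpy(&Bits, &F, sizeof(Bits));   // f32 -> i32 bit pattern
      return Bits;
    }

    static float FloatOfBits(uint32_t Bits) {
      float F;
      std::memcpy(&F, &Bits, sizeof(F));      // i32 bit pattern -> f32
      return F;
    }

    // What a softened FADD is required to compute: the i32 bits of the f32 sum.
    // In the real lowering this body is replaced by a soft-float libcall.
    static uint32_t SoftenedFAdd(uint32_t A, uint32_t B) {
      return BitsOfFloat(FloatOfBits(A) + FloatOfBits(B));
    }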
+
+#include "LegalizeTypes.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+using namespace llvm;
+
+/// GetFPLibCall - Return the right libcall for the given floating point type.
+static RTLIB::Libcall GetFPLibCall(MVT VT,
+                                   RTLIB::Libcall Call_F32,
+                                   RTLIB::Libcall Call_F64,
+                                   RTLIB::Libcall Call_F80,
+                                   RTLIB::Libcall Call_PPCF128) {
+  return
+    VT == MVT::f32 ? Call_F32 :
+    VT == MVT::f64 ? Call_F64 :
+    VT == MVT::f80 ? Call_F80 :
+    VT == MVT::ppcf128 ? Call_PPCF128 :
+    RTLIB::UNKNOWN_LIBCALL;
+}
+
+//===----------------------------------------------------------------------===//
+//  Result Float to Integer Conversion.
+//===----------------------------------------------------------------------===//
+
+void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
+  DEBUG(cerr << "Soften float result " << ResNo << ": "; N->dump(&DAG);
+        cerr << "\n");
+  SDOperand R = SDOperand();
+
+  // See if the target wants to custom expand this node.
+  if (TLI.getOperationAction(N->getOpcode(), N->getValueType(ResNo)) ==
+      TargetLowering::Custom) {
+    // If the target wants to, allow it to lower this itself.
+    if (SDNode *P = TLI.ReplaceNodeResults(N, DAG)) {
+      // Everything that once used N now uses P.  We are guaranteed that the
+      // result value types of N and the result value types of P match.
+      ReplaceNodeWith(N, P);
+      return;
+    }
+  }
+
+  switch (N->getOpcode()) {
+  default:
+#ifndef NDEBUG
+    cerr << "SoftenFloatResult #" << ResNo << ": ";
+    N->dump(&DAG); cerr << "\n";
+#endif
+    assert(0 && "Do not know how to convert the result of this operator!");
+    abort();
+
+    case ISD::BIT_CONVERT: R = SoftenFloatRes_BIT_CONVERT(N); break;
+    case ISD::BUILD_PAIR:  R = SoftenFloatRes_BUILD_PAIR(N); break;
+    case ISD::ConstantFP:
+      R = SoftenFloatRes_ConstantFP(cast<ConstantFPSDNode>(N));
+      break;
+    case ISD::FCOPYSIGN:   R = SoftenFloatRes_FCOPYSIGN(N); break;
+    case ISD::LOAD:        R = SoftenFloatRes_LOAD(N); break;
+    case ISD::SINT_TO_FP:
+    case ISD::UINT_TO_FP:  R = SoftenFloatRes_XINT_TO_FP(N); break;
+
+    case ISD::FADD: R = SoftenFloatRes_FADD(N); break;
+    case ISD::FMUL: R = SoftenFloatRes_FMUL(N); break;
+    case ISD::FSUB: R = SoftenFloatRes_FSUB(N); break;
+  }
+
+  // If R is null, the sub-method took care of registering the result.
+  if (R.Val)
+    SetSoftenedFloat(SDOperand(N, ResNo), R);
+}
+
+SDOperand DAGTypeLegalizer::SoftenFloatRes_BIT_CONVERT(SDNode *N) {
+  return BitConvertToInteger(N->getOperand(0));
+}
+
+SDOperand DAGTypeLegalizer::SoftenFloatRes_BUILD_PAIR(SDNode *N) {
+  // Convert the inputs to integers, and build a new pair out of them.
+  return DAG.getNode(ISD::BUILD_PAIR,
+                     TLI.getTypeToTransformTo(N->getValueType(0)),
+                     BitConvertToInteger(N->getOperand(0)),
+                     BitConvertToInteger(N->getOperand(1)));
+}
+
+SDOperand DAGTypeLegalizer::SoftenFloatRes_ConstantFP(ConstantFPSDNode *N) {
+  return DAG.getConstant(N->getValueAPF().convertToAPInt(),
+                         TLI.getTypeToTransformTo(N->getValueType(0)));
+}
+
+SDOperand DAGTypeLegalizer::SoftenFloatRes_FADD(SDNode *N) {
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+  SDOperand Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
+                       GetSoftenedFloat(N->getOperand(1)) };
+  return MakeLibCall(GetFPLibCall(N->getValueType(0),
+                                  RTLIB::ADD_F32,
+                                  RTLIB::ADD_F64,
+                                  RTLIB::ADD_F80,
+                                  RTLIB::ADD_PPCF128),
+                     NVT, Ops, 2, false);
+}
+
+SDOperand DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN(SDNode *N) {
+  SDOperand LHS = GetSoftenedFloat(N->getOperand(0));
+  SDOperand RHS = BitConvertToInteger(N->getOperand(1));
+
+  MVT LVT = LHS.getValueType();
+  MVT RVT = RHS.getValueType();
+
+  unsigned LSize = LVT.getSizeInBits();
+  unsigned RSize = RVT.getSizeInBits();
+
+  // First get the sign bit of second operand.
+  SDOperand SignBit = DAG.getNode(ISD::SHL, RVT, DAG.getConstant(1, RVT),
+                                  DAG.getConstant(RSize - 1,
+                                                  TLI.getShiftAmountTy()));
+  SignBit = DAG.getNode(ISD::AND, RVT, RHS, SignBit);
+
+  // Shift right or sign-extend it if the two operands have different types.
+  int SizeDiff = RVT.getSizeInBits() - LVT.getSizeInBits();
+  if (SizeDiff > 0) {
+    SignBit = DAG.getNode(ISD::SRL, RVT, SignBit,
+                          DAG.getConstant(SizeDiff, TLI.getShiftAmountTy()));
+    SignBit = DAG.getNode(ISD::TRUNCATE, LVT, SignBit);
+  } else if (SizeDiff < 0) {
+    SignBit = DAG.getNode(ISD::ANY_EXTEND, LVT, SignBit);
+    SignBit = DAG.getNode(ISD::SHL, LVT, SignBit,
+                          DAG.getConstant(-SizeDiff, TLI.getShiftAmountTy()));
+  }
+
+  // Clear the sign bit of the first operand.
+  SDOperand Mask = DAG.getNode(ISD::SHL, LVT, DAG.getConstant(1, LVT),
+                               DAG.getConstant(LSize - 1,
+                                               TLI.getShiftAmountTy()));
+  Mask = DAG.getNode(ISD::SUB, LVT, Mask, DAG.getConstant(1, LVT));
+  LHS = DAG.getNode(ISD::AND, LVT, LHS, Mask);
+
+  // Or the value with the sign bit.
+  return DAG.getNode(ISD::OR, LVT, LHS, SignBit);
+}
+
+SDOperand DAGTypeLegalizer::SoftenFloatRes_FMUL(SDNode *N) {
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+  SDOperand Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
+                       GetSoftenedFloat(N->getOperand(1)) };
+  return MakeLibCall(GetFPLibCall(N->getValueType(0),
+                                  RTLIB::MUL_F32,
+                                  RTLIB::MUL_F64,
+                                  RTLIB::MUL_F80,
+                                  RTLIB::MUL_PPCF128),
+                     NVT, Ops, 2, false);
+}
+
+SDOperand DAGTypeLegalizer::SoftenFloatRes_FSUB(SDNode *N) {
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+  SDOperand Ops[2] = { GetSoftenedFloat(N->getOperand(0)),
+                       GetSoftenedFloat(N->getOperand(1)) };
+  return MakeLibCall(GetFPLibCall(N->getValueType(0),
+                                  RTLIB::SUB_F32,
+                                  RTLIB::SUB_F64,
+                                  RTLIB::SUB_F80,
+                                  RTLIB::SUB_PPCF128),
+                     NVT, Ops, 2, false);
+}
+
+SDOperand DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) {
+  LoadSDNode *L = cast<LoadSDNode>(N);
+  MVT VT = N->getValueType(0);
+  MVT NVT = TLI.getTypeToTransformTo(VT);
+
+  if (L->getExtensionType() == ISD::NON_EXTLOAD)
+     return DAG.getLoad(L->getAddressingMode(), L->getExtensionType(),
+                        NVT, L->getChain(), L->getBasePtr(), L->getOffset(),
+                        L->getSrcValue(), L->getSrcValueOffset(), NVT,
+                        L->isVolatile(), L->getAlignment());
+
+  // Do a non-extending load followed by FP_EXTEND.
+  SDOperand NL = DAG.getLoad(L->getAddressingMode(), ISD::NON_EXTLOAD,
+                             L->getMemoryVT(), L->getChain(),
+                             L->getBasePtr(), L->getOffset(),
+                             L->getSrcValue(), L->getSrcValueOffset(),
+                             L->getMemoryVT(),
+                             L->isVolatile(), L->getAlignment());
+  return BitConvertToInteger(DAG.getNode(ISD::FP_EXTEND, VT, NL));
+}
+
+SDOperand DAGTypeLegalizer::SoftenFloatRes_XINT_TO_FP(SDNode *N) {
+  bool isSigned = N->getOpcode() == ISD::SINT_TO_FP;
+  MVT DestVT = N->getValueType(0);
+  SDOperand Op = N->getOperand(0);
+
+  if (Op.getValueType() == MVT::i32) {
+    // simple 32-bit [signed|unsigned] integer to float/double expansion
+
+    // Get the stack frame index of a 8 byte buffer.
+    SDOperand StackSlot = DAG.CreateStackTemporary(MVT::f64);
+
+    // word offset constant for Hi/Lo address computation
+    SDOperand Offset =
+      DAG.getConstant(MVT(MVT::i32).getSizeInBits() / 8,
+                      TLI.getPointerTy());
+    // set up Hi and Lo (into buffer) address based on endian
+    SDOperand Hi = StackSlot;
+    SDOperand Lo = DAG.getNode(ISD::ADD, TLI.getPointerTy(), StackSlot, Offset);
+    if (TLI.isLittleEndian())
+      std::swap(Hi, Lo);
+
+    // if signed map to unsigned space
+    SDOperand OpMapped;
+    if (isSigned) {
+      // constant used to invert sign bit (signed to unsigned mapping)
+      SDOperand SignBit = DAG.getConstant(0x80000000u, MVT::i32);
+      OpMapped = DAG.getNode(ISD::XOR, MVT::i32, Op, SignBit);
+    } else {
+      OpMapped = Op;
+    }
+    // store the lo of the constructed double - based on integer input
+    SDOperand Store1 = DAG.getStore(DAG.getEntryNode(),
+                                    OpMapped, Lo, NULL, 0);
+    // initial hi portion of constructed double
+    SDOperand InitialHi = DAG.getConstant(0x43300000u, MVT::i32);
+    // store the hi of the constructed double - biased exponent
+    SDOperand Store2 = DAG.getStore(Store1, InitialHi, Hi, NULL, 0);
+    // load the constructed double
+    SDOperand Load = DAG.getLoad(MVT::f64, Store2, StackSlot, NULL, 0);
+    // FP constant to bias correct the final result
+    SDOperand Bias = DAG.getConstantFP(isSigned ?
+                                            BitsToDouble(0x4330000080000000ULL)
+                                          : BitsToDouble(0x4330000000000000ULL),
+                                     MVT::f64);
+    // subtract the bias
+    SDOperand Sub = DAG.getNode(ISD::FSUB, MVT::f64, Load, Bias);
+    // final result
+    SDOperand Result;
+    // handle final rounding
+    if (DestVT == MVT::f64) {
+      // do nothing
+      Result = Sub;
+    } else if (DestVT.bitsLT(MVT::f64)) {
+      Result = DAG.getNode(ISD::FP_ROUND, DestVT, Sub,
+                           DAG.getIntPtrConstant(0));
+    } else if (DestVT.bitsGT(MVT::f64)) {
+      Result = DAG.getNode(ISD::FP_EXTEND, DestVT, Sub);
+    }
+    return BitConvertToInteger(Result);
+  }
+  assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet");
+  SDOperand Tmp1 = DAG.getNode(ISD::SINT_TO_FP, DestVT, Op);
+
+  SDOperand SignSet = DAG.getSetCC(TLI.getSetCCResultType(Op), Op,
+                                   DAG.getConstant(0, Op.getValueType()),
+                                   ISD::SETLT);
+  SDOperand Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4);
+  SDOperand CstOffset = DAG.getNode(ISD::SELECT, Zero.getValueType(),
+                                    SignSet, Four, Zero);
+
+  // If the sign bit of the integer is set, the large number will be treated
+  // as a negative number.  To counteract this, the dynamic code adds an
+  // offset depending on the data type.
+  uint64_t FF;
+  switch (Op.getValueType().getSimpleVT()) {
+  default: assert(0 && "Unsupported integer type!");
+  case MVT::i8 : FF = 0x43800000ULL; break;  // 2^8  (as a float)
+  case MVT::i16: FF = 0x47800000ULL; break;  // 2^16 (as a float)
+  case MVT::i32: FF = 0x4F800000ULL; break;  // 2^32 (as a float)
+  case MVT::i64: FF = 0x5F800000ULL; break;  // 2^64 (as a float)
+  }
+  if (TLI.isLittleEndian()) FF <<= 32;
+  Constant *FudgeFactor = ConstantInt::get(Type::Int64Ty, FF);
+
+  SDOperand CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy());
+  CPIdx = DAG.getNode(ISD::ADD, TLI.getPointerTy(), CPIdx, CstOffset);
+  SDOperand FudgeInReg;
+  if (DestVT == MVT::f32)
+    FudgeInReg = DAG.getLoad(MVT::f32, DAG.getEntryNode(), CPIdx,
+                             PseudoSourceValue::getConstantPool(), 0);
+  else {
+    FudgeInReg = DAG.getExtLoad(ISD::EXTLOAD, DestVT,
+                                DAG.getEntryNode(), CPIdx,
+                                PseudoSourceValue::getConstantPool(), 0,
+                                MVT::f32);
+  }
+
+  return BitConvertToInteger(DAG.getNode(ISD::FADD, DestVT, Tmp1, FudgeInReg));
+}
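For reference, the i32 path above relies on the classic bias trick: store 0x43300000 as the high word and the (sign-flipped, in the signed case) integer as the low word, which yields the double 2^52 + x, then subtract the matching bias constant. A standalone sketch of that arithmetic, not part of this patch:

    #include <cstdint>
    #include <cstring>

    // Demonstrates the constants used above: 0x43300000 is the high word of
    // 2^52, and the bias is 2^52 (unsigned) or 2^52 + 2^31 (signed).
    static double Int32ToDoubleViaBias(uint32_t LoWord, bool IsSigned) {
      if (IsSigned)
        LoWord ^= 0x80000000u;                 // map the signed range onto unsigned
      uint64_t ValBits = (uint64_t(0x43300000u) << 32) | LoWord;  // 2^52 + LoWord
      uint64_t BiasBits = IsSigned ? 0x4330000080000000ULL        // 2^52 + 2^31
                                   : 0x4330000000000000ULL;       // 2^52
      double Val, Bias;
      std::memcpy(&Val, &ValBits, sizeof(Val));
      std::memcpy(&Bias, &BiasBits, sizeof(Bias));
      return Val - Bias;                       // exactly the original integer value
    }

Passing the raw two's-complement bits as LoWord with IsSigned set recovers the signed value; with IsSigned clear it recovers the unsigned value.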
+
+
+//===----------------------------------------------------------------------===//
+//  Operand Float to Integer Conversion.
+//===----------------------------------------------------------------------===//
+
+bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) {
+  DEBUG(cerr << "Soften float operand " << OpNo << ": "; N->dump(&DAG);
+        cerr << "\n");
+  SDOperand Res(0, 0);
+
+  if (TLI.getOperationAction(N->getOpcode(), N->getOperand(OpNo).getValueType())
+      == TargetLowering::Custom)
+    Res = TLI.LowerOperation(SDOperand(N, OpNo), DAG);
+
+  if (Res.Val == 0) {
+    switch (N->getOpcode()) {
+    default:
+#ifndef NDEBUG
+      cerr << "SoftenFloatOperand Op #" << OpNo << ": ";
+      N->dump(&DAG); cerr << "\n";
+#endif
+      assert(0 && "Do not know how to convert this operator's operand!");
+      abort();
+
+    case ISD::BIT_CONVERT: Res = SoftenFloatOp_BIT_CONVERT(N); break;
+
+    case ISD::BR_CC:     Res = SoftenFloatOp_BR_CC(N); break;
+    case ISD::SELECT_CC: Res = SoftenFloatOp_SELECT_CC(N); break;
+    case ISD::SETCC:     Res = SoftenFloatOp_SETCC(N); break;
+    }
+  }
+
+  // If the result is null, the sub-method took care of registering results etc.
+  if (!Res.Val) return false;
+
+  // If the result is N, the sub-method updated N in place.  Check to see if any
+  // operands are new, and if so, mark them.
+  if (Res.Val == N) {
+    // Mark N as new and remark N and its operands.  This allows us to correctly
+    // revisit N if it needs another step of promotion and allows us to visit
+    // any new operands to N.
+    ReanalyzeNode(N);
+    return true;
+  }
+
+  assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
+         "Invalid operand expansion");
+
+  ReplaceValueWith(SDOperand(N, 0), Res);
+  return false;
+}
+
+/// SoftenSetCCOperands - Soften the operands of a comparison.  This code is
+/// shared among BR_CC, SELECT_CC, and SETCC handlers.
+void DAGTypeLegalizer::SoftenSetCCOperands(SDOperand &NewLHS, SDOperand &NewRHS,
+                                           ISD::CondCode &CCCode) {
+  SDOperand LHSInt = GetSoftenedFloat(NewLHS);
+  SDOperand RHSInt = GetSoftenedFloat(NewRHS);
+  MVT VT  = NewLHS.getValueType();
+  MVT NVT = LHSInt.getValueType();
+
+  assert((VT == MVT::f32 || VT == MVT::f64) && "Unsupported setcc type!");
+
+  // Expand into one or more soft-fp libcall(s).
+  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
+  switch (CCCode) {
+  case ISD::SETEQ:
+  case ISD::SETOEQ:
+    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 : RTLIB::OEQ_F64;
+    break;
+  case ISD::SETNE:
+  case ISD::SETUNE:
+    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 : RTLIB::UNE_F64;
+    break;
+  case ISD::SETGE:
+  case ISD::SETOGE:
+    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 : RTLIB::OGE_F64;
+    break;
+  case ISD::SETLT:
+  case ISD::SETOLT:
+    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 : RTLIB::OLT_F64;
+    break;
+  case ISD::SETLE:
+  case ISD::SETOLE:
+    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 : RTLIB::OLE_F64;
+    break;
+  case ISD::SETGT:
+  case ISD::SETOGT:
+    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 : RTLIB::OGT_F64;
+    break;
+  case ISD::SETUO:
+    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 : RTLIB::UO_F64;
+    break;
+  case ISD::SETO:
+    break;
+  default:
+    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 : RTLIB::UO_F64;
+    switch (CCCode) {
+    case ISD::SETONE:
+      // SETONE = SETOLT | SETOGT
+      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 : RTLIB::OLT_F64;
+      // Fallthrough
+    case ISD::SETUGT:
+      LC2 = (VT == MVT::f32) ? RTLIB::OGT_F32 : RTLIB::OGT_F64;
+      break;
+    case ISD::SETUGE:
+      LC2 = (VT == MVT::f32) ? RTLIB::OGE_F32 : RTLIB::OGE_F64;
+      break;
+    case ISD::SETULT:
+      LC2 = (VT == MVT::f32) ? RTLIB::OLT_F32 : RTLIB::OLT_F64;
+      break;
+    case ISD::SETULE:
+      LC2 = (VT == MVT::f32) ? RTLIB::OLE_F32 : RTLIB::OLE_F64;
+      break;
+    case ISD::SETUEQ:
+      LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 : RTLIB::OEQ_F64;
+      break;
+    default: assert(false && "Do not know how to soften this setcc!");
+    }
+  }
+
+  SDOperand Ops[2] = { LHSInt, RHSInt };
+  NewLHS = MakeLibCall(LC1, NVT, Ops, 2, false/*sign irrelevant*/);
+  NewRHS = DAG.getConstant(0, NVT);
+  if (LC2 != RTLIB::UNKNOWN_LIBCALL) {
+    SDOperand Tmp = DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(NewLHS),
+                                NewLHS, NewRHS,
+                                DAG.getCondCode(TLI.getCmpLibcallCC(LC1)));
+    NewLHS = MakeLibCall(LC2, NVT, Ops, 2, false/*sign irrelevant*/);
+    NewLHS = DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(NewLHS), NewLHS,
+                         NewRHS, DAG.getCondCode(TLI.getCmpLibcallCC(LC2)));
+    NewLHS = DAG.getNode(ISD::OR, Tmp.getValueType(), Tmp, NewLHS);
+    NewRHS = SDOperand();
+  }
+}
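To make the two-libcall case concrete (a standalone sketch, not the legalizer code): an unordered predicate such as SETUEQ is computed as "either operand is NaN" OR "ordered equal", which is what the LC1 (UO_*) and LC2 (OEQ_*) calls combined with ISD::OR express.

    #include <cmath>

    // Plays the role of the UO_F32 libcall result compared against zero.
    static bool Unordered(float A, float B) {
      return std::isnan(A) || std::isnan(B);
    }

    // SETUEQ = unordered OR ordered-equal; the ordered compare plays the role
    // of the OEQ_F32 libcall result compared against zero.
    static bool SoftSetUEQ(float A, float B) {
      return Unordered(A, B) || (A == B);
    }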
+
+SDOperand DAGTypeLegalizer::SoftenFloatOp_BIT_CONVERT(SDNode *N) {
+  return DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0),
+                     GetSoftenedFloat(N->getOperand(0)));
+}
+
+SDOperand DAGTypeLegalizer::SoftenFloatOp_BR_CC(SDNode *N) {
+  SDOperand NewLHS = N->getOperand(2), NewRHS = N->getOperand(3);
+  ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(1))->get();
+  SoftenSetCCOperands(NewLHS, NewRHS, CCCode);
+
+  // If SoftenSetCCOperands returned a scalar, we need to compare the result
+  // against zero to select between true and false values.
+  if (NewRHS.Val == 0) {
+    NewRHS = DAG.getConstant(0, NewLHS.getValueType());
+    CCCode = ISD::SETNE;
+  }
+
+  // Update N to have the operands specified.
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0),
+                                DAG.getCondCode(CCCode), NewLHS, NewRHS,
+                                N->getOperand(4));
+}
+
+SDOperand DAGTypeLegalizer::SoftenFloatOp_SELECT_CC(SDNode *N) {
+  SDOperand NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
+  ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(4))->get();
+  SoftenSetCCOperands(NewLHS, NewRHS, CCCode);
+
+  // If SoftenSetCCOperands returned a scalar, we need to compare the result
+  // against zero to select between true and false values.
+  if (NewRHS.Val == 0) {
+    NewRHS = DAG.getConstant(0, NewLHS.getValueType());
+    CCCode = ISD::SETNE;
+  }
+
+  // Update N to have the operands specified.
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), NewLHS, NewRHS,
+                                N->getOperand(2), N->getOperand(3),
+                                DAG.getCondCode(CCCode));
+}
+
+SDOperand DAGTypeLegalizer::SoftenFloatOp_SETCC(SDNode *N) {
+  SDOperand NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
+  ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(2))->get();
+  SoftenSetCCOperands(NewLHS, NewRHS, CCCode);
+
+  // If SoftenSetCCOperands returned a scalar, use it.
+  if (NewRHS.Val == 0) {
+    assert(NewLHS.getValueType() == N->getValueType(0) &&
+           "Unexpected setcc expansion!");
+    return NewLHS;
+  }
+
+  // Otherwise, update N to have the operands specified.
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), NewLHS, NewRHS,
+                                DAG.getCondCode(CCCode));
+}
+
+
+//===----------------------------------------------------------------------===//
+//  Float Result Expansion
+//===----------------------------------------------------------------------===//
+
+/// ExpandFloatResult - This method is called when the specified result of the
+/// specified node is found to need expansion.  At this point, the node may also
+/// have invalid operands or may have other results that need promotion; we just
+/// know that (at least) one result needs expansion.
+void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) {
+  DEBUG(cerr << "Expand float result: "; N->dump(&DAG); cerr << "\n");
+  SDOperand Lo, Hi;
+  Lo = Hi = SDOperand();
+
+  // See if the target wants to custom expand this node.
+  if (TLI.getOperationAction(N->getOpcode(), N->getValueType(ResNo)) ==
+      TargetLowering::Custom) {
+    // If the target wants to, allow it to lower this itself.
+    if (SDNode *P = TLI.ReplaceNodeResults(N, DAG)) {
+      // Everything that once used N now uses P.  We are guaranteed that the
+      // result value types of N and the result value types of P match.
+      ReplaceNodeWith(N, P);
+      return;
+    }
+  }
+
+  switch (N->getOpcode()) {
+  default:
+#ifndef NDEBUG
+    cerr << "ExpandFloatResult #" << ResNo << ": ";
+    N->dump(&DAG); cerr << "\n";
+#endif
+    assert(0 && "Do not know how to expand the result of this operator!");
+    abort();
+
+  case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, Lo, Hi); break;
+  case ISD::UNDEF:        SplitRes_UNDEF(N, Lo, Hi); break;
+  case ISD::SELECT:       SplitRes_SELECT(N, Lo, Hi); break;
+  case ISD::SELECT_CC:    SplitRes_SELECT_CC(N, Lo, Hi); break;
+
+  case ISD::BIT_CONVERT:        ExpandRes_BIT_CONVERT(N, Lo, Hi); break;
+  case ISD::BUILD_PAIR:         ExpandRes_BUILD_PAIR(N, Lo, Hi); break;
+  case ISD::EXTRACT_ELEMENT:    ExpandRes_EXTRACT_ELEMENT(N, Lo, Hi); break;
+  case ISD::EXTRACT_VECTOR_ELT: ExpandRes_EXTRACT_VECTOR_ELT(N, Lo, Hi); break;
+
+  case ISD::ConstantFP: ExpandFloatRes_ConstantFP(N, Lo, Hi); break;
+  case ISD::FADD:       ExpandFloatRes_FADD(N, Lo, Hi); break;
+  case ISD::FDIV:       ExpandFloatRes_FDIV(N, Lo, Hi); break;
+  case ISD::FMUL:       ExpandFloatRes_FMUL(N, Lo, Hi); break;
+  case ISD::FSUB:       ExpandFloatRes_FSUB(N, Lo, Hi); break;
+  case ISD::LOAD:       ExpandFloatRes_LOAD(N, Lo, Hi); break;
+  case ISD::SINT_TO_FP:
+  case ISD::UINT_TO_FP: ExpandFloatRes_XINT_TO_FP(N, Lo, Hi); break;
+  }
+
+  // If Lo/Hi is null, the sub-method took care of registering results etc.
+  if (Lo.Val)
+    SetExpandedFloat(SDOperand(N, ResNo), Lo, Hi);
+}
+
+void DAGTypeLegalizer::ExpandFloatRes_ConstantFP(SDNode *N, SDOperand &Lo,
+                                                 SDOperand &Hi) {
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+  assert(NVT.getSizeInBits() == integerPartWidth &&
+         "Do not know how to expand this float constant!");
+  APInt C = cast<ConstantFPSDNode>(N)->getValueAPF().convertToAPInt();
+  Lo = DAG.getConstantFP(APFloat(APInt(integerPartWidth, 1,
+                                       &C.getRawData()[1])), NVT);
+  Hi = DAG.getConstantFP(APFloat(APInt(integerPartWidth, 1,
+                                       &C.getRawData()[0])), NVT);
+}
+
+void DAGTypeLegalizer::ExpandFloatRes_FADD(SDNode *N, SDOperand &Lo,
+                                           SDOperand &Hi) {
+  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
+  SDOperand Call = MakeLibCall(GetFPLibCall(N->getValueType(0),
+                                            RTLIB::ADD_F32,
+                                            RTLIB::ADD_F64,
+                                            RTLIB::ADD_F80,
+                                            RTLIB::ADD_PPCF128),
+                               N->getValueType(0), Ops, 2,
+                               false);
+  assert(Call.Val->getOpcode() == ISD::BUILD_PAIR && "Call lowered wrongly!");
+  Lo = Call.getOperand(0); Hi = Call.getOperand(1);
+}
+
+void DAGTypeLegalizer::ExpandFloatRes_FDIV(SDNode *N, SDOperand &Lo,
+                                           SDOperand &Hi) {
+  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
+  SDOperand Call = MakeLibCall(GetFPLibCall(N->getValueType(0),
+                                            RTLIB::DIV_F32,
+                                            RTLIB::DIV_F64,
+                                            RTLIB::DIV_F80,
+                                            RTLIB::DIV_PPCF128),
+                               N->getValueType(0), Ops, 2,
+                               false);
+  assert(Call.Val->getOpcode() == ISD::BUILD_PAIR && "Call lowered wrongly!");
+  Lo = Call.getOperand(0); Hi = Call.getOperand(1);
+}
+
+void DAGTypeLegalizer::ExpandFloatRes_FMUL(SDNode *N, SDOperand &Lo,
+                                           SDOperand &Hi) {
+  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
+  SDOperand Call = MakeLibCall(GetFPLibCall(N->getValueType(0),
+                                            RTLIB::MUL_F32,
+                                            RTLIB::MUL_F64,
+                                            RTLIB::MUL_F80,
+                                            RTLIB::MUL_PPCF128),
+                               N->getValueType(0), Ops, 2,
+                               false);
+  assert(Call.Val->getOpcode() == ISD::BUILD_PAIR && "Call lowered wrongly!");
+  Lo = Call.getOperand(0); Hi = Call.getOperand(1);
+}
+
+void DAGTypeLegalizer::ExpandFloatRes_FSUB(SDNode *N, SDOperand &Lo,
+                                           SDOperand &Hi) {
+  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
+  SDOperand Call = MakeLibCall(GetFPLibCall(N->getValueType(0),
+                                            RTLIB::SUB_F32,
+                                            RTLIB::SUB_F64,
+                                            RTLIB::SUB_F80,
+                                            RTLIB::SUB_PPCF128),
+                               N->getValueType(0), Ops, 2,
+                               false);
+  assert(Call.Val->getOpcode() == ISD::BUILD_PAIR && "Call lowered wrongly!");
+  Lo = Call.getOperand(0); Hi = Call.getOperand(1);
+}
+
+void DAGTypeLegalizer::ExpandFloatRes_LOAD(SDNode *N, SDOperand &Lo,
+                                           SDOperand &Hi) {
+  if (ISD::isNormalLoad(N)) {
+    ExpandRes_NormalLoad(N, Lo, Hi);
+    return;
+  }
+
+  assert(ISD::isUNINDEXEDLoad(N) && "Indexed load during type legalization!");
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  SDOperand Chain = LD->getChain();
+  SDOperand Ptr = LD->getBasePtr();
+
+  MVT NVT = TLI.getTypeToTransformTo(LD->getValueType(0));
+  assert(NVT.isByteSized() && "Expanded type not byte sized!");
+  assert(LD->getMemoryVT().bitsLE(NVT) && "Float type not round?");
+
+  Lo = DAG.getExtLoad(LD->getExtensionType(), NVT, Chain, Ptr,
+                      LD->getSrcValue(), LD->getSrcValueOffset(),
+                      LD->getMemoryVT(),
+                      LD->isVolatile(), LD->getAlignment());
+
+  // Remember the chain.
+  Chain = Lo.getValue(1);
+
+  // The high part is undefined.
+  Hi = DAG.getNode(ISD::UNDEF, NVT);
+
+  // Modified the chain - switch anything that used the old chain to use the
+  // new one.
+  ReplaceValueWith(SDOperand(LD, 1), Chain);
+}
+
+void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDOperand &Lo,
+                                                 SDOperand &Hi) {
+  assert(N->getValueType(0) == MVT::ppcf128 && "Unsupported XINT_TO_FP!");
+  MVT VT = N->getValueType(0);
+  MVT NVT = TLI.getTypeToTransformTo(VT);
+  SDOperand Src = N->getOperand(0);
+  MVT SrcVT = Src.getValueType();
+
+  // First do an SINT_TO_FP, whether the original was signed or unsigned.
+  if (SrcVT.bitsLE(MVT::i32)) {
+    // The integer can be represented exactly in an f64.
+    Src = DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Src);
+    Lo = DAG.getConstantFP(APFloat(APInt(NVT.getSizeInBits(), 0)), NVT);
+    Hi = DAG.getNode(ISD::SINT_TO_FP, NVT, Src);
+  } else {
+    RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+    if (SrcVT.bitsLE(MVT::i64)) {
+      Src = DAG.getNode(ISD::SIGN_EXTEND, MVT::i64, Src);
+      LC = RTLIB::SINTTOFP_I64_PPCF128;
+    } else if (SrcVT.bitsLE(MVT::i128)) {
+      Src = DAG.getNode(ISD::SIGN_EXTEND, MVT::i128, Src);
+      LC = RTLIB::SINTTOFP_I128_PPCF128;
+    }
+    assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported XINT_TO_FP!");
+
+    Hi = MakeLibCall(LC, VT, &Src, 1, true);
+    assert(Hi.Val->getOpcode() == ISD::BUILD_PAIR && "Call lowered wrongly!");
+    Lo = Hi.getOperand(0); Hi = Hi.getOperand(1);
+  }
+
+  if (N->getOpcode() == ISD::SINT_TO_FP)
+    return;
+
+  // Unsigned - fix up the SINT_TO_FP value just calculated.
+  Hi = DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi);
+  SrcVT = Src.getValueType();
+
+  // x>=0 ? (ppcf128)(iN)x : (ppcf128)(iN)x + 2^N; N=32,64,128.
+  static const uint64_t TwoE32[]  = { 0x41f0000000000000LL, 0 };
+  static const uint64_t TwoE64[]  = { 0x43f0000000000000LL, 0 };
+  static const uint64_t TwoE128[] = { 0x47f0000000000000LL, 0 };
+  const uint64_t *Parts = 0;
+
+  switch (SrcVT.getSimpleVT()) {
+  default:
+    assert(false && "Unsupported UINT_TO_FP!");
+  case MVT::i32:
+    Parts = TwoE32;
+    break;
+  case MVT::i64:
+    Parts = TwoE64;
+    break;
+  case MVT::i128:
+    Parts = TwoE128;
+    break;
+  }
+
+  Lo = DAG.getNode(ISD::FADD, VT, Hi,
+                   DAG.getConstantFP(APFloat(APInt(128, 2, Parts)),
+                                     MVT::ppcf128));
+  Lo = DAG.getNode(ISD::SELECT_CC, VT, Src, DAG.getConstant(0, SrcVT), Lo, Hi,
+                   DAG.getCondCode(ISD::SETLT));
+  Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Lo,
+                   DAG.getConstant(1, TLI.getPointerTy()));
+  Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Lo,
+                   DAG.getConstant(0, TLI.getPointerTy()));
+}
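The unsigned fixup above follows the usual pattern x >= 0 ? (fp)x : (fp)x + 2^N. A standalone sketch of the same idea with an ordinary double result (not the ppcf128 code; a double may round where ppcf128 would not):

    #include <cstdint>

    static double UInt64ToDoubleViaSigned(uint64_t X) {
      // First convert along the signed path, as the code above does.
      double SignedConv = static_cast<double>(static_cast<int64_t>(X));
      // If the sign bit was set, the signed value is X - 2^64; add 2^64 back.
      if (static_cast<int64_t>(X) < 0)
        return SignedConv + 18446744073709551616.0;   // 2^64
      return SignedConv;
    }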
+
+
+//===----------------------------------------------------------------------===//
+//  Float Operand Expansion
+//===----------------------------------------------------------------------===//
+
+/// ExpandFloatOperand - This method is called when the specified operand of the
+/// specified node is found to need expansion.  At this point, all of the result
+/// types of the node are known to be legal, but other operands of the node may
+/// need promotion or expansion as well as the specified one.
+bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) {
+  DEBUG(cerr << "Expand float operand: "; N->dump(&DAG); cerr << "\n");
+  SDOperand Res(0, 0);
+
+  if (TLI.getOperationAction(N->getOpcode(), N->getOperand(OpNo).getValueType())
+      == TargetLowering::Custom)
+    Res = TLI.LowerOperation(SDOperand(N, OpNo), DAG);
+
+  if (Res.Val == 0) {
+    switch (N->getOpcode()) {
+    default:
+#ifndef NDEBUG
+      cerr << "ExpandFloatOperand Op #" << OpNo << ": ";
+      N->dump(&DAG); cerr << "\n";
+#endif
+      assert(0 && "Do not know how to expand this operator's operand!");
+      abort();
+
+    case ISD::BIT_CONVERT:     Res = ExpandOp_BIT_CONVERT(N); break;
+    case ISD::BUILD_VECTOR:    Res = ExpandOp_BUILD_VECTOR(N); break;
+    case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break;
+
+    case ISD::BR_CC:     Res = ExpandFloatOp_BR_CC(N); break;
+    case ISD::SELECT_CC: Res = ExpandFloatOp_SELECT_CC(N); break;
+    case ISD::SETCC:     Res = ExpandFloatOp_SETCC(N); break;
+
+    case ISD::FP_ROUND:   Res = ExpandFloatOp_FP_ROUND(N); break;
+    case ISD::FP_TO_SINT: Res = ExpandFloatOp_FP_TO_SINT(N); break;
+    case ISD::FP_TO_UINT: Res = ExpandFloatOp_FP_TO_UINT(N); break;
+
+    case ISD::STORE:
+      Res = ExpandFloatOp_STORE(cast<StoreSDNode>(N), OpNo);
+      break;
+    }
+  }
+
+  // If the result is null, the sub-method took care of registering results etc.
+  if (!Res.Val) return false;
+  // If the result is N, the sub-method updated N in place.  Check to see if any
+  // operands are new, and if so, mark them.
+  if (Res.Val == N) {
+    // Mark N as new and remark N and its operands.  This allows us to correctly
+    // revisit N if it needs another step of expansion and allows us to visit
+    // any new operands to N.
+    ReanalyzeNode(N);
+    return true;
+  }
+
+  assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
+         "Invalid operand expansion");
+
+  ReplaceValueWith(SDOperand(N, 0), Res);
+  return false;
+}
+
+/// FloatExpandSetCCOperands - Expand the operands of a comparison.  This code
+/// is shared among BR_CC, SELECT_CC, and SETCC handlers.
+void DAGTypeLegalizer::FloatExpandSetCCOperands(SDOperand &NewLHS,
+                                                SDOperand &NewRHS,
+                                                ISD::CondCode &CCCode) {
+  SDOperand LHSLo, LHSHi, RHSLo, RHSHi;
+  GetExpandedFloat(NewLHS, LHSLo, LHSHi);
+  GetExpandedFloat(NewRHS, RHSLo, RHSHi);
+
+  MVT VT = NewLHS.getValueType();
+  assert(VT == MVT::ppcf128 && "Unsupported setcc type!");
+
+  // FIXME:  This generated code sucks.  We want to generate
+  //         FCMP crN, hi1, hi2
+  //         BNE crN, L:
+  //         FCMP crN, lo1, lo2
+  // The following can be improved, but not that much.
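+  // i.e. compute (hi1 == hi2 && lo1 CC lo2) || (hi1 != hi2 && hi1 CC hi2):
+  // when the high doubles are equal the low doubles decide the comparison,
+  // otherwise the high doubles do.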
+  SDOperand Tmp1, Tmp2, Tmp3;
+  Tmp1 = DAG.getSetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi, ISD::SETEQ);
+  Tmp2 = DAG.getSetCC(TLI.getSetCCResultType(LHSLo), LHSLo, RHSLo, CCCode);
+  Tmp3 = DAG.getNode(ISD::AND, Tmp1.getValueType(), Tmp1, Tmp2);
+  Tmp1 = DAG.getSetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi, ISD::SETNE);
+  Tmp2 = DAG.getSetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi, CCCode);
+  Tmp1 = DAG.getNode(ISD::AND, Tmp1.getValueType(), Tmp1, Tmp2);
+  NewLHS = DAG.getNode(ISD::OR, Tmp1.getValueType(), Tmp1, Tmp3);
+  NewRHS = SDOperand();   // LHS is the result, not a compare.
+}
+
+SDOperand DAGTypeLegalizer::ExpandFloatOp_BR_CC(SDNode *N) {
+  SDOperand NewLHS = N->getOperand(2), NewRHS = N->getOperand(3);
+  ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(1))->get();
+  FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode);
+
+  // If FloatExpandSetCCOperands returned a scalar, we need to compare the
+  // result against zero to select between true and false values.
+  if (NewRHS.Val == 0) {
+    NewRHS = DAG.getConstant(0, NewLHS.getValueType());
+    CCCode = ISD::SETNE;
+  }
+
+  // Update N to have the operands specified.
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0),
+                                DAG.getCondCode(CCCode), NewLHS, NewRHS,
+                                N->getOperand(4));
+}
+
+SDOperand DAGTypeLegalizer::ExpandFloatOp_SELECT_CC(SDNode *N) {
+  SDOperand NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
+  ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(4))->get();
+  FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode);
+
+  // If FloatExpandSetCCOperands returned a scalar, we need to compare the
+  // result against zero to select between true and false values.
+  if (NewRHS.Val == 0) {
+    NewRHS = DAG.getConstant(0, NewLHS.getValueType());
+    CCCode = ISD::SETNE;
+  }
+
+  // Update N to have the operands specified.
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), NewLHS, NewRHS,
+                                N->getOperand(2), N->getOperand(3),
+                                DAG.getCondCode(CCCode));
+}
+
+SDOperand DAGTypeLegalizer::ExpandFloatOp_SETCC(SDNode *N) {
+  SDOperand NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
+  ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(2))->get();
+  FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode);
+
+  // If FloatExpandSetCCOperands returned a scalar, use it.
+  if (NewRHS.Val == 0) {
+    assert(NewLHS.getValueType() == N->getValueType(0) &&
+           "Unexpected setcc expansion!");
+    return NewLHS;
+  }
+
+  // Otherwise, update N to have the operands specified.
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), NewLHS, NewRHS,
+                                DAG.getCondCode(CCCode));
+}
+
+SDOperand DAGTypeLegalizer::ExpandFloatOp_FP_TO_UINT(SDNode *N) {
+  assert(N->getOperand(0).getValueType() == MVT::ppcf128 &&
+         "Unsupported FP_TO_UINT!");
+
+  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+  switch (N->getValueType(0).getSimpleVT()) {
+  default:
+    assert(false && "Unsupported FP_TO_UINT!");
+  case MVT::i32:
+    LC = RTLIB::FPTOUINT_PPCF128_I32;
+    break;
+  case MVT::i64:
+    LC = RTLIB::FPTOUINT_PPCF128_I64;
+    break;
+  case MVT::i128:
+    LC = RTLIB::FPTOUINT_PPCF128_I128;
+    break;
+  }
+
+  return MakeLibCall(LC, N->getValueType(0), &N->getOperand(0), 1, false);
+}
+
+SDOperand DAGTypeLegalizer::ExpandFloatOp_FP_TO_SINT(SDNode *N) {
+  assert(N->getOperand(0).getValueType() == MVT::ppcf128 &&
+         "Unsupported FP_TO_SINT!");
+
+  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+  switch (N->getValueType(0).getSimpleVT()) {
+  default:
+    assert(false && "Unsupported FP_TO_SINT!");
+  case MVT::i32:
+    LC = RTLIB::FPTOSINT_PPCF128_I32;
+    break;
+  case MVT::i64:
+    LC = RTLIB::FPTOSINT_PPCF128_I64;
+    break;
+  case MVT::i128:
+    LC = RTLIB::FPTOSINT_PPCF128_I128;
+    break;
+  }
+
+  return MakeLibCall(LC, N->getValueType(0), &N->getOperand(0), 1, false);
+}
+
+SDOperand DAGTypeLegalizer::ExpandFloatOp_FP_ROUND(SDNode *N) {
+  assert(N->getOperand(0).getValueType() == MVT::ppcf128 &&
+         "Logic only correct for ppcf128!");
+  SDOperand Lo, Hi;
+  GetExpandedFloat(N->getOperand(0), Lo, Hi);
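+  // Note: this relies on the double-double invariant that Hi already holds
+  // the ppcf128 value rounded to double precision.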
+  // Round it the rest of the way (e.g. to f32) if needed.
+  return DAG.getNode(ISD::FP_ROUND, N->getValueType(0), Hi, N->getOperand(1));
+}
+
+SDOperand DAGTypeLegalizer::ExpandFloatOp_STORE(SDNode *N, unsigned OpNo) {
+  if (ISD::isNormalStore(N))
+    return ExpandOp_NormalStore(N, OpNo);
+
+  assert(ISD::isUNINDEXEDStore(N) && "Indexed store during type legalization!");
+  assert(OpNo == 1 && "Can only expand the stored value so far");
+  StoreSDNode *ST = cast<StoreSDNode>(N);
+
+  SDOperand Chain = ST->getChain();
+  SDOperand Ptr = ST->getBasePtr();
+
+  MVT NVT = TLI.getTypeToTransformTo(ST->getValue().getValueType());
+  assert(NVT.isByteSized() && "Expanded type not byte sized!");
+  assert(ST->getMemoryVT().bitsLE(NVT) && "Float type not round?");
+
+  SDOperand Lo, Hi;
+  GetExpandedOp(ST->getValue(), Lo, Hi);
+
+  return DAG.getTruncStore(Chain, Lo, Ptr,
+                           ST->getSrcValue(), ST->getSrcValueOffset(),
+                           ST->getMemoryVT(),
+                           ST->isVolatile(), ST->getAlignment());
+}

Added: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp?rev=53163&view=auto

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp (added)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp Sun Jul  6 15:45:41 2008
@@ -0,0 +1,2025 @@
+//===----- LegalizeIntegerTypes.cpp - Legalization of integer types -------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements integer type expansion and promotion for LegalizeTypes.
+// Promotion is the act of changing a computation in an illegal type into a
+// computation in a larger type.  For example, implementing i8 arithmetic in an
+// i32 register (often needed on powerpc).
+// Expansion is the act of changing a computation in an illegal type into a
+// computation in two identical registers of a smaller type.  For example,
+// implementing i64 arithmetic in two i32 registers (often needed on 32-bit
+// targets).
+//
+//===----------------------------------------------------------------------===//
+
+#include "LegalizeTypes.h"
+#include "llvm/Constants.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+//  Integer Result Promotion
+//===----------------------------------------------------------------------===//
+
+/// PromoteIntegerResult - This method is called when a result of a node is
+/// found to be in need of promotion to a larger type.  At this point, the node
+/// may also have invalid operands or may have other results that need
+/// expansion; we just know that (at least) one result needs promotion.
+void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
+  DEBUG(cerr << "Promote integer result: "; N->dump(&DAG); cerr << "\n");
+  SDOperand Result = SDOperand();
+
+  // See if the target wants to custom expand this node.
+  if (TLI.getOperationAction(N->getOpcode(), N->getValueType(ResNo)) ==
+      TargetLowering::Custom) {
+    // If the target wants to, allow it to lower this itself.
+    if (SDNode *P = TLI.ReplaceNodeResults(N, DAG)) {
+      // Everything that once used N now uses P.  We are guaranteed that the
+      // result value types of N and the result value types of P match.
+      ReplaceNodeWith(N, P);
+      return;
+    }
+  }
+
+  switch (N->getOpcode()) {
+  default:
+#ifndef NDEBUG
+    cerr << "PromoteIntegerResult #" << ResNo << ": ";
+    N->dump(&DAG); cerr << "\n";
+#endif
+    assert(0 && "Do not know how to promote this operator!");
+    abort();
+  case ISD::UNDEF:    Result = PromoteIntRes_UNDEF(N); break;
+  case ISD::Constant: Result = PromoteIntRes_Constant(N); break;
+
+  case ISD::TRUNCATE:    Result = PromoteIntRes_TRUNCATE(N); break;
+  case ISD::SIGN_EXTEND:
+  case ISD::ZERO_EXTEND:
+  case ISD::ANY_EXTEND:  Result = PromoteIntRes_INT_EXTEND(N); break;
+  case ISD::FP_ROUND:    Result = PromoteIntRes_FP_ROUND(N); break;
+  case ISD::FP_TO_SINT:
+  case ISD::FP_TO_UINT:  Result = PromoteIntRes_FP_TO_XINT(N); break;
+  case ISD::SETCC:    Result = PromoteIntRes_SETCC(N); break;
+  case ISD::LOAD:     Result = PromoteIntRes_LOAD(cast<LoadSDNode>(N)); break;
+  case ISD::BUILD_PAIR:  Result = PromoteIntRes_BUILD_PAIR(N); break;
+  case ISD::BIT_CONVERT: Result = PromoteIntRes_BIT_CONVERT(N); break;
+
+  case ISD::AND:
+  case ISD::OR:
+  case ISD::XOR:
+  case ISD::ADD:
+  case ISD::SUB:
+  case ISD::MUL:      Result = PromoteIntRes_SimpleIntBinOp(N); break;
+
+  case ISD::SDIV:
+  case ISD::SREM:     Result = PromoteIntRes_SDIV(N); break;
+
+  case ISD::UDIV:
+  case ISD::UREM:     Result = PromoteIntRes_UDIV(N); break;
+
+  case ISD::SHL:      Result = PromoteIntRes_SHL(N); break;
+  case ISD::SRA:      Result = PromoteIntRes_SRA(N); break;
+  case ISD::SRL:      Result = PromoteIntRes_SRL(N); break;
+
+  case ISD::SELECT:    Result = PromoteIntRes_SELECT(N); break;
+  case ISD::SELECT_CC: Result = PromoteIntRes_SELECT_CC(N); break;
+
+  case ISD::CTLZ:     Result = PromoteIntRes_CTLZ(N); break;
+  case ISD::CTPOP:    Result = PromoteIntRes_CTPOP(N); break;
+  case ISD::CTTZ:     Result = PromoteIntRes_CTTZ(N); break;
+
+  case ISD::EXTRACT_VECTOR_ELT:
+    Result = PromoteIntRes_EXTRACT_VECTOR_ELT(N);
+    break;
+
+  case ISD::VAARG : Result = PromoteIntRes_VAARG(N); break;
+  }
+
+  // If Result is null, the sub-method took care of registering the result.
+  if (Result.Val)
+    SetPromotedInteger(SDOperand(N, ResNo), Result);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_UNDEF(SDNode *N) {
+  return DAG.getNode(ISD::UNDEF, TLI.getTypeToTransformTo(N->getValueType(0)));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_Constant(SDNode *N) {
+  MVT VT = N->getValueType(0);
+  // Zero extend things like i1, sign extend everything else.  In theory it
+  // shouldn't matter which one we pick, but in practice this choice tends to
+  // give better code.
+  unsigned Opc = VT.isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
+  SDOperand Result = DAG.getNode(Opc, TLI.getTypeToTransformTo(VT),
+                                 SDOperand(N, 0));
+  assert(isa<ConstantSDNode>(Result) && "Didn't constant fold ext?");
+  return Result;
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_TRUNCATE(SDNode *N) {
+  SDOperand Res;
+
+  switch (getTypeAction(N->getOperand(0).getValueType())) {
+  default: assert(0 && "Unknown type action!");
+  case Legal:
+  case ExpandInteger:
+    Res = N->getOperand(0);
+    break;
+  case PromoteInteger:
+    Res = GetPromotedInteger(N->getOperand(0));
+    break;
+  }
+
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+  assert(Res.getValueType().getSizeInBits() >= NVT.getSizeInBits() &&
+         "Truncation doesn't make sense!");
+  if (Res.getValueType() == NVT)
+    return Res;
+
+  // Truncate to NVT instead of VT
+  return DAG.getNode(ISD::TRUNCATE, NVT, Res);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_INT_EXTEND(SDNode *N) {
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+
+  if (getTypeAction(N->getOperand(0).getValueType()) == PromoteInteger) {
+    SDOperand Res = GetPromotedInteger(N->getOperand(0));
+    assert(Res.getValueType().getSizeInBits() <= NVT.getSizeInBits() &&
+           "Extension doesn't make sense!");
+
+    // If the result and operand types are the same after promotion, simplify
+    // to an in-register extension.
+    if (NVT == Res.getValueType()) {
+      // The high bits are not guaranteed to be anything.  Insert an extend.
+      if (N->getOpcode() == ISD::SIGN_EXTEND)
+        return DAG.getNode(ISD::SIGN_EXTEND_INREG, NVT, Res,
+                           DAG.getValueType(N->getOperand(0).getValueType()));
+      if (N->getOpcode() == ISD::ZERO_EXTEND)
+        return DAG.getZeroExtendInReg(Res, N->getOperand(0).getValueType());
+      assert(N->getOpcode() == ISD::ANY_EXTEND && "Unknown integer extension!");
+      return Res;
+    }
+  }
+
+  // Otherwise, just extend the original operand all the way to the larger type.
+  return DAG.getNode(N->getOpcode(), NVT, N->getOperand(0));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_FP_ROUND(SDNode *N) {
+  // NOTE: Assumes input is legal.
+  if (N->getConstantOperandVal(1) == 0)
+    return DAG.getNode(ISD::FP_ROUND_INREG, N->getOperand(0).getValueType(),
+                       N->getOperand(0), DAG.getValueType(N->getValueType(0)));
+  // If the precision discard isn't needed, just return the operand unrounded.
+  return N->getOperand(0);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_FP_TO_XINT(SDNode *N) {
+  unsigned NewOpc = N->getOpcode();
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+
+  // If we're promoting a UINT to a larger size, check to see if the new node
+  // will be legal.  If it isn't, check to see if FP_TO_SINT is legal, since
+  // we can use that instead.  This allows us to generate better code for
+  // FP_TO_UINT for small destination sizes on targets where FP_TO_UINT is not
+  // legal, such as PowerPC.
+  if (N->getOpcode() == ISD::FP_TO_UINT) {
+    if (!TLI.isOperationLegal(ISD::FP_TO_UINT, NVT) &&
+        (TLI.isOperationLegal(ISD::FP_TO_SINT, NVT) ||
+         TLI.getOperationAction(ISD::FP_TO_SINT, NVT)==TargetLowering::Custom))
+      NewOpc = ISD::FP_TO_SINT;
+  }
+
+  return DAG.getNode(NewOpc, NVT, N->getOperand(0));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_SETCC(SDNode *N) {
+  assert(isTypeLegal(TLI.getSetCCResultType(N->getOperand(0)))
+         && "SetCC type is not legal??");
+  return DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(N->getOperand(0)),
+                     N->getOperand(0), N->getOperand(1), N->getOperand(2));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_LOAD(LoadSDNode *N) {
+  assert(ISD::isUNINDEXEDLoad(N) && "Indexed load during type legalization!");
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+  ISD::LoadExtType ExtType =
+    ISD::isNON_EXTLoad(N) ? ISD::EXTLOAD : N->getExtensionType();
+  SDOperand Res = DAG.getExtLoad(ExtType, NVT, N->getChain(), N->getBasePtr(),
+                                 N->getSrcValue(), N->getSrcValueOffset(),
+                                 N->getMemoryVT(), N->isVolatile(),
+                                 N->getAlignment());
+
+  // Legalized the chain result - switch anything that used the old chain to
+  // use the new one.
+  ReplaceValueWith(SDOperand(N, 1), Res.getValue(1));
+  return Res;
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_BUILD_PAIR(SDNode *N) {
+  // The pair element type may be legal, or may not promote to the same type as
+  // the result, for example i14 = BUILD_PAIR (i7, i7).  Handle all cases.
+  return DAG.getNode(ISD::ANY_EXTEND,
+                     TLI.getTypeToTransformTo(N->getValueType(0)),
+                     JoinIntegers(N->getOperand(0), N->getOperand(1)));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
+  SDOperand InOp = N->getOperand(0);
+  MVT InVT = InOp.getValueType();
+  MVT NInVT = TLI.getTypeToTransformTo(InVT);
+  MVT OutVT = TLI.getTypeToTransformTo(N->getValueType(0));
+
+  switch (getTypeAction(InVT)) {
+  default:
+    assert(false && "Unknown type action!");
+    break;
+  case Legal:
+    break;
+  case PromoteInteger:
+    if (OutVT.getSizeInBits() == NInVT.getSizeInBits())
+      // The input promotes to the same size.  Convert the promoted value.
+      return DAG.getNode(ISD::BIT_CONVERT, OutVT, GetPromotedInteger(InOp));
+    break;
+  case SoftenFloat:
+    // Promote the integer operand by hand.
+    return DAG.getNode(ISD::ANY_EXTEND, OutVT, GetSoftenedFloat(InOp));
+  case ExpandInteger:
+  case ExpandFloat:
+    break;
+  case Scalarize:
+    // Convert the element to an integer and promote it by hand.
+    return DAG.getNode(ISD::ANY_EXTEND, OutVT,
+                       BitConvertToInteger(GetScalarizedVector(InOp)));
+  case Split:
+    // For example, i32 = BIT_CONVERT v2i16 on alpha.  Convert the split
+    // pieces of the input into integers and reassemble in the final type.
+    SDOperand Lo, Hi;
+    GetSplitVector(N->getOperand(0), Lo, Hi);
+    Lo = BitConvertToInteger(Lo);
+    Hi = BitConvertToInteger(Hi);
+
+    if (TLI.isBigEndian())
+      std::swap(Lo, Hi);
+
+    InOp = DAG.getNode(ISD::ANY_EXTEND,
+                       MVT::getIntegerVT(OutVT.getSizeInBits()),
+                       JoinIntegers(Lo, Hi));
+    return DAG.getNode(ISD::BIT_CONVERT, OutVT, InOp);
+  }
+
+  // Otherwise, lower the bit-convert to a store/load from the stack, then
+  // promote the load.
+  SDOperand Op = CreateStackStoreLoad(InOp, N->getValueType(0));
+  return PromoteIntRes_LOAD(cast<LoadSDNode>(Op.Val));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_SimpleIntBinOp(SDNode *N) {
+  // The input may have strange things in the top bits of the registers, but
+  // these operations don't care.  They may have weird bits going out, but
+  // that too is okay if they are integer operations.
+  SDOperand LHS = GetPromotedInteger(N->getOperand(0));
+  SDOperand RHS = GetPromotedInteger(N->getOperand(1));
+  return DAG.getNode(N->getOpcode(), LHS.getValueType(), LHS, RHS);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_SDIV(SDNode *N) {
+  // Sign extend the input.
+  SDOperand LHS = GetPromotedInteger(N->getOperand(0));
+  SDOperand RHS = GetPromotedInteger(N->getOperand(1));
+  MVT VT = N->getValueType(0);
+  LHS = DAG.getNode(ISD::SIGN_EXTEND_INREG, LHS.getValueType(), LHS,
+                    DAG.getValueType(VT));
+  RHS = DAG.getNode(ISD::SIGN_EXTEND_INREG, RHS.getValueType(), RHS,
+                    DAG.getValueType(VT));
+
+  return DAG.getNode(N->getOpcode(), LHS.getValueType(), LHS, RHS);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_UDIV(SDNode *N) {
+  // Zero extend the input.
+  SDOperand LHS = GetPromotedInteger(N->getOperand(0));
+  SDOperand RHS = GetPromotedInteger(N->getOperand(1));
+  MVT VT = N->getValueType(0);
+  LHS = DAG.getZeroExtendInReg(LHS, VT);
+  RHS = DAG.getZeroExtendInReg(RHS, VT);
+
+  return DAG.getNode(N->getOpcode(), LHS.getValueType(), LHS, RHS);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_SHL(SDNode *N) {
+  return DAG.getNode(ISD::SHL, TLI.getTypeToTransformTo(N->getValueType(0)),
+                     GetPromotedInteger(N->getOperand(0)), N->getOperand(1));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_SRA(SDNode *N) {
+  // The input value must be properly sign extended.
+  MVT VT = N->getValueType(0);
+  MVT NVT = TLI.getTypeToTransformTo(VT);
+  SDOperand Res = GetPromotedInteger(N->getOperand(0));
+  Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, NVT, Res, DAG.getValueType(VT));
+  return DAG.getNode(ISD::SRA, NVT, Res, N->getOperand(1));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_SRL(SDNode *N) {
+  // The input value must be properly zero extended.
+  MVT VT = N->getValueType(0);
+  MVT NVT = TLI.getTypeToTransformTo(VT);
+  SDOperand Res = ZExtPromotedInteger(N->getOperand(0));
+  return DAG.getNode(ISD::SRL, NVT, Res, N->getOperand(1));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_SELECT(SDNode *N) {
+  SDOperand LHS = GetPromotedInteger(N->getOperand(1));
+  SDOperand RHS = GetPromotedInteger(N->getOperand(2));
+  return DAG.getNode(ISD::SELECT, LHS.getValueType(), N->getOperand(0),LHS,RHS);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_SELECT_CC(SDNode *N) {
+  SDOperand LHS = GetPromotedInteger(N->getOperand(2));
+  SDOperand RHS = GetPromotedInteger(N->getOperand(3));
+  return DAG.getNode(ISD::SELECT_CC, LHS.getValueType(), N->getOperand(0),
+                     N->getOperand(1), LHS, RHS, N->getOperand(4));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_CTLZ(SDNode *N) {
+  SDOperand Op = GetPromotedInteger(N->getOperand(0));
+  MVT OVT = N->getValueType(0);
+  MVT NVT = Op.getValueType();
+  // Zero extend to the promoted type and do the count there.
+  Op = DAG.getNode(ISD::CTLZ, NVT, DAG.getZeroExtendInReg(Op, OVT));
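+  // For example, an i8 value zero extended into an i32 register picks up 24
+  // extra leading zero bits, which the subtraction below removes.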
+  // Subtract off the extra leading bits in the bigger type.
+  return DAG.getNode(ISD::SUB, NVT, Op,
+                     DAG.getConstant(NVT.getSizeInBits() -
+                                     OVT.getSizeInBits(), NVT));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_CTPOP(SDNode *N) {
+  SDOperand Op = GetPromotedInteger(N->getOperand(0));
+  MVT OVT = N->getValueType(0);
+  MVT NVT = Op.getValueType();
+  // Zero extend to the promoted type and do the count there.
+  return DAG.getNode(ISD::CTPOP, NVT, DAG.getZeroExtendInReg(Op, OVT));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_CTTZ(SDNode *N) {
+  SDOperand Op = GetPromotedInteger(N->getOperand(0));
+  MVT OVT = N->getValueType(0);
+  MVT NVT = Op.getValueType();
+  // The count is the same in the promoted type except if the original
+  // value was zero.  This can be handled by setting the bit just off
+  // the top of the original type.
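+  // For example, an i8 value promoted to i32 gets bit 8 set here, so CTTZ of
+  // a zero i8 correctly yields 8 rather than 32.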
+  Op = DAG.getNode(ISD::OR, NVT, Op,
+                   // FIXME: Do this using an APInt constant.
+                   DAG.getConstant(1UL << OVT.getSizeInBits(), NVT));
+  return DAG.getNode(ISD::CTTZ, NVT, Op);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_EXTRACT_VECTOR_ELT(SDNode *N) {
+  MVT OldVT = N->getValueType(0);
+  SDOperand OldVec = N->getOperand(0);
+  unsigned OldElts = OldVec.getValueType().getVectorNumElements();
+
+  if (OldElts == 1) {
+    assert(!isTypeLegal(OldVec.getValueType()) &&
+           "Legal one-element vector of a type needing promotion!");
+    // It is tempting to follow GetScalarizedVector by a call to
+    // GetPromotedInteger, but this would be wrong because the
+    // scalarized value may not yet have been processed.
+    return DAG.getNode(ISD::ANY_EXTEND, TLI.getTypeToTransformTo(OldVT),
+                       GetScalarizedVector(OldVec));
+  }
+
+  // Convert to a vector half as long with an element type of twice the width,
+  // for example <4 x i16> -> <2 x i32>.
+  assert(!(OldElts & 1) && "Odd length vectors not supported!");
+  MVT NewVT = MVT::getIntegerVT(2 * OldVT.getSizeInBits());
+  assert(OldVT.isSimple() && NewVT.isSimple());
+
+  SDOperand NewVec = DAG.getNode(ISD::BIT_CONVERT,
+                                 MVT::getVectorVT(NewVT, OldElts / 2),
+                                 OldVec);
+
+  // Extract the element at OldIdx / 2 from the new vector.
+  SDOperand OldIdx = N->getOperand(1);
+  SDOperand NewIdx = DAG.getNode(ISD::SRL, OldIdx.getValueType(), OldIdx,
+                                 DAG.getConstant(1, TLI.getShiftAmountTy()));
+  SDOperand Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, NewVT, NewVec, NewIdx);
+
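+  // For example, extracting element 3 of a <4 x i16> (little-endian): NewIdx
+  // is 1, so Elt holds old elements 2 and 3, and the odd index selects the
+  // high half below.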
+  // Select the appropriate half of the element: Lo if OldIdx was even,
+  // Hi if it was odd.
+  SDOperand Lo = Elt;
+  SDOperand Hi = DAG.getNode(ISD::SRL, NewVT, Elt,
+                             DAG.getConstant(OldVT.getSizeInBits(),
+                                             TLI.getShiftAmountTy()));
+  if (TLI.isBigEndian())
+    std::swap(Lo, Hi);
+
+  SDOperand Odd = DAG.getNode(ISD::AND, OldIdx.getValueType(), OldIdx,
+                              DAG.getConstant(1, TLI.getShiftAmountTy()));
+  return DAG.getNode(ISD::SELECT, NewVT, Odd, Hi, Lo);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) {
+  SDOperand Chain = N->getOperand(0); // Get the chain.
+  SDOperand Ptr = N->getOperand(1); // Get the pointer.
+  MVT VT = N->getValueType(0);
+
+  const Value *V = cast<SrcValueSDNode>(N->getOperand(2))->getValue();
+  SDOperand VAList = DAG.getLoad(TLI.getPointerTy(), Chain, Ptr, V, 0);
+
+  // Increment the arg pointer, VAList, to the next vaarg
+  // FIXME: should the ABI size be used for the increment?  Think of
+  // x86 long double (10 bytes long, but aligned on 4 or 8 bytes) or
+  // integers of unusual size (such as MVT::i1, which gives an increment
+  // of zero here!).
+  unsigned Increment = VT.getSizeInBits() / 8;
+  SDOperand Tmp = DAG.getNode(ISD::ADD, TLI.getPointerTy(), VAList,
+                              DAG.getConstant(Increment, TLI.getPointerTy()));
+
+  // Store the incremented VAList to the pointer.
+  Tmp = DAG.getStore(VAList.getValue(1), Tmp, Ptr, V, 0);
+
+  // Load the actual argument out of the arg pointer VAList.
+  Tmp = DAG.getExtLoad(ISD::EXTLOAD, TLI.getTypeToTransformTo(VT), Tmp,
+                       VAList, NULL, 0, VT);
+
+  // Legalized the chain result - switch anything that used the old chain to
+  // use the new one.
+  ReplaceValueWith(SDOperand(N, 1), Tmp.getValue(1));
+  return Tmp;
+}
+
+//===----------------------------------------------------------------------===//
+//  Integer Operand Promotion
+//===----------------------------------------------------------------------===//
+
+/// PromoteIntegerOperand - This method is called when the specified operand of
+/// the specified node is found to need promotion.  At this point, all of the
+/// result types of the node are known to be legal, but other operands of the
+/// node may need promotion or expansion as well as the specified one.
+bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
+  DEBUG(cerr << "Promote integer operand: "; N->dump(&DAG); cerr << "\n");
+  SDOperand Res(0, 0);
+
+  if (TLI.getOperationAction(N->getOpcode(), N->getOperand(OpNo).getValueType())
+      == TargetLowering::Custom)
+    Res = TLI.LowerOperation(SDOperand(N, OpNo), DAG);
+
+  if (Res.Val == 0) {
+    switch (N->getOpcode()) {
+    default:
+#ifndef NDEBUG
+      cerr << "PromoteIntegerOperand Op #" << OpNo << ": ";
+      N->dump(&DAG); cerr << "\n";
+#endif
+      assert(0 && "Do not know how to promote this operator's operand!");
+      abort();
+
+    case ISD::ANY_EXTEND:  Res = PromoteIntOp_ANY_EXTEND(N); break;
+    case ISD::ZERO_EXTEND: Res = PromoteIntOp_ZERO_EXTEND(N); break;
+    case ISD::SIGN_EXTEND: Res = PromoteIntOp_SIGN_EXTEND(N); break;
+    case ISD::TRUNCATE:    Res = PromoteIntOp_TRUNCATE(N); break;
+    case ISD::FP_EXTEND:   Res = PromoteIntOp_FP_EXTEND(N); break;
+    case ISD::FP_ROUND:    Res = PromoteIntOp_FP_ROUND(N); break;
+    case ISD::SINT_TO_FP:
+    case ISD::UINT_TO_FP:  Res = PromoteIntOp_INT_TO_FP(N); break;
+    case ISD::BUILD_PAIR:  Res = PromoteIntOp_BUILD_PAIR(N); break;
+
+    case ISD::BRCOND:      Res = PromoteIntOp_BRCOND(N, OpNo); break;
+    case ISD::BR_CC:       Res = PromoteIntOp_BR_CC(N, OpNo); break;
+    case ISD::SELECT:      Res = PromoteIntOp_SELECT(N, OpNo); break;
+    case ISD::SELECT_CC:   Res = PromoteIntOp_SELECT_CC(N, OpNo); break;
+    case ISD::SETCC:       Res = PromoteIntOp_SETCC(N, OpNo); break;
+
+    case ISD::STORE:       Res = PromoteIntOp_STORE(cast<StoreSDNode>(N),
+                                                      OpNo); break;
+
+    case ISD::BUILD_VECTOR: Res = PromoteIntOp_BUILD_VECTOR(N); break;
+    case ISD::INSERT_VECTOR_ELT:
+      Res = PromoteIntOp_INSERT_VECTOR_ELT(N, OpNo);
+      break;
+
+    case ISD::MEMBARRIER:  Res = PromoteIntOp_MEMBARRIER(N); break;
+    }
+  }
+
+  // If the result is null, the sub-method took care of registering results etc.
+  if (!Res.Val) return false;
+  // If the result is N, the sub-method updated N in place.
+  if (Res.Val == N) {
+    // Mark N as new and remark N and its operands.  This allows us to correctly
+    // revisit N if it needs another step of promotion and allows us to visit
+    // any new operands to N.
+    ReanalyzeNode(N);
+    return true;
+  }
+
+  assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
+         "Invalid operand promotion");
+
+  ReplaceValueWith(SDOperand(N, 0), Res);
+  return false;
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_ANY_EXTEND(SDNode *N) {
+  SDOperand Op = GetPromotedInteger(N->getOperand(0));
+  return DAG.getNode(ISD::ANY_EXTEND, N->getValueType(0), Op);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_ZERO_EXTEND(SDNode *N) {
+  SDOperand Op = GetPromotedInteger(N->getOperand(0));
+  Op = DAG.getNode(ISD::ANY_EXTEND, N->getValueType(0), Op);
+  return DAG.getZeroExtendInReg(Op, N->getOperand(0).getValueType());
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_SIGN_EXTEND(SDNode *N) {
+  SDOperand Op = GetPromotedInteger(N->getOperand(0));
+  Op = DAG.getNode(ISD::ANY_EXTEND, N->getValueType(0), Op);
+  return DAG.getNode(ISD::SIGN_EXTEND_INREG, Op.getValueType(),
+                     Op, DAG.getValueType(N->getOperand(0).getValueType()));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_TRUNCATE(SDNode *N) {
+  SDOperand Op = GetPromotedInteger(N->getOperand(0));
+  return DAG.getNode(ISD::TRUNCATE, N->getValueType(0), Op);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_FP_EXTEND(SDNode *N) {
+  SDOperand Op = GetPromotedInteger(N->getOperand(0));
+  return DAG.getNode(ISD::FP_EXTEND, N->getValueType(0), Op);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_FP_ROUND(SDNode *N) {
+  SDOperand Op = GetPromotedInteger(N->getOperand(0));
+  return DAG.getNode(ISD::FP_ROUND, N->getValueType(0), Op,
+                     DAG.getIntPtrConstant(0));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_INT_TO_FP(SDNode *N) {
+  SDOperand In = GetPromotedInteger(N->getOperand(0));
+  MVT OpVT = N->getOperand(0).getValueType();
+  if (N->getOpcode() == ISD::UINT_TO_FP)
+    In = DAG.getZeroExtendInReg(In, OpVT);
+  else
+    In = DAG.getNode(ISD::SIGN_EXTEND_INREG, In.getValueType(),
+                     In, DAG.getValueType(OpVT));
+
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), In);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_BUILD_PAIR(SDNode *N) {
+  // Since the result type is legal, the operands must promote to it.
+  MVT OVT = N->getOperand(0).getValueType();
+  SDOperand Lo = GetPromotedInteger(N->getOperand(0));
+  SDOperand Hi = GetPromotedInteger(N->getOperand(1));
+  assert(Lo.getValueType() == N->getValueType(0) && "Operand over promoted?");
+
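+  // For example, i16 = BUILD_PAIR(i8 Lo, i8 Hi) with i8 promoted to i16
+  // becomes (zext Lo) | (Hi << 8).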
+  Lo = DAG.getZeroExtendInReg(Lo, OVT);
+  Hi = DAG.getNode(ISD::SHL, N->getValueType(0), Hi,
+                   DAG.getConstant(OVT.getSizeInBits(),
+                                   TLI.getShiftAmountTy()));
+  return DAG.getNode(ISD::OR, N->getValueType(0), Lo, Hi);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_SELECT(SDNode *N, unsigned OpNo) {
+  assert(OpNo == 0 && "Only know how to promote condition");
+  SDOperand Cond = GetPromotedInteger(N->getOperand(0));  // Promote condition.
+
+  // The top bits of the promoted condition are not necessarily zero, ensure
+  // that the value is properly zero extended.
+  unsigned BitWidth = Cond.getValueSizeInBits();
+  if (!DAG.MaskedValueIsZero(Cond,
+                             APInt::getHighBitsSet(BitWidth, BitWidth-1)))
+    Cond = DAG.getZeroExtendInReg(Cond, MVT::i1);
+
+  // The chain (Op#0) and basic block destination (Op#2) are always legal types.
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), Cond, N->getOperand(1),
+                                N->getOperand(2));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo) {
+  assert(OpNo == 1 && "only know how to promote condition");
+  SDOperand Cond = GetPromotedInteger(N->getOperand(1));  // Promote condition.
+
+  // The top bits of the promoted condition are not necessarily zero, ensure
+  // that the value is properly zero extended.
+  unsigned BitWidth = Cond.getValueSizeInBits();
+  if (!DAG.MaskedValueIsZero(Cond,
+                             APInt::getHighBitsSet(BitWidth, BitWidth-1)))
+    Cond = DAG.getZeroExtendInReg(Cond, MVT::i1);
+
+  // The chain (Op#0) and basic block destination (Op#2) are always legal types.
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0), Cond,
+                                N->getOperand(2));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_BR_CC(SDNode *N, unsigned OpNo) {
+  assert(OpNo == 2 && "Don't know how to promote this operand!");
+
+  SDOperand LHS = N->getOperand(2);
+  SDOperand RHS = N->getOperand(3);
+  PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(1))->get());
+
+  // The chain (Op#0), CC (#1) and basic block destination (Op#4) are always
+  // legal types.
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0),
+                                N->getOperand(1), LHS, RHS, N->getOperand(4));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo) {
+  assert(OpNo == 0 && "Don't know how to promote this operand!");
+
+  SDOperand LHS = N->getOperand(0);
+  SDOperand RHS = N->getOperand(1);
+  PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(4))->get());
+
+  // The CC (#4) and the possible return values (#2 and #3) have legal types.
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), LHS, RHS, N->getOperand(2),
+                                N->getOperand(3), N->getOperand(4));
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_SETCC(SDNode *N, unsigned OpNo) {
+  assert(OpNo == 0 && "Don't know how to promote this operand!");
+
+  SDOperand LHS = N->getOperand(0);
+  SDOperand RHS = N->getOperand(1);
+  PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(2))->get());
+
+  // The CC (#2) is always legal.
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), LHS, RHS, N->getOperand(2));
+}
+
+/// PromoteSetCCOperands - Promote the operands of a comparison.  This code is
+/// shared among BR_CC, SELECT_CC, and SETCC handlers.
+void DAGTypeLegalizer::PromoteSetCCOperands(SDOperand &NewLHS,SDOperand &NewRHS,
+                                            ISD::CondCode CCCode) {
+  MVT VT = NewLHS.getValueType();
+
+  // Get the promoted values.
+  NewLHS = GetPromotedInteger(NewLHS);
+  NewRHS = GetPromotedInteger(NewRHS);
+
+  // We have to insert explicit sign or zero extends.  Note
+  // that we could insert sign extends for ALL conditions, but zero extend
+  // is cheaper on many machines (an AND instead of two shifts), so prefer
+  // it.
+  switch (CCCode) {
+  default: assert(0 && "Unknown integer comparison!");
+  case ISD::SETEQ:
+  case ISD::SETNE:
+  case ISD::SETUGE:
+  case ISD::SETUGT:
+  case ISD::SETULE:
+  case ISD::SETULT:
+    // ALL of these operations will work if we either sign or zero extend
+    // the operands (including the unsigned comparisons!).  Zero extend is
+    // usually a simpler/cheaper operation, so prefer it.
+    NewLHS = DAG.getZeroExtendInReg(NewLHS, VT);
+    NewRHS = DAG.getZeroExtendInReg(NewRHS, VT);
+    break;
+  case ISD::SETGE:
+  case ISD::SETGT:
+  case ISD::SETLT:
+  case ISD::SETLE:
+    NewLHS = DAG.getNode(ISD::SIGN_EXTEND_INREG, NewLHS.getValueType(), NewLHS,
+                         DAG.getValueType(VT));
+    NewRHS = DAG.getNode(ISD::SIGN_EXTEND_INREG, NewRHS.getValueType(), NewRHS,
+                         DAG.getValueType(VT));
+    break;
+  }
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_STORE(StoreSDNode *N, unsigned OpNo){
+  assert(ISD::isUNINDEXEDStore(N) && "Indexed store during type legalization!");
+  SDOperand Ch = N->getChain(), Ptr = N->getBasePtr();
+  int SVOffset = N->getSrcValueOffset();
+  unsigned Alignment = N->getAlignment();
+  bool isVolatile = N->isVolatile();
+
+  SDOperand Val = GetPromotedInteger(N->getValue());  // Get promoted value.
+
+  assert(!N->isTruncatingStore() && "Cannot promote this store operand!");
+
+  // Truncate the value and store the result.
+  return DAG.getTruncStore(Ch, Val, Ptr, N->getSrcValue(),
+                           SVOffset, N->getMemoryVT(),
+                           isVolatile, Alignment);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_BUILD_VECTOR(SDNode *N) {
+  // The vector type is legal but the element type is not.  This implies
+  // that the vector is a power-of-two in length and that the element
+  // type does not have a strange size (eg: it is not i1).
+  MVT VecVT = N->getValueType(0);
+  unsigned NumElts = VecVT.getVectorNumElements();
+  assert(!(NumElts & 1) && "Legal vector of one illegal element?");
+
+  // Build a vector of half the length out of elements of twice the bitwidth.
+  // For example <4 x i16> -> <2 x i32>.
+  MVT OldVT = N->getOperand(0).getValueType();
+  MVT NewVT = MVT::getIntegerVT(2 * OldVT.getSizeInBits());
+  assert(OldVT.isSimple() && NewVT.isSimple());
+
+  std::vector<SDOperand> NewElts;
+  NewElts.reserve(NumElts/2);
+
+  for (unsigned i = 0; i < NumElts; i += 2) {
+    // Combine two successive elements into one promoted element.
+    SDOperand Lo = N->getOperand(i);
+    SDOperand Hi = N->getOperand(i+1);
+    if (TLI.isBigEndian())
+      std::swap(Lo, Hi);
+    NewElts.push_back(JoinIntegers(Lo, Hi));
+  }
+
+  SDOperand NewVec = DAG.getNode(ISD::BUILD_VECTOR,
+                                 MVT::getVectorVT(NewVT, NewElts.size()),
+                                 &NewElts[0], NewElts.size());
+
+  // Convert the new vector to the old vector type.
+  return DAG.getNode(ISD::BIT_CONVERT, VecVT, NewVec);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N,
+                                                             unsigned OpNo) {
+  if (OpNo == 1) {
+    // Promote the inserted value.  This is valid because the type does not
+    // have to match the vector element type.
+
+    // Check that any extra bits introduced will be truncated away.
+    assert(N->getOperand(1).getValueType().getSizeInBits() >=
+           N->getValueType(0).getVectorElementType().getSizeInBits() &&
+           "Type of inserted value narrower than vector element type!");
+    return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0),
+                                  GetPromotedInteger(N->getOperand(1)),
+                                  N->getOperand(2));
+  }
+
+  assert(OpNo == 2 && "Different operand and result vector types?");
+
+  // Promote the index.
+  SDOperand Idx = N->getOperand(2);
+  Idx = DAG.getZeroExtendInReg(GetPromotedInteger(Idx), Idx.getValueType());
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0),
+                                N->getOperand(1), Idx);
+}
+
+SDOperand DAGTypeLegalizer::PromoteIntOp_MEMBARRIER(SDNode *N) {
+  SDOperand NewOps[6];
+  NewOps[0] = N->getOperand(0);
+  for (unsigned i = 1; i < array_lengthof(NewOps); ++i) {
+    SDOperand Flag = GetPromotedInteger(N->getOperand(i));
+    NewOps[i] = DAG.getZeroExtendInReg(Flag, MVT::i1);
+  }
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), NewOps,
+                                array_lengthof(NewOps));
+}
+
+
+//===----------------------------------------------------------------------===//
+//  Integer Result Expansion
+//===----------------------------------------------------------------------===//
+
+/// ExpandIntegerResult - This method is called when the specified result of the
+/// specified node is found to need expansion.  At this point, the node may also
+/// have invalid operands or may have other results that need promotion; we just
+/// know that (at least) one result needs expansion.
+void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
+  DEBUG(cerr << "Expand integer result: "; N->dump(&DAG); cerr << "\n");
+  SDOperand Lo, Hi;
+  Lo = Hi = SDOperand();
+
+  // See if the target wants to custom expand this node.
+  if (TLI.getOperationAction(N->getOpcode(), N->getValueType(ResNo)) ==
+      TargetLowering::Custom) {
+    // If the target wants to, allow it to lower this itself.
+    if (SDNode *P = TLI.ReplaceNodeResults(N, DAG)) {
+      // Everything that once used N now uses P.  We are guaranteed that the
+      // result value types of N and the result value types of P match.
+      ReplaceNodeWith(N, P);
+      return;
+    }
+  }
+
+  switch (N->getOpcode()) {
+  default:
+#ifndef NDEBUG
+    cerr << "ExpandIntegerResult #" << ResNo << ": ";
+    N->dump(&DAG); cerr << "\n";
+#endif
+    assert(0 && "Do not know how to expand the result of this operator!");
+    abort();
+
+  case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, Lo, Hi); break;
+  case ISD::SELECT:       SplitRes_SELECT(N, Lo, Hi); break;
+  case ISD::SELECT_CC:    SplitRes_SELECT_CC(N, Lo, Hi); break;
+  case ISD::UNDEF:        SplitRes_UNDEF(N, Lo, Hi); break;
+
+  case ISD::BIT_CONVERT:        ExpandRes_BIT_CONVERT(N, Lo, Hi); break;
+  case ISD::BUILD_PAIR:         ExpandRes_BUILD_PAIR(N, Lo, Hi); break;
+  case ISD::EXTRACT_ELEMENT:    ExpandRes_EXTRACT_ELEMENT(N, Lo, Hi); break;
+  case ISD::EXTRACT_VECTOR_ELT: ExpandRes_EXTRACT_VECTOR_ELT(N, Lo, Hi); break;
+
+  case ISD::Constant:    ExpandIntRes_Constant(N, Lo, Hi); break;
+  case ISD::ANY_EXTEND:  ExpandIntRes_ANY_EXTEND(N, Lo, Hi); break;
+  case ISD::ZERO_EXTEND: ExpandIntRes_ZERO_EXTEND(N, Lo, Hi); break;
+  case ISD::SIGN_EXTEND: ExpandIntRes_SIGN_EXTEND(N, Lo, Hi); break;
+  case ISD::AssertZext:  ExpandIntRes_AssertZext(N, Lo, Hi); break;
+  case ISD::TRUNCATE:    ExpandIntRes_TRUNCATE(N, Lo, Hi); break;
+  case ISD::SIGN_EXTEND_INREG: ExpandIntRes_SIGN_EXTEND_INREG(N, Lo, Hi); break;
+  case ISD::FP_TO_SINT:  ExpandIntRes_FP_TO_SINT(N, Lo, Hi); break;
+  case ISD::FP_TO_UINT:  ExpandIntRes_FP_TO_UINT(N, Lo, Hi); break;
+  case ISD::LOAD:        ExpandIntRes_LOAD(cast<LoadSDNode>(N), Lo, Hi); break;
+
+  case ISD::AND:
+  case ISD::OR:
+  case ISD::XOR:         ExpandIntRes_Logical(N, Lo, Hi); break;
+  case ISD::BSWAP:       ExpandIntRes_BSWAP(N, Lo, Hi); break;
+  case ISD::ADD:
+  case ISD::SUB:         ExpandIntRes_ADDSUB(N, Lo, Hi); break;
+  case ISD::ADDC:
+  case ISD::SUBC:        ExpandIntRes_ADDSUBC(N, Lo, Hi); break;
+  case ISD::ADDE:
+  case ISD::SUBE:        ExpandIntRes_ADDSUBE(N, Lo, Hi); break;
+  case ISD::MUL:         ExpandIntRes_MUL(N, Lo, Hi); break;
+  case ISD::SDIV:        ExpandIntRes_SDIV(N, Lo, Hi); break;
+  case ISD::SREM:        ExpandIntRes_SREM(N, Lo, Hi); break;
+  case ISD::UDIV:        ExpandIntRes_UDIV(N, Lo, Hi); break;
+  case ISD::UREM:        ExpandIntRes_UREM(N, Lo, Hi); break;
+  case ISD::SHL:
+  case ISD::SRA:
+  case ISD::SRL:         ExpandIntRes_Shift(N, Lo, Hi); break;
+
+  case ISD::CTLZ:        ExpandIntRes_CTLZ(N, Lo, Hi); break;
+  case ISD::CTPOP:       ExpandIntRes_CTPOP(N, Lo, Hi); break;
+  case ISD::CTTZ:        ExpandIntRes_CTTZ(N, Lo, Hi); break;
+  }
+
+  // If Lo/Hi is null, the sub-method took care of registering results etc.
+  if (Lo.Val)
+    SetExpandedInteger(SDOperand(N, ResNo), Lo, Hi);
+}
+
+void DAGTypeLegalizer::ExpandIntRes_Constant(SDNode *N,
+                                             SDOperand &Lo, SDOperand &Hi) {
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+  unsigned NBitWidth = NVT.getSizeInBits();
+  const APInt &Cst = cast<ConstantSDNode>(N)->getAPIntValue();
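+  // For example, expanding the i64 constant 0x123456789ABCDEF0 with an i32
+  // NVT gives Lo = 0x9ABCDEF0 and Hi = 0x12345678.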
+  Lo = DAG.getConstant(APInt(Cst).trunc(NBitWidth), NVT);
+  Hi = DAG.getConstant(Cst.lshr(NBitWidth).trunc(NBitWidth), NVT);
+}
+
+void DAGTypeLegalizer::ExpandIntRes_ANY_EXTEND(SDNode *N,
+                                               SDOperand &Lo, SDOperand &Hi) {
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+  SDOperand Op = N->getOperand(0);
+  if (Op.getValueType().bitsLE(NVT)) {
+    // The low part is any extension of the input (which degenerates to a copy).
+    Lo = DAG.getNode(ISD::ANY_EXTEND, NVT, Op);
+    Hi = DAG.getNode(ISD::UNDEF, NVT);   // The high part is undefined.
+  } else {
+    // For example, extension of an i48 to an i64.  The operand type necessarily
+    // promotes to the result type, so will end up being expanded too.
+    assert(getTypeAction(Op.getValueType()) == PromoteInteger &&
+           "Only know how to promote this result!");
+    SDOperand Res = GetPromotedInteger(Op);
+    assert(Res.getValueType() == N->getValueType(0) &&
+           "Operand over promoted?");
+    // Split the promoted operand.  This will simplify when it is expanded.
+    SplitInteger(Res, Lo, Hi);
+  }
+}
+
+void DAGTypeLegalizer::ExpandIntRes_ZERO_EXTEND(SDNode *N,
+                                                SDOperand &Lo, SDOperand &Hi) {
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+  SDOperand Op = N->getOperand(0);
+  if (Op.getValueType().bitsLE(NVT)) {
+    // The low part is zero extension of the input (which degenerates to a copy).
+    Lo = DAG.getNode(ISD::ZERO_EXTEND, NVT, N->getOperand(0));
+    Hi = DAG.getConstant(0, NVT);   // The high part is just a zero.
+  } else {
+    // For example, extension of an i48 to an i64.  The operand type necessarily
+    // promotes to the result type, so will end up being expanded too.
+    assert(getTypeAction(Op.getValueType()) == PromoteInteger &&
+           "Only know how to promote this result!");
+    SDOperand Res = GetPromotedInteger(Op);
+    assert(Res.getValueType() == N->getValueType(0) &&
+           "Operand over promoted?");
+    // Split the promoted operand.  This will simplify when it is expanded.
+    SplitInteger(Res, Lo, Hi);
+    unsigned ExcessBits =
+      Op.getValueType().getSizeInBits() - NVT.getSizeInBits();
+    Hi = DAG.getZeroExtendInReg(Hi, MVT::getIntegerVT(ExcessBits));
+  }
+}
+
+void DAGTypeLegalizer::ExpandIntRes_SIGN_EXTEND(SDNode *N,
+                                                SDOperand &Lo, SDOperand &Hi) {
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+  SDOperand Op = N->getOperand(0);
+  if (Op.getValueType().bitsLE(NVT)) {
+    // The low part is sign extension of the input (which degenerates to a copy).
+    Lo = DAG.getNode(ISD::SIGN_EXTEND, NVT, N->getOperand(0));
+    // The high part is obtained by SRA'ing all but one of the bits of low part.
+    unsigned LoSize = NVT.getSizeInBits();
+    Hi = DAG.getNode(ISD::SRA, NVT, Lo,
+                     DAG.getConstant(LoSize-1, TLI.getShiftAmountTy()));
+  } else {
+    // For example, extension of an i48 to an i64.  The operand type necessarily
+    // promotes to the result type, so will end up being expanded too.
+    assert(getTypeAction(Op.getValueType()) == PromoteInteger &&
+           "Only know how to promote this result!");
+    SDOperand Res = GetPromotedInteger(Op);
+    assert(Res.getValueType() == N->getValueType(0) &&
+           "Operand over promoted?");
+    // Split the promoted operand.  This will simplify when it is expanded.
+    SplitInteger(Res, Lo, Hi);
+    unsigned ExcessBits =
+      Op.getValueType().getSizeInBits() - NVT.getSizeInBits();
+    Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, Hi.getValueType(), Hi,
+                     DAG.getValueType(MVT::getIntegerVT(ExcessBits)));
+  }
+}
+
+void DAGTypeLegalizer::ExpandIntRes_AssertZext(SDNode *N,
+                                               SDOperand &Lo, SDOperand &Hi) {
+  GetExpandedInteger(N->getOperand(0), Lo, Hi);
+  MVT NVT = Lo.getValueType();
+  MVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
+  unsigned NVTBits = NVT.getSizeInBits();
+  unsigned EVTBits = EVT.getSizeInBits();
+
+  if (NVTBits < EVTBits) {
+    Hi = DAG.getNode(ISD::AssertZext, NVT, Hi,
+                     DAG.getValueType(MVT::getIntegerVT(EVTBits - NVTBits)));
+  } else {
+    Lo = DAG.getNode(ISD::AssertZext, NVT, Lo, DAG.getValueType(EVT));
+    // The high part must be zero, make it explicit.
+    Hi = DAG.getConstant(0, NVT);
+  }
+}
+
+void DAGTypeLegalizer::ExpandIntRes_TRUNCATE(SDNode *N,
+                                             SDOperand &Lo, SDOperand &Hi) {
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
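+  // For example, i64 = truncate i128 X on a 32-bit target: Lo is X truncated
+  // to i32 and Hi is (X >> 32) truncated to i32.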
+  Lo = DAG.getNode(ISD::TRUNCATE, NVT, N->getOperand(0));
+  Hi = DAG.getNode(ISD::SRL, N->getOperand(0).getValueType(), N->getOperand(0),
+                   DAG.getConstant(NVT.getSizeInBits(),
+                                   TLI.getShiftAmountTy()));
+  Hi = DAG.getNode(ISD::TRUNCATE, NVT, Hi);
+}
+
+void DAGTypeLegalizer::
+ExpandIntRes_SIGN_EXTEND_INREG(SDNode *N, SDOperand &Lo, SDOperand &Hi) {
+  GetExpandedInteger(N->getOperand(0), Lo, Hi);
+  MVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
+
+  if (EVT.bitsLE(Lo.getValueType())) {
+    // sext_inreg the low part if needed.
+    Lo = DAG.getNode(ISD::SIGN_EXTEND_INREG, Lo.getValueType(), Lo,
+                     N->getOperand(1));
+
+    // The high part gets the sign extension from the lo-part.  This handles
+    // things like sextinreg V:i64 from i8.
+    Hi = DAG.getNode(ISD::SRA, Hi.getValueType(), Lo,
+                     DAG.getConstant(Hi.getValueType().getSizeInBits()-1,
+                                     TLI.getShiftAmountTy()));
+  } else {
+    // For example, extension of an i48 to an i64.  Leave the low part alone,
+    // sext_inreg the high part.
+    unsigned ExcessBits =
+      EVT.getSizeInBits() - Lo.getValueType().getSizeInBits();
+    Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, Hi.getValueType(), Hi,
+                     DAG.getValueType(MVT::getIntegerVT(ExcessBits)));
+  }
+}
+
+void DAGTypeLegalizer::ExpandIntRes_FP_TO_SINT(SDNode *N, SDOperand &Lo,
+                                               SDOperand &Hi) {
+  MVT VT = N->getValueType(0);
+  SDOperand Op = N->getOperand(0);
+  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+  if (VT == MVT::i64) {
+    if (Op.getValueType() == MVT::f32)
+      LC = RTLIB::FPTOSINT_F32_I64;
+    else if (Op.getValueType() == MVT::f64)
+      LC = RTLIB::FPTOSINT_F64_I64;
+    else if (Op.getValueType() == MVT::f80)
+      LC = RTLIB::FPTOSINT_F80_I64;
+    else if (Op.getValueType() == MVT::ppcf128)
+      LC = RTLIB::FPTOSINT_PPCF128_I64;
+  } else if (VT == MVT::i128) {
+    if (Op.getValueType() == MVT::f32)
+      LC = RTLIB::FPTOSINT_F32_I128;
+    else if (Op.getValueType() == MVT::f64)
+      LC = RTLIB::FPTOSINT_F64_I128;
+    else if (Op.getValueType() == MVT::f80)
+      LC = RTLIB::FPTOSINT_F80_I128;
+    else if (Op.getValueType() == MVT::ppcf128)
+      LC = RTLIB::FPTOSINT_PPCF128_I128;
+  } else {
+    assert(0 && "Unexpected fp-to-sint conversion!");
+  }
+  SplitInteger(MakeLibCall(LC, VT, &Op, 1, true/*sign irrelevant*/), Lo, Hi);
+}
+
+void DAGTypeLegalizer::ExpandIntRes_FP_TO_UINT(SDNode *N, SDOperand &Lo,
+                                               SDOperand &Hi) {
+  MVT VT = N->getValueType(0);
+  SDOperand Op = N->getOperand(0);
+  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+  if (VT == MVT::i64) {
+    if (Op.getValueType() == MVT::f32)
+      LC = RTLIB::FPTOUINT_F32_I64;
+    else if (Op.getValueType() == MVT::f64)
+      LC = RTLIB::FPTOUINT_F64_I64;
+    else if (Op.getValueType() == MVT::f80)
+      LC = RTLIB::FPTOUINT_F80_I64;
+    else if (Op.getValueType() == MVT::ppcf128)
+      LC = RTLIB::FPTOUINT_PPCF128_I64;
+  } else if (VT == MVT::i128) {
+    if (Op.getValueType() == MVT::f32)
+      LC = RTLIB::FPTOUINT_F32_I128;
+    else if (Op.getValueType() == MVT::f64)
+      LC = RTLIB::FPTOUINT_F64_I128;
+    else if (Op.getValueType() == MVT::f80)
+      LC = RTLIB::FPTOUINT_F80_I128;
+    else if (Op.getValueType() == MVT::ppcf128)
+      LC = RTLIB::FPTOUINT_PPCF128_I128;
+  } else {
+    assert(0 && "Unexpected fp-to-uint conversion!");
+  }
+  SplitInteger(MakeLibCall(LC, VT, &Op, 1, false/*sign irrelevant*/), Lo, Hi);
+}
+
+void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
+                                         SDOperand &Lo, SDOperand &Hi) {
+  if (ISD::isNormalLoad(N)) {
+    ExpandRes_NormalLoad(N, Lo, Hi);
+    return;
+  }
+
+  assert(ISD::isUNINDEXEDLoad(N) && "Indexed load during type legalization!");
+
+  MVT VT = N->getValueType(0);
+  MVT NVT = TLI.getTypeToTransformTo(VT);
+  SDOperand Ch  = N->getChain();    // Legalize the chain.
+  SDOperand Ptr = N->getBasePtr();  // Legalize the pointer.
+  ISD::LoadExtType ExtType = N->getExtensionType();
+  int SVOffset = N->getSrcValueOffset();
+  unsigned Alignment = N->getAlignment();
+  bool isVolatile = N->isVolatile();
+
+  assert(NVT.isByteSized() && "Expanded type not byte sized!");
+
+  if (N->getMemoryVT().bitsLE(NVT)) {
+    MVT EVT = N->getMemoryVT();
+
+    Lo = DAG.getExtLoad(ExtType, NVT, Ch, Ptr, N->getSrcValue(), SVOffset, EVT,
+                        isVolatile, Alignment);
+
+    // Remember the chain.
+    Ch = Lo.getValue(1);
+
+    if (ExtType == ISD::SEXTLOAD) {
+      // The high part is obtained by SRA'ing all but one of the bits of the
+      // lo part.
+      unsigned LoSize = Lo.getValueType().getSizeInBits();
+      Hi = DAG.getNode(ISD::SRA, NVT, Lo,
+                       DAG.getConstant(LoSize-1, TLI.getShiftAmountTy()));
+    } else if (ExtType == ISD::ZEXTLOAD) {
+      // The high part is just a zero.
+      Hi = DAG.getConstant(0, NVT);
+    } else {
+      assert(ExtType == ISD::EXTLOAD && "Unknown extload!");
+      // The high part is undefined.
+      Hi = DAG.getNode(ISD::UNDEF, NVT);
+    }
+  } else if (TLI.isLittleEndian()) {
+    // Little-endian - low bits are at low addresses.
+    Lo = DAG.getLoad(NVT, Ch, Ptr, N->getSrcValue(), SVOffset,
+                     isVolatile, Alignment);
+
+    unsigned ExcessBits =
+      N->getMemoryVT().getSizeInBits() - NVT.getSizeInBits();
+    MVT NEVT = MVT::getIntegerVT(ExcessBits);
+
+    // Increment the pointer to the other half.
+    unsigned IncrementSize = NVT.getSizeInBits()/8;
+    Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
+                      DAG.getIntPtrConstant(IncrementSize));
+    Hi = DAG.getExtLoad(ExtType, NVT, Ch, Ptr, N->getSrcValue(),
+                        SVOffset+IncrementSize, NEVT,
+                        isVolatile, MinAlign(Alignment, IncrementSize));
+
+    // Build a factor node to remember that this load is independent of the
+    // other one.
+    Ch = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1),
+                     Hi.getValue(1));
+  } else {
+    // Big-endian - high bits are at low addresses.  Favor aligned loads at
+    // the cost of some bit-fiddling.
+    MVT EVT = N->getMemoryVT();
+    unsigned EBytes = EVT.getStoreSizeInBits()/8;
+    unsigned IncrementSize = NVT.getSizeInBits()/8;
+    unsigned ExcessBits = (EBytes - IncrementSize)*8;
+
+    // Load both the high bits and maybe some of the low bits.
+    Hi = DAG.getExtLoad(ExtType, NVT, Ch, Ptr, N->getSrcValue(), SVOffset,
+                        MVT::getIntegerVT(EVT.getSizeInBits() - ExcessBits),
+                        isVolatile, Alignment);
+
+    // Increment the pointer to the other half.
+    Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
+                      DAG.getIntPtrConstant(IncrementSize));
+    // Load the rest of the low bits.
+    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, NVT, Ch, Ptr, N->getSrcValue(),
+                        SVOffset+IncrementSize,
+                        MVT::getIntegerVT(ExcessBits),
+                        isVolatile, MinAlign(Alignment, IncrementSize));
+
+    // Build a factor node to remember that this load is independent of the
+    // other one.
+    Ch = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1),
+                     Hi.getValue(1));
+
+    if (ExcessBits < NVT.getSizeInBits()) {
+      // Transfer low bits from the bottom of Hi to the top of Lo.
+      Lo = DAG.getNode(ISD::OR, NVT, Lo,
+                       DAG.getNode(ISD::SHL, NVT, Hi,
+                                   DAG.getConstant(ExcessBits,
+                                                   TLI.getShiftAmountTy())));
+      // Move high bits to the right position in Hi.
+      Hi = DAG.getNode(ExtType == ISD::SEXTLOAD ? ISD::SRA : ISD::SRL, NVT, Hi,
+                       DAG.getConstant(NVT.getSizeInBits() - ExcessBits,
+                                       TLI.getShiftAmountTy()));
+    }
+  }
+
+  // Legalized the chain result - switch anything that used the old chain to
+  // use the new one.
+  ReplaceValueWith(SDOperand(N, 1), Ch);
+}
+
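For reference, the little-endian branch of the load expansion above amounts to loading the two halves at the original address and at address + IncrementSize and gluing them back together. A rough standalone sketch in plain C++ (not part of the patch; the helper name is made up):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Split a 64-bit load into two 32-bit loads, little-endian style:
    // the low half sits at the original address, the high half 4 bytes later.
    uint64_t load64_as_two_halves(const unsigned char *p) {
      uint32_t lo, hi;
      std::memcpy(&lo, p, 4);      // low bits are at the low address
      std::memcpy(&hi, p + 4, 4);  // high bits are at address + 4
      return ((uint64_t)hi << 32) | lo;
    }

    int main() {
      unsigned char buf[8];
      uint64_t v = 0x1122334455667788ULL;
      std::memcpy(buf, &v, sizeof v);  // demo setup; assumes a little-endian host
      std::printf("0x%016llX\n", (unsigned long long)load64_as_two_halves(buf));
    }

The big-endian branch does the same thing but loads the high half first and shifts bits between the halves so that both memory accesses stay aligned.
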
+void DAGTypeLegalizer::ExpandIntRes_Logical(SDNode *N,
+                                            SDOperand &Lo, SDOperand &Hi) {
+  SDOperand LL, LH, RL, RH;
+  GetExpandedInteger(N->getOperand(0), LL, LH);
+  GetExpandedInteger(N->getOperand(1), RL, RH);
+  Lo = DAG.getNode(N->getOpcode(), LL.getValueType(), LL, RL);
+  Hi = DAG.getNode(N->getOpcode(), LL.getValueType(), LH, RH);
+}
+
+void DAGTypeLegalizer::ExpandIntRes_BSWAP(SDNode *N,
+                                          SDOperand &Lo, SDOperand &Hi) {
+  GetExpandedInteger(N->getOperand(0), Hi, Lo);  // Note swapped operands.
+  Lo = DAG.getNode(ISD::BSWAP, Lo.getValueType(), Lo);
+  Hi = DAG.getNode(ISD::BSWAP, Hi.getValueType(), Hi);
+}
+
+void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
+                                           SDOperand &Lo, SDOperand &Hi) {
+  // Expand the subcomponents.
+  SDOperand LHSL, LHSH, RHSL, RHSH;
+  GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
+  GetExpandedInteger(N->getOperand(1), RHSL, RHSH);
+  SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag);
+  SDOperand LoOps[2] = { LHSL, RHSL };
+  SDOperand HiOps[3] = { LHSH, RHSH };
+
+  if (N->getOpcode() == ISD::ADD) {
+    Lo = DAG.getNode(ISD::ADDC, VTList, LoOps, 2);
+    HiOps[2] = Lo.getValue(1);
+    Hi = DAG.getNode(ISD::ADDE, VTList, HiOps, 3);
+  } else {
+    Lo = DAG.getNode(ISD::SUBC, VTList, LoOps, 2);
+    HiOps[2] = Lo.getValue(1);
+    Hi = DAG.getNode(ISD::SUBE, VTList, HiOps, 3);
+  }
+}
+
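The ADDC/ADDE pair above is the usual add-with-carry expansion: add the low halves, then feed the carry into the addition of the high halves. A minimal standalone C++ sketch of the same arithmetic (illustration only; the helper name is invented):

    #include <cstdint>
    #include <cstdio>

    // Double-word addition from 32-bit halves: ADDC produces the low sum and
    // a carry, ADDE adds the high halves plus that carry.
    void add64(uint32_t al, uint32_t ah, uint32_t bl, uint32_t bh,
               uint32_t &lo, uint32_t &hi) {
      lo = al + bl;                      // low part (ADDC)
      uint32_t carry = lo < al ? 1 : 0;  // carry out of the low addition
      hi = ah + bh + carry;              // high part consumes the carry (ADDE)
    }

    int main() {
      uint32_t lo, hi;
      add64(0xFFFFFFFFu, 0x00000001u, 0x00000001u, 0x00000002u, lo, hi);
      std::printf("0x%08X%08X\n", hi, lo);  // prints 0x0000000400000000
    }

The SUBC/SUBE path has the same shape with a borrow in place of the carry.
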
+void DAGTypeLegalizer::ExpandIntRes_ADDSUBC(SDNode *N,
+                                            SDOperand &Lo, SDOperand &Hi) {
+  // Expand the subcomponents.
+  SDOperand LHSL, LHSH, RHSL, RHSH;
+  GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
+  GetExpandedInteger(N->getOperand(1), RHSL, RHSH);
+  SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag);
+  SDOperand LoOps[2] = { LHSL, RHSL };
+  SDOperand HiOps[3] = { LHSH, RHSH };
+
+  if (N->getOpcode() == ISD::ADDC) {
+    Lo = DAG.getNode(ISD::ADDC, VTList, LoOps, 2);
+    HiOps[2] = Lo.getValue(1);
+    Hi = DAG.getNode(ISD::ADDE, VTList, HiOps, 3);
+  } else {
+    Lo = DAG.getNode(ISD::SUBC, VTList, LoOps, 2);
+    HiOps[2] = Lo.getValue(1);
+    Hi = DAG.getNode(ISD::SUBE, VTList, HiOps, 3);
+  }
+
+  // Legalized the flag result - switch anything that used the old flag to
+  // use the new one.
+  ReplaceValueWith(SDOperand(N, 1), Hi.getValue(1));
+}
+
+void DAGTypeLegalizer::ExpandIntRes_ADDSUBE(SDNode *N,
+                                            SDOperand &Lo, SDOperand &Hi) {
+  // Expand the subcomponents.
+  SDOperand LHSL, LHSH, RHSL, RHSH;
+  GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
+  GetExpandedInteger(N->getOperand(1), RHSL, RHSH);
+  SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag);
+  SDOperand LoOps[3] = { LHSL, RHSL, N->getOperand(2) };
+  SDOperand HiOps[3] = { LHSH, RHSH };
+
+  Lo = DAG.getNode(N->getOpcode(), VTList, LoOps, 3);
+  HiOps[2] = Lo.getValue(1);
+  Hi = DAG.getNode(N->getOpcode(), VTList, HiOps, 3);
+
+  // Legalized the flag result - switch anything that used the old flag to
+  // use the new one.
+  ReplaceValueWith(SDOperand(N, 1), Hi.getValue(1));
+}
+
+void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N,
+                                        SDOperand &Lo, SDOperand &Hi) {
+  MVT VT = N->getValueType(0);
+  MVT NVT = TLI.getTypeToTransformTo(VT);
+
+  bool HasMULHS = TLI.isOperationLegal(ISD::MULHS, NVT);
+  bool HasMULHU = TLI.isOperationLegal(ISD::MULHU, NVT);
+  bool HasSMUL_LOHI = TLI.isOperationLegal(ISD::SMUL_LOHI, NVT);
+  bool HasUMUL_LOHI = TLI.isOperationLegal(ISD::UMUL_LOHI, NVT);
+  if (HasMULHU || HasMULHS || HasUMUL_LOHI || HasSMUL_LOHI) {
+    SDOperand LL, LH, RL, RH;
+    GetExpandedInteger(N->getOperand(0), LL, LH);
+    GetExpandedInteger(N->getOperand(1), RL, RH);
+    unsigned OuterBitSize = VT.getSizeInBits();
+    unsigned InnerBitSize = NVT.getSizeInBits();
+    unsigned LHSSB = DAG.ComputeNumSignBits(N->getOperand(0));
+    unsigned RHSSB = DAG.ComputeNumSignBits(N->getOperand(1));
+
+    APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize);
+    if (DAG.MaskedValueIsZero(N->getOperand(0), HighMask) &&
+        DAG.MaskedValueIsZero(N->getOperand(1), HighMask)) {
+      // The inputs are both zero-extended.
+      if (HasUMUL_LOHI) {
+        // We can emit a umul_lohi.
+        Lo = DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(NVT, NVT), LL, RL);
+        Hi = SDOperand(Lo.Val, 1);
+        return;
+      }
+      if (HasMULHU) {
+        // We can emit a mulhu+mul.
+        Lo = DAG.getNode(ISD::MUL, NVT, LL, RL);
+        Hi = DAG.getNode(ISD::MULHU, NVT, LL, RL);
+        return;
+      }
+    }
+    if (LHSSB > InnerBitSize && RHSSB > InnerBitSize) {
+      // The input values are both sign-extended.
+      if (HasSMUL_LOHI) {
+        // We can emit a smul_lohi.
+        Lo = DAG.getNode(ISD::SMUL_LOHI, DAG.getVTList(NVT, NVT), LL, RL);
+        Hi = SDOperand(Lo.Val, 1);
+        return;
+      }
+      if (HasMULHS) {
+        // We can emit a mulhs+mul.
+        Lo = DAG.getNode(ISD::MUL, NVT, LL, RL);
+        Hi = DAG.getNode(ISD::MULHS, NVT, LL, RL);
+        return;
+      }
+    }
+    if (HasUMUL_LOHI) {
+      // Lo,Hi = umul LHS, RHS.
+      SDOperand UMulLOHI = DAG.getNode(ISD::UMUL_LOHI,
+                                       DAG.getVTList(NVT, NVT), LL, RL);
+      Lo = UMulLOHI;
+      Hi = UMulLOHI.getValue(1);
+      RH = DAG.getNode(ISD::MUL, NVT, LL, RH);
+      LH = DAG.getNode(ISD::MUL, NVT, LH, RL);
+      Hi = DAG.getNode(ISD::ADD, NVT, Hi, RH);
+      Hi = DAG.getNode(ISD::ADD, NVT, Hi, LH);
+      return;
+    }
+    if (HasMULHU) {
+      Lo = DAG.getNode(ISD::MUL, NVT, LL, RL);
+      Hi = DAG.getNode(ISD::MULHU, NVT, LL, RL);
+      RH = DAG.getNode(ISD::MUL, NVT, LL, RH);
+      LH = DAG.getNode(ISD::MUL, NVT, LH, RL);
+      Hi = DAG.getNode(ISD::ADD, NVT, Hi, RH);
+      Hi = DAG.getNode(ISD::ADD, NVT, Hi, LH);
+      return;
+    }
+  }
+
+  // If nothing else, we can make a libcall.
+  RTLIB::Libcall LC;
+  switch (VT.getSimpleVT()) {
+  default:
+    assert(false && "Unsupported MUL!");
+  case MVT::i64:
+    LC = RTLIB::MUL_I64;
+    break;
+  }
+
+  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
+  SplitInteger(MakeLibCall(LC, VT, Ops, 2, true/*sign irrelevant*/), Lo, Hi);
+}
+
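When only MULHU (or UMUL_LOHI) is available, the sequence above computes the full product of the low halves and then folds in the two cross products - the standard schoolbook identity for a double-word multiply. A standalone C++ sketch of that identity (illustration only; names are made up):

    #include <cstdint>
    #include <cstdio>

    // 64-bit multiply from 32-bit halves: Lo is the low half of ll*rl, Hi is
    // the high half of ll*rl (the MULHU) plus the low halves of the two cross
    // products ll*rh and lh*rl.  The lh*rh term only affects bits above 64.
    void mul64(uint32_t ll, uint32_t lh, uint32_t rl, uint32_t rh,
               uint32_t &lo, uint32_t &hi) {
      uint64_t full = (uint64_t)ll * rl;  // umul_lohi of the low halves
      lo = (uint32_t)full;
      hi = (uint32_t)(full >> 32);        // mulhu part
      hi += ll * rh;                      // cross product, low 32 bits only
      hi += lh * rl;                      // cross product, low 32 bits only
    }

    int main() {
      uint64_t a = 0x0123456789ABCDEFULL, b = 0x0FEDCBA976543210ULL;
      uint32_t lo, hi;
      mul64((uint32_t)a, (uint32_t)(a >> 32), (uint32_t)b, (uint32_t)(b >> 32),
            lo, hi);
      std::printf("0x%08X%08X vs 0x%016llX\n", hi, lo,
                  (unsigned long long)(a * b));
    }
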
+void DAGTypeLegalizer::ExpandIntRes_SDIV(SDNode *N,
+                                         SDOperand &Lo, SDOperand &Hi) {
+  assert(N->getValueType(0) == MVT::i64 && "Unsupported sdiv!");
+  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
+  SplitInteger(MakeLibCall(RTLIB::SDIV_I64, N->getValueType(0), Ops, 2, true),
+               Lo, Hi);
+}
+
+void DAGTypeLegalizer::ExpandIntRes_SREM(SDNode *N,
+                                         SDOperand &Lo, SDOperand &Hi) {
+  assert(N->getValueType(0) == MVT::i64 && "Unsupported srem!");
+  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
+  SplitInteger(MakeLibCall(RTLIB::SREM_I64, N->getValueType(0), Ops, 2, true),
+               Lo, Hi);
+}
+
+void DAGTypeLegalizer::ExpandIntRes_UDIV(SDNode *N,
+                                         SDOperand &Lo, SDOperand &Hi) {
+  assert(N->getValueType(0) == MVT::i64 && "Unsupported udiv!");
+  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
+  SplitInteger(MakeLibCall(RTLIB::UDIV_I64, N->getValueType(0), Ops, 2, false),
+               Lo, Hi);
+}
+
+void DAGTypeLegalizer::ExpandIntRes_UREM(SDNode *N,
+                                         SDOperand &Lo, SDOperand &Hi) {
+  assert(N->getValueType(0) == MVT::i64 && "Unsupported urem!");
+  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
+  SplitInteger(MakeLibCall(RTLIB::UREM_I64, N->getValueType(0), Ops, 2, false),
+               Lo, Hi);
+}
+
+void DAGTypeLegalizer::ExpandIntRes_Shift(SDNode *N,
+                                          SDOperand &Lo, SDOperand &Hi) {
+  MVT VT = N->getValueType(0);
+
+  // If we can emit an efficient shift operation, do so now.  Check to see if
+  // the RHS is a constant.
+  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
+    return ExpandShiftByConstant(N, CN->getValue(), Lo, Hi);
+
+  // If we can determine that the high bit of the shift amount is zero or one,
+  // even if the low bits are variable, emit this shift in an optimized form.
+  if (ExpandShiftWithKnownAmountBit(N, Lo, Hi))
+    return;
+
+  // If this target supports shift_PARTS, use it.  First, map to the _PARTS opc.
+  unsigned PartsOpc;
+  if (N->getOpcode() == ISD::SHL) {
+    PartsOpc = ISD::SHL_PARTS;
+  } else if (N->getOpcode() == ISD::SRL) {
+    PartsOpc = ISD::SRL_PARTS;
+  } else {
+    assert(N->getOpcode() == ISD::SRA && "Unknown shift!");
+    PartsOpc = ISD::SRA_PARTS;
+  }
+
+  // Next check to see if the target supports this SHL_PARTS operation or if it
+  // will custom expand it.
+  MVT NVT = TLI.getTypeToTransformTo(VT);
+  TargetLowering::LegalizeAction Action = TLI.getOperationAction(PartsOpc, NVT);
+  if ((Action == TargetLowering::Legal && TLI.isTypeLegal(NVT)) ||
+      Action == TargetLowering::Custom) {
+    // Expand the subcomponents.
+    SDOperand LHSL, LHSH;
+    GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
+
+    SDOperand Ops[] = { LHSL, LHSH, N->getOperand(1) };
+    MVT VT = LHSL.getValueType();
+    Lo = DAG.getNode(PartsOpc, DAG.getNodeValueTypes(VT, VT), 2, Ops, 3);
+    Hi = Lo.getValue(1);
+    return;
+  }
+
+  // Otherwise, emit a libcall.
+  assert(VT == MVT::i64 && "Unsupported shift!");
+
+  RTLIB::Libcall LC;
+  bool isSigned;
+  if (N->getOpcode() == ISD::SHL) {
+    LC = RTLIB::SHL_I64;
+    isSigned = false; /*sign irrelevant*/
+  } else if (N->getOpcode() == ISD::SRL) {
+    LC = RTLIB::SRL_I64;
+    isSigned = false;
+  } else {
+    assert(N->getOpcode() == ISD::SRA && "Unknown shift!");
+    LC = RTLIB::SRA_I64;
+    isSigned = true;
+  }
+
+  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
+  SplitInteger(MakeLibCall(LC, VT, Ops, 2, isSigned), Lo, Hi);
+}
+
+void DAGTypeLegalizer::ExpandIntRes_CTLZ(SDNode *N,
+                                         SDOperand &Lo, SDOperand &Hi) {
+  // ctlz (HiLo) -> Hi != 0 ? ctlz(Hi) : (ctlz(Lo)+32)
+  GetExpandedInteger(N->getOperand(0), Lo, Hi);
+  MVT NVT = Lo.getValueType();
+
+  SDOperand HiNotZero = DAG.getSetCC(TLI.getSetCCResultType(Hi), Hi,
+                                     DAG.getConstant(0, NVT), ISD::SETNE);
+
+  SDOperand LoLZ = DAG.getNode(ISD::CTLZ, NVT, Lo);
+  SDOperand HiLZ = DAG.getNode(ISD::CTLZ, NVT, Hi);
+
+  Lo = DAG.getNode(ISD::SELECT, NVT, HiNotZero, HiLZ,
+                   DAG.getNode(ISD::ADD, NVT, LoLZ,
+                               DAG.getConstant(NVT.getSizeInBits(), NVT)));
+  Hi = DAG.getConstant(0, NVT);
+}
+
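The CTLZ expansion above uses the identity ctlz(Hi:Lo) = Hi != 0 ? ctlz(Hi) : ctlz(Lo) + 32 for a 64-bit value split into 32-bit halves. A small standalone C++ sketch of the same identity, with a portable bit loop standing in for the CTLZ node (illustration only):

    #include <cstdint>
    #include <cstdio>

    // Count leading zeros of a 32-bit value; returns 32 for zero.
    unsigned clz32(uint32_t x) {
      unsigned n = 0;
      for (uint32_t bit = 0x80000000u; bit != 0 && !(x & bit); bit >>= 1)
        ++n;
      return n;
    }

    // ctlz of a double-word value: Hi != 0 ? ctlz(Hi) : ctlz(Lo) + 32.
    unsigned clz64(uint32_t hi, uint32_t lo) {
      return hi != 0 ? clz32(hi) : clz32(lo) + 32;
    }

    int main() {
      std::printf("%u\n", clz64(0, 0x00010000u));  // prints 47
    }

The CTTZ expansion below is the mirror image, selecting on whether the low half is nonzero.
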
+void DAGTypeLegalizer::ExpandIntRes_CTPOP(SDNode *N,
+                                          SDOperand &Lo, SDOperand &Hi) {
+  // ctpop(HiLo) -> ctpop(Hi)+ctpop(Lo)
+  GetExpandedInteger(N->getOperand(0), Lo, Hi);
+  MVT NVT = Lo.getValueType();
+  Lo = DAG.getNode(ISD::ADD, NVT, DAG.getNode(ISD::CTPOP, NVT, Lo),
+                   DAG.getNode(ISD::CTPOP, NVT, Hi));
+  Hi = DAG.getConstant(0, NVT);
+}
+
+void DAGTypeLegalizer::ExpandIntRes_CTTZ(SDNode *N,
+                                         SDOperand &Lo, SDOperand &Hi) {
+  // cttz (HiLo) -> Lo != 0 ? cttz(Lo) : (cttz(Hi)+32)
+  GetExpandedInteger(N->getOperand(0), Lo, Hi);
+  MVT NVT = Lo.getValueType();
+
+  SDOperand LoNotZero = DAG.getSetCC(TLI.getSetCCResultType(Lo), Lo,
+                                     DAG.getConstant(0, NVT), ISD::SETNE);
+
+  SDOperand LoLZ = DAG.getNode(ISD::CTTZ, NVT, Lo);
+  SDOperand HiLZ = DAG.getNode(ISD::CTTZ, NVT, Hi);
+
+  Lo = DAG.getNode(ISD::SELECT, NVT, LoNotZero, LoLZ,
+                   DAG.getNode(ISD::ADD, NVT, HiLZ,
+                               DAG.getConstant(NVT.getSizeInBits(), NVT)));
+  Hi = DAG.getConstant(0, NVT);
+}
+
+/// ExpandShiftByConstant - N is a shift by a value that needs to be expanded,
+/// and the shift amount is a constant 'Amt'.  Expand the operation.
+void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
+                                             SDOperand &Lo, SDOperand &Hi) {
+  // Expand the incoming operand to be shifted, so that we have its parts
+  SDOperand InL, InH;
+  GetExpandedInteger(N->getOperand(0), InL, InH);
+
+  MVT NVT = InL.getValueType();
+  unsigned VTBits = N->getValueType(0).getSizeInBits();
+  unsigned NVTBits = NVT.getSizeInBits();
+  MVT ShTy = N->getOperand(1).getValueType();
+
+  if (N->getOpcode() == ISD::SHL) {
+    if (Amt > VTBits) {
+      Lo = Hi = DAG.getConstant(0, NVT);
+    } else if (Amt > NVTBits) {
+      Lo = DAG.getConstant(0, NVT);
+      Hi = DAG.getNode(ISD::SHL, NVT, InL, DAG.getConstant(Amt-NVTBits,ShTy));
+    } else if (Amt == NVTBits) {
+      Lo = DAG.getConstant(0, NVT);
+      Hi = InL;
+    } else {
+      Lo = DAG.getNode(ISD::SHL, NVT, InL, DAG.getConstant(Amt, ShTy));
+      Hi = DAG.getNode(ISD::OR, NVT,
+                       DAG.getNode(ISD::SHL, NVT, InH,
+                                   DAG.getConstant(Amt, ShTy)),
+                       DAG.getNode(ISD::SRL, NVT, InL,
+                                   DAG.getConstant(NVTBits-Amt, ShTy)));
+    }
+    return;
+  }
+
+  if (N->getOpcode() == ISD::SRL) {
+    if (Amt > VTBits) {
+      Lo = DAG.getConstant(0, NVT);
+      Hi = DAG.getConstant(0, NVT);
+    } else if (Amt > NVTBits) {
+      Lo = DAG.getNode(ISD::SRL, NVT, InH, DAG.getConstant(Amt-NVTBits,ShTy));
+      Hi = DAG.getConstant(0, NVT);
+    } else if (Amt == NVTBits) {
+      Lo = InH;
+      Hi = DAG.getConstant(0, NVT);
+    } else {
+      Lo = DAG.getNode(ISD::OR, NVT,
+                       DAG.getNode(ISD::SRL, NVT, InL,
+                                   DAG.getConstant(Amt, ShTy)),
+                       DAG.getNode(ISD::SHL, NVT, InH,
+                                   DAG.getConstant(NVTBits-Amt, ShTy)));
+      Hi = DAG.getNode(ISD::SRL, NVT, InH, DAG.getConstant(Amt, ShTy));
+    }
+    return;
+  }
+
+  assert(N->getOpcode() == ISD::SRA && "Unknown shift!");
+  if (Amt > VTBits) {
+    Hi = Lo = DAG.getNode(ISD::SRA, NVT, InH,
+                          DAG.getConstant(NVTBits-1, ShTy));
+  } else if (Amt > NVTBits) {
+    Lo = DAG.getNode(ISD::SRA, NVT, InH,
+                     DAG.getConstant(Amt-NVTBits, ShTy));
+    Hi = DAG.getNode(ISD::SRA, NVT, InH,
+                     DAG.getConstant(NVTBits-1, ShTy));
+  } else if (Amt == NVTBits) {
+    Lo = InH;
+    Hi = DAG.getNode(ISD::SRA, NVT, InH,
+                     DAG.getConstant(NVTBits-1, ShTy));
+  } else {
+    Lo = DAG.getNode(ISD::OR, NVT,
+                     DAG.getNode(ISD::SRL, NVT, InL,
+                                 DAG.getConstant(Amt, ShTy)),
+                     DAG.getNode(ISD::SHL, NVT, InH,
+                                 DAG.getConstant(NVTBits-Amt, ShTy)));
+    Hi = DAG.getNode(ISD::SRA, NVT, InH, DAG.getConstant(Amt, ShTy));
+  }
+}
+
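For the general case in ExpandShiftByConstant (a constant amount strictly between 0 and NVTBits), the expansion is the familiar two-register shift: each output half combines a shift of one input half with the bits that fall off the other. A standalone C++ sketch of the SHL case (illustration only; assumes 0 < amt < 32):

    #include <cstdint>
    #include <cstdio>

    // Double-word shift left by a constant amount amt, 0 < amt < 32.
    // The amt == 0, amt == 32 and amt > 32 cases correspond to the separate
    // branches handled explicitly in the code above.
    void shl64(uint32_t inl, uint32_t inh, unsigned amt,
               uint32_t &lo, uint32_t &hi) {
      lo = inl << amt;
      hi = (inh << amt) | (inl >> (32 - amt));
    }

    int main() {
      uint32_t lo, hi;
      shl64(0x80000001u, 0x00000002u, 4, lo, hi);
      std::printf("0x%08X%08X\n", hi, lo);  // prints 0x0000002800000010
    }

SRL and SRA are symmetric, with SRA additionally replicating the sign bit into the vacated high bits.
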
+/// ExpandShiftWithKnownAmountBit - Try to determine whether we can simplify
+/// this shift based on knowledge of the high bit of the shift amount.  If we
+/// can tell this, we know whether it is >= 32 or < 32, without knowing the actual
+/// shift amount.
+bool DAGTypeLegalizer::
+ExpandShiftWithKnownAmountBit(SDNode *N, SDOperand &Lo, SDOperand &Hi) {
+  SDOperand Amt = N->getOperand(1);
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+  MVT ShTy = Amt.getValueType();
+  unsigned ShBits = ShTy.getSizeInBits();
+  unsigned NVTBits = NVT.getSizeInBits();
+  assert(isPowerOf2_32(NVTBits) &&
+         "Expanded integer type size not a power of two!");
+
+  APInt HighBitMask = APInt::getHighBitsSet(ShBits, ShBits - Log2_32(NVTBits));
+  APInt KnownZero, KnownOne;
+  DAG.ComputeMaskedBits(N->getOperand(1), HighBitMask, KnownZero, KnownOne);
+
+  // If we don't know anything about the high bits, exit.
+  if (((KnownZero|KnownOne) & HighBitMask) == 0)
+    return false;
+
+  // Get the incoming operand to be shifted.
+  SDOperand InL, InH;
+  GetExpandedInteger(N->getOperand(0), InL, InH);
+
+  // If we know that any of the high bits of the shift amount are one, then we
+  // can do this as a couple of simple shifts.
+  if (KnownOne.intersects(HighBitMask)) {
+    // Mask out the high bit, which we know is set.
+    Amt = DAG.getNode(ISD::AND, ShTy, Amt,
+                      DAG.getConstant(~HighBitMask, ShTy));
+
+    switch (N->getOpcode()) {
+    default: assert(0 && "Unknown shift");
+    case ISD::SHL:
+      Lo = DAG.getConstant(0, NVT);              // Low part is zero.
+      Hi = DAG.getNode(ISD::SHL, NVT, InL, Amt); // High part from Lo part.
+      return true;
+    case ISD::SRL:
+      Hi = DAG.getConstant(0, NVT);              // Hi part is zero.
+      Lo = DAG.getNode(ISD::SRL, NVT, InH, Amt); // Lo part from Hi part.
+      return true;
+    case ISD::SRA:
+      Hi = DAG.getNode(ISD::SRA, NVT, InH,       // Sign extend high part.
+                       DAG.getConstant(NVTBits-1, ShTy));
+      Lo = DAG.getNode(ISD::SRA, NVT, InH, Amt); // Lo part from Hi part.
+      return true;
+    }
+  }
+
+  // If we know that all of the high bits of the shift amount are zero, then we
+  // can do this as a couple of simple shifts.
+  if ((KnownZero & HighBitMask) == HighBitMask) {
+    // Compute NVTBits - Amt (32 - Amt in the common 64-to-32 case).
+    SDOperand Amt2 = DAG.getNode(ISD::SUB, ShTy,
+                                 DAG.getConstant(NVTBits, ShTy),
+                                 Amt);
+    unsigned Op1, Op2;
+    switch (N->getOpcode()) {
+    default: assert(0 && "Unknown shift");
+    case ISD::SHL:  Op1 = ISD::SHL; Op2 = ISD::SRL; break;
+    case ISD::SRL:
+    case ISD::SRA:  Op1 = ISD::SRL; Op2 = ISD::SHL; break;
+    }
+
+    Lo = DAG.getNode(N->getOpcode(), NVT, InL, Amt);
+    Hi = DAG.getNode(ISD::OR, NVT,
+                     DAG.getNode(Op1, NVT, InH, Amt),
+                     DAG.getNode(Op2, NVT, InL, Amt2));
+    return true;
+  }
+
+  return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+//  Integer Operand Expansion
+//===----------------------------------------------------------------------===//
+
+/// ExpandIntegerOperand - This method is called when the specified operand of
+/// the specified node is found to need expansion.  At this point, all of the
+/// result types of the node are known to be legal, but other operands of the
+/// node may need promotion or expansion as well as the specified one.
+bool DAGTypeLegalizer::ExpandIntegerOperand(SDNode *N, unsigned OpNo) {
+  DEBUG(cerr << "Expand integer operand: "; N->dump(&DAG); cerr << "\n");
+  SDOperand Res(0, 0);
+
+  if (TLI.getOperationAction(N->getOpcode(), N->getOperand(OpNo).getValueType())
+      == TargetLowering::Custom)
+    Res = TLI.LowerOperation(SDOperand(N, OpNo), DAG);
+
+  if (Res.Val == 0) {
+    switch (N->getOpcode()) {
+    default:
+  #ifndef NDEBUG
+      cerr << "ExpandIntegerOperand Op #" << OpNo << ": ";
+      N->dump(&DAG); cerr << "\n";
+  #endif
+      assert(0 && "Do not know how to expand this operator's operand!");
+      abort();
+
+    case ISD::BUILD_VECTOR:    Res = ExpandOp_BUILD_VECTOR(N); break;
+    case ISD::BIT_CONVERT:     Res = ExpandOp_BIT_CONVERT(N); break;
+    case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break;
+
+    case ISD::TRUNCATE:        Res = ExpandIntOp_TRUNCATE(N); break;
+
+    case ISD::SINT_TO_FP:
+      Res = ExpandIntOp_SINT_TO_FP(N->getOperand(0), N->getValueType(0));
+      break;
+    case ISD::UINT_TO_FP:
+      Res = ExpandIntOp_UINT_TO_FP(N->getOperand(0), N->getValueType(0));
+      break;
+
+    case ISD::BR_CC:     Res = ExpandIntOp_BR_CC(N); break;
+    case ISD::SELECT_CC: Res = ExpandIntOp_SELECT_CC(N); break;
+    case ISD::SETCC:     Res = ExpandIntOp_SETCC(N); break;
+
+    case ISD::STORE:
+      Res = ExpandIntOp_STORE(cast<StoreSDNode>(N), OpNo);
+      break;
+    }
+  }
+
+  // If the result is null, the sub-method took care of registering results etc.
+  if (!Res.Val) return false;
+  // If the result is N, the sub-method updated N in place.  Check to see if any
+  // operands are new, and if so, mark them.
+  if (Res.Val == N) {
+    // Mark N as new and remark N and its operands.  This allows us to correctly
+    // revisit N if it needs another step of expansion and allows us to visit
+    // any new operands to N.
+    ReanalyzeNode(N);
+    return true;
+  }
+
+  assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
+         "Invalid operand expansion");
+
+  ReplaceValueWith(SDOperand(N, 0), Res);
+  return false;
+}
+
+SDOperand DAGTypeLegalizer::ExpandIntOp_TRUNCATE(SDNode *N) {
+  SDOperand InL, InH;
+  GetExpandedInteger(N->getOperand(0), InL, InH);
+  // Just truncate the low part of the source.
+  return DAG.getNode(ISD::TRUNCATE, N->getValueType(0), InL);
+}
+
+SDOperand DAGTypeLegalizer::ExpandIntOp_SINT_TO_FP(SDOperand Source,
+                                                     MVT DestTy) {
+  // We know the destination is legal, but that the input needs to be expanded.
+  MVT SourceVT = Source.getValueType();
+
+  // Check to see if the target has a custom way to lower this.  If so, use it.
+  switch (TLI.getOperationAction(ISD::SINT_TO_FP, SourceVT)) {
+  default: assert(0 && "This action not implemented for this operation!");
+  case TargetLowering::Legal:
+  case TargetLowering::Expand:
+    break;   // This case is handled below.
+  case TargetLowering::Custom:
+    SDOperand NV = TLI.LowerOperation(DAG.getNode(ISD::SINT_TO_FP, DestTy,
+                                                  Source), DAG);
+    if (NV.Val) return NV;
+    break;   // The target lowered this.
+  }
+
+  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+  if (SourceVT == MVT::i64) {
+    if (DestTy == MVT::f32)
+      LC = RTLIB::SINTTOFP_I64_F32;
+    else {
+      assert(DestTy == MVT::f64 && "Unknown fp value type!");
+      LC = RTLIB::SINTTOFP_I64_F64;
+    }
+  } else if (SourceVT == MVT::i128) {
+    if (DestTy == MVT::f32)
+      LC = RTLIB::SINTTOFP_I128_F32;
+    else if (DestTy == MVT::f64)
+      LC = RTLIB::SINTTOFP_I128_F64;
+    else if (DestTy == MVT::f80)
+      LC = RTLIB::SINTTOFP_I128_F80;
+    else {
+      assert(DestTy == MVT::ppcf128 && "Unknown fp value type!");
+      LC = RTLIB::SINTTOFP_I128_PPCF128;
+    }
+  } else {
+    assert(0 && "Unknown int value type!");
+  }
+
+  assert(LC != RTLIB::UNKNOWN_LIBCALL &&
+         "Don't know how to expand this SINT_TO_FP!");
+  return MakeLibCall(LC, DestTy, &Source, 1, true);
+}
+
+SDOperand DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDOperand Source,
+                                                     MVT DestTy) {
+  // We know the destination is legal, but that the input needs to be expanded.
+  assert(getTypeAction(Source.getValueType()) == ExpandInteger &&
+         "This is not an expansion!");
+
+  // If this is unsigned, and not supported, first perform the conversion to
+  // signed, then adjust the result if the sign bit is set.
+  SDOperand SignedConv = ExpandIntOp_SINT_TO_FP(Source, DestTy);
+
+  // The signed conversion result will be incorrect if the 'sign bit' of the
+  // incoming integer is set.  To handle this, we dynamically test to see if
+  // it is set, and, if so, add a fudge factor.
+  SDOperand Lo, Hi;
+  GetExpandedInteger(Source, Lo, Hi);
+
+  SDOperand SignSet = DAG.getSetCC(TLI.getSetCCResultType(Hi), Hi,
+                                   DAG.getConstant(0, Hi.getValueType()),
+                                   ISD::SETLT);
+  SDOperand Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4);
+  SDOperand CstOffset = DAG.getNode(ISD::SELECT, Zero.getValueType(),
+                                    SignSet, Four, Zero);
+  uint64_t FF = 0x5f800000ULL;
+  if (TLI.isLittleEndian()) FF <<= 32;
+  Constant *FudgeFactor = ConstantInt::get((Type*)Type::Int64Ty, FF);
+
+  SDOperand CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy());
+  CPIdx = DAG.getNode(ISD::ADD, TLI.getPointerTy(), CPIdx, CstOffset);
+  SDOperand FudgeInReg;
+  if (DestTy == MVT::f32)
+    FudgeInReg = DAG.getLoad(MVT::f32, DAG.getEntryNode(), CPIdx, NULL, 0);
+  else if (DestTy.bitsGT(MVT::f32))
+    // FIXME: Avoid the extend by constructing the right constant pool?
+    FudgeInReg = DAG.getExtLoad(ISD::EXTLOAD, DestTy, DAG.getEntryNode(),
+                                CPIdx, NULL, 0, MVT::f32);
+  else
+    assert(0 && "Unexpected conversion");
+
+  return DAG.getNode(ISD::FADD, DestTy, SignedConv, FudgeInReg);
+}
+
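The UINT_TO_FP expansion above first performs a signed conversion and then, if the sign bit of the source was set, adds back 2^64 via the constant-pool fudge factor (0x5f800000 is the IEEE single-precision encoding of 2^64). A standalone C++ sketch of the same trick for i64 -> f64 (illustration only; assumes two's-complement wrap on the signed cast, as on LLVM targets):

    #include <cstdint>
    #include <cstdio>

    double uint64_to_double(uint64_t x) {
      // Signed conversion: wrong by exactly 2^64 whenever the sign bit is set.
      double d = static_cast<double>(static_cast<int64_t>(x));
      if (x >> 63)
        d += 18446744073709551616.0;  // fudge factor: 2^64
      return d;
    }

    int main() {
      uint64_t v = 0xFFFFFFFFFFFFFFF0ULL;
      std::printf("%.1f vs %.1f\n", uint64_to_double(v),
                  static_cast<double>(v));
    }
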
+SDOperand DAGTypeLegalizer::ExpandIntOp_BR_CC(SDNode *N) {
+  SDOperand NewLHS = N->getOperand(2), NewRHS = N->getOperand(3);
+  ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(1))->get();
+  IntegerExpandSetCCOperands(NewLHS, NewRHS, CCCode);
+
+  // If ExpandSetCCOperands returned a scalar, we need to compare the result
+  // against zero to select between true and false values.
+  if (NewRHS.Val == 0) {
+    NewRHS = DAG.getConstant(0, NewLHS.getValueType());
+    CCCode = ISD::SETNE;
+  }
+
+  // Update N to have the operands specified.
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0),
+                                DAG.getCondCode(CCCode), NewLHS, NewRHS,
+                                N->getOperand(4));
+}
+
+SDOperand DAGTypeLegalizer::ExpandIntOp_SELECT_CC(SDNode *N) {
+  SDOperand NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
+  ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(4))->get();
+  IntegerExpandSetCCOperands(NewLHS, NewRHS, CCCode);
+
+  // If ExpandSetCCOperands returned a scalar, we need to compare the result
+  // against zero to select between true and false values.
+  if (NewRHS.Val == 0) {
+    NewRHS = DAG.getConstant(0, NewLHS.getValueType());
+    CCCode = ISD::SETNE;
+  }
+
+  // Update N to have the operands specified.
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), NewLHS, NewRHS,
+                                N->getOperand(2), N->getOperand(3),
+                                DAG.getCondCode(CCCode));
+}
+
+SDOperand DAGTypeLegalizer::ExpandIntOp_SETCC(SDNode *N) {
+  SDOperand NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
+  ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(2))->get();
+  IntegerExpandSetCCOperands(NewLHS, NewRHS, CCCode);
+
+  // If ExpandSetCCOperands returned a scalar, use it.
+  if (NewRHS.Val == 0) {
+    assert(NewLHS.getValueType() == N->getValueType(0) &&
+           "Unexpected setcc expansion!");
+    return NewLHS;
+  }
+
+  // Otherwise, update N to have the operands specified.
+  return DAG.UpdateNodeOperands(SDOperand(N, 0), NewLHS, NewRHS,
+                                DAG.getCondCode(CCCode));
+}
+
+/// IntegerExpandSetCCOperands - Expand the operands of a comparison.  This code
+/// is shared among BR_CC, SELECT_CC, and SETCC handlers.
+void DAGTypeLegalizer::IntegerExpandSetCCOperands(SDOperand &NewLHS,
+                                                  SDOperand &NewRHS,
+                                                  ISD::CondCode &CCCode) {
+  SDOperand LHSLo, LHSHi, RHSLo, RHSHi;
+  GetExpandedInteger(NewLHS, LHSLo, LHSHi);
+  GetExpandedInteger(NewRHS, RHSLo, RHSHi);
+
+  MVT VT = NewLHS.getValueType();
+
+  if (CCCode == ISD::SETEQ || CCCode == ISD::SETNE) {
+    if (RHSLo == RHSHi) {
+      if (ConstantSDNode *RHSCST = dyn_cast<ConstantSDNode>(RHSLo)) {
+        if (RHSCST->isAllOnesValue()) {
+          // Equality comparison to -1.
+          NewLHS = DAG.getNode(ISD::AND, LHSLo.getValueType(), LHSLo, LHSHi);
+          NewRHS = RHSLo;
+          return;
+        }
+      }
+    }
+
+    NewLHS = DAG.getNode(ISD::XOR, LHSLo.getValueType(), LHSLo, RHSLo);
+    NewRHS = DAG.getNode(ISD::XOR, LHSLo.getValueType(), LHSHi, RHSHi);
+    NewLHS = DAG.getNode(ISD::OR, NewLHS.getValueType(), NewLHS, NewRHS);
+    NewRHS = DAG.getConstant(0, NewLHS.getValueType());
+    return;
+  }
+
+  // If this is a comparison of the sign bit, just look at the top part.
+  // X > -1,  x < 0
+  if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(NewRHS))
+    if ((CCCode == ISD::SETLT && CST->isNullValue()) ||     // X < 0
+        (CCCode == ISD::SETGT && CST->isAllOnesValue())) {  // X > -1
+      NewLHS = LHSHi;
+      NewRHS = RHSHi;
+      return;
+    }
+
+  // FIXME: This generated code sucks.
+  ISD::CondCode LowCC;
+  switch (CCCode) {
+  default: assert(0 && "Unknown integer setcc!");
+  case ISD::SETLT:
+  case ISD::SETULT: LowCC = ISD::SETULT; break;
+  case ISD::SETGT:
+  case ISD::SETUGT: LowCC = ISD::SETUGT; break;
+  case ISD::SETLE:
+  case ISD::SETULE: LowCC = ISD::SETULE; break;
+  case ISD::SETGE:
+  case ISD::SETUGE: LowCC = ISD::SETUGE; break;
+  }
+
+  // Tmp1 = lo(op1) < lo(op2)   // Always unsigned comparison
+  // Tmp2 = hi(op1) < hi(op2)   // Signedness depends on operands
+  // dest = hi(op1) == hi(op2) ? Tmp1 : Tmp2;
+
+  // NOTE: on targets without efficient SELECT of bools, we can always use
+  // this identity: (B1 ? B2 : B3) --> (B1 & B2)|(!B1&B3)
+  TargetLowering::DAGCombinerInfo DagCombineInfo(DAG, false, true, NULL);
+  SDOperand Tmp1, Tmp2;
+  Tmp1 = TLI.SimplifySetCC(TLI.getSetCCResultType(LHSLo), LHSLo, RHSLo, LowCC,
+                           false, DagCombineInfo);
+  if (!Tmp1.Val)
+    Tmp1 = DAG.getSetCC(TLI.getSetCCResultType(LHSLo), LHSLo, RHSLo, LowCC);
+  Tmp2 = TLI.SimplifySetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi,
+                           CCCode, false, DagCombineInfo);
+  if (!Tmp2.Val)
+    Tmp2 = DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi,
+                       DAG.getCondCode(CCCode));
+
+  ConstantSDNode *Tmp1C = dyn_cast<ConstantSDNode>(Tmp1.Val);
+  ConstantSDNode *Tmp2C = dyn_cast<ConstantSDNode>(Tmp2.Val);
+  if ((Tmp1C && Tmp1C->isNullValue()) ||
+      (Tmp2C && Tmp2C->isNullValue() &&
+       (CCCode == ISD::SETLE || CCCode == ISD::SETGE ||
+        CCCode == ISD::SETUGE || CCCode == ISD::SETULE)) ||
+      (Tmp2C && Tmp2C->getAPIntValue() == 1 &&
+       (CCCode == ISD::SETLT || CCCode == ISD::SETGT ||
+        CCCode == ISD::SETUGT || CCCode == ISD::SETULT))) {
+    // Low part is known false; return the high part.
+    // For LE / GE, if high part is known false, ignore the low part.
+    // For LT / GT, if high part is known true, ignore the low part.
+    NewLHS = Tmp2;
+    NewRHS = SDOperand();
+    return;
+  }
+
+  NewLHS = TLI.SimplifySetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi,
+                             ISD::SETEQ, false, DagCombineInfo);
+  if (!NewLHS.Val)
+    NewLHS = DAG.getSetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi,
+                          ISD::SETEQ);
+  NewLHS = DAG.getNode(ISD::SELECT, Tmp1.getValueType(),
+                       NewLHS, Tmp1, Tmp2);
+  NewRHS = SDOperand();
+}
+
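In the general case, IntegerExpandSetCCOperands reduces a double-word ordered comparison to: compare the low halves unsigned, compare the high halves with the original signedness, and select between the two results depending on whether the high halves are equal. A standalone C++ sketch for signed less-than (illustration only; the helper name is invented):

    #include <cstdint>
    #include <cstdio>

    // Signed "less than" on a 64-bit value given as 32-bit halves:
    //   (ah == bh) ? (al <u bl) : (ah <s bh)
    bool slt64(uint32_t al, int32_t ah, uint32_t bl, int32_t bh) {
      bool lo_lt = al < bl;   // low halves: always unsigned
      bool hi_lt = ah < bh;   // high halves: signedness of the original compare
      return ah == bh ? lo_lt : hi_lt;
    }

    int main() {
      // -1 < 1 as signed 64-bit values.
      std::printf("%d\n", slt64(0xFFFFFFFFu, -1, 0x00000001u, 0));  // prints 1
    }
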
+SDOperand DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
+  if (ISD::isNormalStore(N))
+    return ExpandOp_NormalStore(N, OpNo);
+
+  assert(ISD::isUNINDEXEDStore(N) && "Indexed store during type legalization!");
+  assert(OpNo == 1 && "Can only expand the stored value so far");
+
+  MVT VT = N->getOperand(1).getValueType();
+  MVT NVT = TLI.getTypeToTransformTo(VT);
+  SDOperand Ch  = N->getChain();
+  SDOperand Ptr = N->getBasePtr();
+  int SVOffset = N->getSrcValueOffset();
+  unsigned Alignment = N->getAlignment();
+  bool isVolatile = N->isVolatile();
+  SDOperand Lo, Hi;
+
+  assert(NVT.isByteSized() && "Expanded type not byte sized!");
+
+  if (N->getMemoryVT().bitsLE(NVT)) {
+    GetExpandedInteger(N->getValue(), Lo, Hi);
+    return DAG.getTruncStore(Ch, Lo, Ptr, N->getSrcValue(), SVOffset,
+                             N->getMemoryVT(), isVolatile, Alignment);
+  } else if (TLI.isLittleEndian()) {
+    // Little-endian - low bits are at low addresses.
+    GetExpandedInteger(N->getValue(), Lo, Hi);
+
+    Lo = DAG.getStore(Ch, Lo, Ptr, N->getSrcValue(), SVOffset,
+                      isVolatile, Alignment);
+
+    unsigned ExcessBits =
+      N->getMemoryVT().getSizeInBits() - NVT.getSizeInBits();
+    MVT NEVT = MVT::getIntegerVT(ExcessBits);
+
+    // Increment the pointer to the other half.
+    unsigned IncrementSize = NVT.getSizeInBits()/8;
+    Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
+                      DAG.getIntPtrConstant(IncrementSize));
+    Hi = DAG.getTruncStore(Ch, Hi, Ptr, N->getSrcValue(),
+                           SVOffset+IncrementSize, NEVT,
+                           isVolatile, MinAlign(Alignment, IncrementSize));
+    return DAG.getNode(ISD::TokenFactor, MVT::Other, Lo, Hi);
+  } else {
+    // Big-endian - high bits are at low addresses.  Favor aligned stores at
+    // the cost of some bit-fiddling.
+    GetExpandedInteger(N->getValue(), Lo, Hi);
+
+    MVT EVT = N->getMemoryVT();
+    unsigned EBytes = EVT.getStoreSizeInBits()/8;
+    unsigned IncrementSize = NVT.getSizeInBits()/8;
+    unsigned ExcessBits = (EBytes - IncrementSize)*8;
+    MVT HiVT = MVT::getIntegerVT(EVT.getSizeInBits() - ExcessBits);
+
+    if (ExcessBits < NVT.getSizeInBits()) {
+      // Transfer high bits from the top of Lo to the bottom of Hi.
+      Hi = DAG.getNode(ISD::SHL, NVT, Hi,
+                       DAG.getConstant(NVT.getSizeInBits() - ExcessBits,
+                                       TLI.getShiftAmountTy()));
+      Hi = DAG.getNode(ISD::OR, NVT, Hi,
+                       DAG.getNode(ISD::SRL, NVT, Lo,
+                                   DAG.getConstant(ExcessBits,
+                                                   TLI.getShiftAmountTy())));
+    }
+
+    // Store both the high bits and maybe some of the low bits.
+    Hi = DAG.getTruncStore(Ch, Hi, Ptr, N->getSrcValue(),
+                           SVOffset, HiVT, isVolatile, Alignment);
+
+    // Increment the pointer to the other half.
+    Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
+                      DAG.getIntPtrConstant(IncrementSize));
+    // Store the lowest ExcessBits bits in the second half.
+    Lo = DAG.getTruncStore(Ch, Lo, Ptr, N->getSrcValue(),
+                           SVOffset+IncrementSize,
+                           MVT::getIntegerVT(ExcessBits),
+                           isVolatile, MinAlign(Alignment, IncrementSize));
+    return DAG.getNode(ISD::TokenFactor, MVT::Other, Lo, Hi);
+  }
+}

Modified: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp Sun Jul  6 15:45:41 2008
@@ -42,7 +42,7 @@
   // The root of the dag may dangle to deleted nodes until the type legalizer is
   // done.  Set it to null to avoid confusion.
   DAG.setRoot(SDOperand());
-  
+
   // Walk all nodes in the graph, assigning them a NodeID of 'ReadyToProcess'
   // (and remembering them) if they are leaves and assigning 'NewNode' if
   // non-leaves.
@@ -55,33 +55,36 @@
       I->setNodeId(NewNode);
     }
   }
-  
+
   // Now that we have a set of nodes to process, handle them all.
   while (!Worklist.empty()) {
     SDNode *N = Worklist.back();
     Worklist.pop_back();
     assert(N->getNodeId() == ReadyToProcess &&
            "Node should be ready if on worklist!");
-    
+
     // Scan the values produced by the node, checking to see if any result
     // types are illegal.
     unsigned i = 0;
     unsigned NumResults = N->getNumValues();
     do {
-      MVT::ValueType ResultVT = N->getValueType(i);
+      MVT ResultVT = N->getValueType(i);
       switch (getTypeAction(ResultVT)) {
       default:
         assert(false && "Unknown action!");
       case Legal:
         break;
-      case Promote:
-        PromoteResult(N, i);
+      case PromoteInteger:
+        PromoteIntegerResult(N, i);
+        goto NodeDone;
+      case ExpandInteger:
+        ExpandIntegerResult(N, i);
         goto NodeDone;
-      case Expand:
-        ExpandResult(N, i);
+      case SoftenFloat:
+        SoftenFloatResult(N, i);
         goto NodeDone;
-      case FloatToInt:
-        FloatToIntResult(N, i);
+      case ExpandFloat:
+        ExpandFloatResult(N, i);
         goto NodeDone;
       case Scalarize:
         ScalarizeResult(N, i);
@@ -98,20 +101,23 @@
     unsigned NumOperands = N->getNumOperands();
     bool NeedsRevisit = false;
     for (i = 0; i != NumOperands; ++i) {
-      MVT::ValueType OpVT = N->getOperand(i).getValueType();
+      MVT OpVT = N->getOperand(i).getValueType();
       switch (getTypeAction(OpVT)) {
       default:
         assert(false && "Unknown action!");
       case Legal:
         continue;
-      case Promote:
-        NeedsRevisit = PromoteOperand(N, i);
+      case PromoteInteger:
+        NeedsRevisit = PromoteIntegerOperand(N, i);
         break;
-      case Expand:
-        NeedsRevisit = ExpandOperand(N, i);
+      case ExpandInteger:
+        NeedsRevisit = ExpandIntegerOperand(N, i);
         break;
-      case FloatToInt:
-        NeedsRevisit = FloatToIntOperand(N, i);
+      case SoftenFloat:
+        NeedsRevisit = SoftenFloatOperand(N, i);
+        break;
+      case ExpandFloat:
+        NeedsRevisit = ExpandFloatOperand(N, i);
         break;
       case Scalarize:
         NeedsRevisit = ScalarizeOperand(N, i);
@@ -126,7 +132,7 @@
     // If the node needs revisiting, don't add all users to the worklist etc.
     if (NeedsRevisit)
       continue;
-    
+
     if (i == NumOperands)
       DEBUG(cerr << "Legally typed node: "; N->dump(&DAG); cerr << "\n");
     }
@@ -135,37 +141,37 @@
     // If we reach here, the node was processed, potentially creating new nodes.
     // Mark it as processed and add its users to the worklist as appropriate.
     N->setNodeId(Processed);
-    
+
     for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
          UI != E; ++UI) {
       SDNode *User = UI->getUser();
       int NodeID = User->getNodeId();
       assert(NodeID != ReadyToProcess && NodeID != Processed &&
              "Invalid node id for user of unprocessed node!");
-      
+
       // This node has two options: it can either be a new node or its Node ID
       // may be a count of the number of operands it has that are not ready.
       if (NodeID > 0) {
         User->setNodeId(NodeID-1);
-        
+
         // If this was the last use it was waiting on, add it to the ready list.
         if (NodeID-1 == ReadyToProcess)
           Worklist.push_back(User);
         continue;
       }
-      
+
       // Otherwise, this node is new: this is the first operand of it that
       // became ready.  Its new NodeID is the number of operands it has minus 1
       // (as this node is now processed).
       assert(NodeID == NewNode && "Unknown node ID!");
       User->setNodeId(User->getNumOperands()-1);
-      
+
       // If the node only has a single operand, it is now ready.
       if (User->getNumOperands() == 1)
         Worklist.push_back(User);
     }
   }
-  
+
   // If the root changed (e.g. it was a dead load), update the root.
   DAG.setRoot(Dummy.getValue());
 
@@ -223,6 +229,9 @@
   if (N->getNodeId() != NewNode)
     return;
 
+  // Remove any stale map entries.
+  ExpungeNode(N);
+
   // Okay, we know that this node is new.  Recursively walk all of its operands
   // to see if they are new also.  The depth of this walk is bounded by the size
   // of the new tree that was constructed (usually 2-3 nodes), so we don't worry
@@ -268,51 +277,6 @@
     Worklist.push_back(N);
 }
 
-void DAGTypeLegalizer::SanityCheck(SDNode *N) {
-  for (SmallVector<SDNode*, 128>::iterator I = Worklist.begin(),
-       E = Worklist.end(); I != E; ++I)
-    assert(*I != N);
-
-  for (DenseMap<SDOperand, SDOperand>::iterator I = ReplacedNodes.begin(),
-       E = ReplacedNodes.end(); I != E; ++I) {
-    assert(I->first.Val != N);
-    assert(I->second.Val != N);
-  }
-
-  for (DenseMap<SDOperand, SDOperand>::iterator I = PromotedNodes.begin(),
-       E = PromotedNodes.end(); I != E; ++I) {
-    assert(I->first.Val != N);
-    assert(I->second.Val != N);
-  }
-
-  for (DenseMap<SDOperand, SDOperand>::iterator
-       I = FloatToIntedNodes.begin(),
-       E = FloatToIntedNodes.end(); I != E; ++I) {
-    assert(I->first.Val != N);
-    assert(I->second.Val != N);
-  }
-
-  for (DenseMap<SDOperand, SDOperand>::iterator I = ScalarizedNodes.begin(),
-       E = ScalarizedNodes.end(); I != E; ++I) {
-    assert(I->first.Val != N);
-    assert(I->second.Val != N);
-  }
-
-  for (DenseMap<SDOperand, std::pair<SDOperand, SDOperand> >::iterator
-       I = ExpandedNodes.begin(), E = ExpandedNodes.end(); I != E; ++I) {
-    assert(I->first.Val != N);
-    assert(I->second.first.Val != N);
-    assert(I->second.second.Val != N);
-  }
-
-  for (DenseMap<SDOperand, std::pair<SDOperand, SDOperand> >::iterator
-       I = SplitNodes.begin(), E = SplitNodes.end(); I != E; ++I) {
-    assert(I->first.Val != N);
-    assert(I->second.first.Val != N);
-    assert(I->second.second.Val != N);
-  }
-}
-
 namespace {
   /// NodeUpdateListener - This class is a DAGUpdateListener that listens for
   /// updates to nodes and recomputes their ready state.
@@ -322,14 +286,14 @@
   public:
     NodeUpdateListener(DAGTypeLegalizer &dtl) : DTL(dtl) {}
 
-    virtual void NodeDeleted(SDNode *N) {
-      // Ignore deletes.
+    virtual void NodeDeleted(SDNode *N, SDNode *E) {
       assert(N->getNodeId() != DAGTypeLegalizer::Processed &&
              N->getNodeId() != DAGTypeLegalizer::ReadyToProcess &&
              "RAUW deleted processed node!");
-#ifndef NDEBUG
-      DTL.SanityCheck(N);
-#endif
+      // It is possible, though rare, for the deleted node N to occur as a
+      // target in a map, so note the replacement N -> E in ReplacedNodes.
+      assert(E && "Node not replaced?");
+      DTL.NoteDeletion(N, E);
     }
 
     virtual void NodeUpdated(SDNode *N) {
@@ -352,15 +316,16 @@
   if (From == To) return;
 
   // If expansion produced new nodes, make sure they are properly marked.
-  AnalyzeNewNode(To.Val);
+  ExpungeNode(From.Val);
+  AnalyzeNewNode(To.Val); // Expunges To.
 
   // Anything that used the old node should now use the new one.  Note that this
   // can potentially cause recursive merging.
   NodeUpdateListener NUL(*this);
   DAG.ReplaceAllUsesOfValueWith(From, To, &NUL);
 
-  // The old node may still be present in ExpandedNodes or PromotedNodes.
-  // Inform them about the replacement.
+  // The old node may still be present in a map like ExpandedIntegers or
+  // PromotedIntegers.  Inform maps about the replacement.
   ReplacedNodes[From] = To;
 }
 
@@ -370,7 +335,8 @@
   if (From == To) return;
 
   // If expansion produced new nodes, make sure they are properly marked.
-  AnalyzeNewNode(To);
+  ExpungeNode(From);
+  AnalyzeNewNode(To); // Expunges To.
 
   assert(From->getNumValues() == To->getNumValues() &&
          "Node results don't match");
@@ -379,9 +345,9 @@
   // can potentially cause recursive merging.
   NodeUpdateListener NUL(*this);
   DAG.ReplaceAllUsesWith(From, To, &NUL);
-  
-  // The old node may still be present in ExpandedNodes or PromotedNodes.
-  // Inform them about the replacement.
+
+  // The old node may still be present in a map like ExpandedIntegers or
+  // PromotedIntegers.  Inform maps about the replacement.
   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) {
     assert(From->getValueType(i) == To->getValueType(i) &&
            "Node results don't match");
@@ -402,33 +368,132 @@
   }
 }
 
-void DAGTypeLegalizer::SetPromotedOp(SDOperand Op, SDOperand Result) {
+/// ExpungeNode - If N has a bogus mapping in ReplacedNodes, eliminate it.
+/// This can occur when a node is deleted then reallocated as a new node -
+/// the mapping in ReplacedNodes applies to the deleted node, not the new
+/// one.
+/// The only map that can have a deleted node as a source is ReplacedNodes.
+/// Other maps can have deleted nodes as targets, but since their looked-up
+/// values are always immediately remapped using RemapNode, resulting in a
+/// not-deleted node, this is harmless as long as ReplacedNodes/RemapNode
+/// always performs correct mappings.  In order to keep the mapping correct,
+/// ExpungeNode should be called on any new nodes *before* adding them as
+/// either source or target to ReplacedNodes (which typically means calling
+/// Expunge when a new node is first seen, since it may no longer be marked
+/// NewNode by the time it is added to ReplacedNodes).
+void DAGTypeLegalizer::ExpungeNode(SDNode *N) {
+  if (N->getNodeId() != NewNode)
+    return;
+
+  // If N is not remapped by ReplacedNodes then there is nothing to do.
+  unsigned i, e;
+  for (i = 0, e = N->getNumValues(); i != e; ++i)
+    if (ReplacedNodes.find(SDOperand(N, i)) != ReplacedNodes.end())
+      break;
+
+  if (i == e)
+    return;
+
+  // Remove N from all maps - this is expensive but rare.
+
+  for (DenseMap<SDOperand, SDOperand>::iterator I = PromotedIntegers.begin(),
+       E = PromotedIntegers.end(); I != E; ++I) {
+    assert(I->first.Val != N);
+    RemapNode(I->second);
+  }
+
+  for (DenseMap<SDOperand, SDOperand>::iterator I = SoftenedFloats.begin(),
+       E = SoftenedFloats.end(); I != E; ++I) {
+    assert(I->first.Val != N);
+    RemapNode(I->second);
+  }
+
+  for (DenseMap<SDOperand, SDOperand>::iterator I = ScalarizedVectors.begin(),
+       E = ScalarizedVectors.end(); I != E; ++I) {
+    assert(I->first.Val != N);
+    RemapNode(I->second);
+  }
+
+  for (DenseMap<SDOperand, std::pair<SDOperand, SDOperand> >::iterator
+       I = ExpandedIntegers.begin(), E = ExpandedIntegers.end(); I != E; ++I){
+    assert(I->first.Val != N);
+    RemapNode(I->second.first);
+    RemapNode(I->second.second);
+  }
+
+  for (DenseMap<SDOperand, std::pair<SDOperand, SDOperand> >::iterator
+       I = ExpandedFloats.begin(), E = ExpandedFloats.end(); I != E; ++I) {
+    assert(I->first.Val != N);
+    RemapNode(I->second.first);
+    RemapNode(I->second.second);
+  }
+
+  for (DenseMap<SDOperand, std::pair<SDOperand, SDOperand> >::iterator
+       I = SplitVectors.begin(), E = SplitVectors.end(); I != E; ++I) {
+    assert(I->first.Val != N);
+    RemapNode(I->second.first);
+    RemapNode(I->second.second);
+  }
+
+  for (DenseMap<SDOperand, SDOperand>::iterator I = ReplacedNodes.begin(),
+       E = ReplacedNodes.end(); I != E; ++I)
+    RemapNode(I->second);
+
+  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
+    ReplacedNodes.erase(SDOperand(N, i));
+}
+
+
+void DAGTypeLegalizer::SetPromotedInteger(SDOperand Op, SDOperand Result) {
   AnalyzeNewNode(Result.Val);
 
-  SDOperand &OpEntry = PromotedNodes[Op];
+  SDOperand &OpEntry = PromotedIntegers[Op];
   assert(OpEntry.Val == 0 && "Node is already promoted!");
   OpEntry = Result;
 }
 
-void DAGTypeLegalizer::SetIntegerOp(SDOperand Op, SDOperand Result) {
+void DAGTypeLegalizer::SetSoftenedFloat(SDOperand Op, SDOperand Result) {
   AnalyzeNewNode(Result.Val);
 
-  SDOperand &OpEntry = FloatToIntedNodes[Op];
+  SDOperand &OpEntry = SoftenedFloats[Op];
   assert(OpEntry.Val == 0 && "Node is already converted to integer!");
   OpEntry = Result;
 }
 
-void DAGTypeLegalizer::SetScalarizedOp(SDOperand Op, SDOperand Result) {
+void DAGTypeLegalizer::SetScalarizedVector(SDOperand Op, SDOperand Result) {
   AnalyzeNewNode(Result.Val);
 
-  SDOperand &OpEntry = ScalarizedNodes[Op];
+  SDOperand &OpEntry = ScalarizedVectors[Op];
   assert(OpEntry.Val == 0 && "Node is already scalarized!");
   OpEntry = Result;
 }
 
-void DAGTypeLegalizer::GetExpandedOp(SDOperand Op, SDOperand &Lo, 
-                                     SDOperand &Hi) {
-  std::pair<SDOperand, SDOperand> &Entry = ExpandedNodes[Op];
+void DAGTypeLegalizer::GetExpandedInteger(SDOperand Op, SDOperand &Lo,
+                                          SDOperand &Hi) {
+  std::pair<SDOperand, SDOperand> &Entry = ExpandedIntegers[Op];
+  RemapNode(Entry.first);
+  RemapNode(Entry.second);
+  assert(Entry.first.Val && "Operand isn't expanded");
+  Lo = Entry.first;
+  Hi = Entry.second;
+}
+
+void DAGTypeLegalizer::SetExpandedInteger(SDOperand Op, SDOperand Lo,
+                                          SDOperand Hi) {
+  // Lo/Hi may have been newly allocated, if so, add nodeid's as relevant.
+  AnalyzeNewNode(Lo.Val);
+  AnalyzeNewNode(Hi.Val);
+
+  // Remember that this is the result of the node.
+  std::pair<SDOperand, SDOperand> &Entry = ExpandedIntegers[Op];
+  assert(Entry.first.Val == 0 && "Node already expanded");
+  Entry.first = Lo;
+  Entry.second = Hi;
+}
+
+void DAGTypeLegalizer::GetExpandedFloat(SDOperand Op, SDOperand &Lo,
+                                        SDOperand &Hi) {
+  std::pair<SDOperand, SDOperand> &Entry = ExpandedFloats[Op];
   RemapNode(Entry.first);
   RemapNode(Entry.second);
   assert(Entry.first.Val && "Operand isn't expanded");
@@ -436,20 +501,22 @@
   Hi = Entry.second;
 }
 
-void DAGTypeLegalizer::SetExpandedOp(SDOperand Op, SDOperand Lo, SDOperand Hi) {
+void DAGTypeLegalizer::SetExpandedFloat(SDOperand Op, SDOperand Lo,
+                                        SDOperand Hi) {
   // Lo/Hi may have been newly allocated, if so, add nodeid's as relevant.
   AnalyzeNewNode(Lo.Val);
   AnalyzeNewNode(Hi.Val);
 
   // Remember that this is the result of the node.
-  std::pair<SDOperand, SDOperand> &Entry = ExpandedNodes[Op];
+  std::pair<SDOperand, SDOperand> &Entry = ExpandedFloats[Op];
   assert(Entry.first.Val == 0 && "Node already expanded");
   Entry.first = Lo;
   Entry.second = Hi;
 }
 
-void DAGTypeLegalizer::GetSplitOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi) {
-  std::pair<SDOperand, SDOperand> &Entry = SplitNodes[Op];
+void DAGTypeLegalizer::GetSplitVector(SDOperand Op, SDOperand &Lo,
+                                      SDOperand &Hi) {
+  std::pair<SDOperand, SDOperand> &Entry = SplitVectors[Op];
   RemapNode(Entry.first);
   RemapNode(Entry.second);
   assert(Entry.first.Val && "Operand isn't split");
@@ -457,31 +524,35 @@
   Hi = Entry.second;
 }
 
-void DAGTypeLegalizer::SetSplitOp(SDOperand Op, SDOperand Lo, SDOperand Hi) {
+void DAGTypeLegalizer::SetSplitVector(SDOperand Op, SDOperand Lo,
+                                      SDOperand Hi) {
   // Lo/Hi may have been newly allocated, if so, add nodeid's as relevant.
   AnalyzeNewNode(Lo.Val);
   AnalyzeNewNode(Hi.Val);
 
   // Remember that this is the result of the node.
-  std::pair<SDOperand, SDOperand> &Entry = SplitNodes[Op];
+  std::pair<SDOperand, SDOperand> &Entry = SplitVectors[Op];
   assert(Entry.first.Val == 0 && "Node already split");
   Entry.first = Lo;
   Entry.second = Hi;
 }
 
 
+//===----------------------------------------------------------------------===//
+// Utilities.
+//===----------------------------------------------------------------------===//
+
 /// BitConvertToInteger - Convert to an integer of the same size.
 SDOperand DAGTypeLegalizer::BitConvertToInteger(SDOperand Op) {
-  return DAG.getNode(ISD::BIT_CONVERT,
-                     MVT::getIntegerType(MVT::getSizeInBits(Op.getValueType())),
-                     Op);
+  unsigned BitWidth = Op.getValueType().getSizeInBits();
+  return DAG.getNode(ISD::BIT_CONVERT, MVT::getIntegerVT(BitWidth), Op);
 }
 
-SDOperand DAGTypeLegalizer::CreateStackStoreLoad(SDOperand Op, 
-                                                 MVT::ValueType DestVT) {
+SDOperand DAGTypeLegalizer::CreateStackStoreLoad(SDOperand Op,
+                                                 MVT DestVT) {
   // Create the stack frame object.
   SDOperand FIPtr = DAG.CreateStackTemporary(DestVT);
-  
+
   // Emit a store to the stack slot.
   SDOperand Store = DAG.getStore(DAG.getEntryNode(), Op, FIPtr, NULL, 0);
   // Result is a load from the stack slot.
@@ -490,14 +561,13 @@
 
 /// JoinIntegers - Build an integer with low bits Lo and high bits Hi.
 SDOperand DAGTypeLegalizer::JoinIntegers(SDOperand Lo, SDOperand Hi) {
-  MVT::ValueType LVT = Lo.getValueType();
-  MVT::ValueType HVT = Hi.getValueType();
-  MVT::ValueType NVT = MVT::getIntegerType(MVT::getSizeInBits(LVT) +
-                                           MVT::getSizeInBits(HVT));
+  MVT LVT = Lo.getValueType();
+  MVT HVT = Hi.getValueType();
+  MVT NVT = MVT::getIntegerVT(LVT.getSizeInBits() + HVT.getSizeInBits());
 
   Lo = DAG.getNode(ISD::ZERO_EXTEND, NVT, Lo);
   Hi = DAG.getNode(ISD::ANY_EXTEND, NVT, Hi);
-  Hi = DAG.getNode(ISD::SHL, NVT, Hi, DAG.getConstant(MVT::getSizeInBits(LVT),
+  Hi = DAG.getNode(ISD::SHL, NVT, Hi, DAG.getConstant(LVT.getSizeInBits(),
                                                       TLI.getShiftAmountTy()));
   return DAG.getNode(ISD::OR, NVT, Lo, Hi);
 }
@@ -505,13 +575,13 @@
 /// SplitInteger - Return the lower LoVT bits of Op in Lo and the upper HiVT
 /// bits in Hi.
 void DAGTypeLegalizer::SplitInteger(SDOperand Op,
-                                    MVT::ValueType LoVT, MVT::ValueType HiVT,
+                                    MVT LoVT, MVT HiVT,
                                     SDOperand &Lo, SDOperand &Hi) {
-  assert(MVT::getSizeInBits(LoVT) + MVT::getSizeInBits(HiVT) ==
-         MVT::getSizeInBits(Op.getValueType()) && "Invalid integer splitting!");
+  assert(LoVT.getSizeInBits() + HiVT.getSizeInBits() ==
+         Op.getValueType().getSizeInBits() && "Invalid integer splitting!");
   Lo = DAG.getNode(ISD::TRUNCATE, LoVT, Op);
   Hi = DAG.getNode(ISD::SRL, Op.getValueType(), Op,
-                   DAG.getConstant(MVT::getSizeInBits(LoVT),
+                   DAG.getConstant(LoVT.getSizeInBits(),
                                    TLI.getShiftAmountTy()));
   Hi = DAG.getNode(ISD::TRUNCATE, HiVT, Hi);
 }
@@ -520,14 +590,13 @@
 /// half the size of Op's.
 void DAGTypeLegalizer::SplitInteger(SDOperand Op,
                                     SDOperand &Lo, SDOperand &Hi) {
-  MVT::ValueType HalfVT =
-    MVT::getIntegerType(MVT::getSizeInBits(Op.getValueType())/2);
+  MVT HalfVT = MVT::getIntegerVT(Op.getValueType().getSizeInBits()/2);
   SplitInteger(Op, HalfVT, HalfVT, Lo, Hi);
 }
 
 /// MakeLibCall - Generate a libcall taking the given operands as arguments and
 /// returning a result of type RetVT.
-SDOperand DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, MVT::ValueType RetVT,
+SDOperand DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, MVT RetVT,
                                         const SDOperand *Ops, unsigned NumOps,
                                         bool isSigned) {
   TargetLowering::ArgListTy Args;
@@ -536,7 +605,7 @@
   TargetLowering::ArgListEntry Entry;
   for (unsigned i = 0; i != NumOps; ++i) {
     Entry.Node = Ops[i];
-    Entry.Ty = MVT::getTypeForValueType(Entry.Node.getValueType());
+    Entry.Ty = Entry.Node.getValueType().getTypeForMVT();
     Entry.isSExt = isSigned;
     Entry.isZExt = !isSigned;
     Args.push_back(Entry);
@@ -544,13 +613,50 @@
   SDOperand Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
                                            TLI.getPointerTy());
 
-  const Type *RetTy = MVT::getTypeForValueType(RetVT);
+  const Type *RetTy = RetVT.getTypeForMVT();
   std::pair<SDOperand,SDOperand> CallInfo =
     TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
                     CallingConv::C, false, Callee, Args, DAG);
   return CallInfo.first;
 }
 
+SDOperand DAGTypeLegalizer::GetVectorElementPointer(SDOperand VecPtr, MVT EltVT,
+                                                    SDOperand Index) {
+  // Make sure the index type is big enough to compute in.
+  if (Index.getValueType().bitsGT(TLI.getPointerTy()))
+    Index = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), Index);
+  else
+    Index = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), Index);
+
+  // Calculate the element offset and add it to the pointer.
+  unsigned EltSize = EltVT.getSizeInBits() / 8; // FIXME: should be ABI size.
+
+  Index = DAG.getNode(ISD::MUL, Index.getValueType(), Index,
+                      DAG.getConstant(EltSize, Index.getValueType()));
+  return DAG.getNode(ISD::ADD, Index.getValueType(), Index, VecPtr);
+}
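
The address computation is simply base + index * element-size, with the caveat
from the FIXME that the raw bit size is used rather than the ABI store size. A
standalone sketch of the same arithmetic on a plain buffer (the function name
is illustrative only):

    #include <cstddef>

    // Compute the address of element Index of a vector whose elements are
    // EltBits wide, mirroring the MUL/ADD sequence above.  As in the FIXME,
    // this uses EltBits/8 rather than the ABI store size.
    static const char *ElementPointer(const char *VecPtr, unsigned EltBits,
                                      std::size_t Index) {
      std::size_t EltSize = EltBits / 8;   // bytes per element
      return VecPtr + Index * EltSize;     // mul, then add
    }

    int main() {
      float V[4] = {0.0f, 1.0f, 2.0f, 3.0f};
      const char *P =
          ElementPointer(reinterpret_cast<const char *>(V), 32, 2);
      return P == reinterpret_cast<const char *>(&V[2]) ? 0 : 1;
    }
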
+
+/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
+/// which is split into two not necessarily identical pieces.
+void DAGTypeLegalizer::GetSplitDestVTs(MVT InVT, MVT &LoVT, MVT &HiVT) {
+  if (!InVT.isVector()) {
+    LoVT = HiVT = TLI.getTypeToTransformTo(InVT);
+  } else {
+    MVT NewEltVT = InVT.getVectorElementType();
+    unsigned NumElements = InVT.getVectorNumElements();
+    if ((NumElements & (NumElements-1)) == 0) {  // Simple power of two vector.
+      NumElements >>= 1;
+      LoVT = HiVT =  MVT::getVectorVT(NewEltVT, NumElements);
+    } else {                                     // Non-power-of-two vectors.
+      unsigned NewNumElts_Lo = 1 << Log2_32(NumElements);
+      unsigned NewNumElts_Hi = NumElements - NewNumElts_Lo;
+      LoVT = MVT::getVectorVT(NewEltVT, NewNumElts_Lo);
+      HiVT = MVT::getVectorVT(NewEltVT, NewNumElts_Hi);
+    }
+  }
+}
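
For non-power-of-two vectors the low part takes the largest power of two not
exceeding the element count and the high part takes the remainder, so for
example a 3-element vector splits into a 2-element low part and a 1-element
high part. A standalone sketch of that element-count arithmetic (Log2Floor
stands in for Log2_32):

    #include <cassert>

    // Floor of log2 of N (N must be non-zero).
    static unsigned Log2Floor(unsigned N) {
      unsigned L = 0;
      while (N >>= 1) ++L;
      return L;
    }

    // Split an element count the way GetSplitDestVTs does: power-of-two
    // counts split evenly, others split at the largest power of two.
    static void SplitElementCount(unsigned NumElts, unsigned &Lo, unsigned &Hi) {
      if ((NumElts & (NumElts - 1)) == 0) {
        Lo = Hi = NumElts / 2;
      } else {
        Lo = 1u << Log2Floor(NumElts);
        Hi = NumElts - Lo;
      }
    }

    int main() {
      unsigned Lo, Hi;
      SplitElementCount(8, Lo, Hi); assert(Lo == 4 && Hi == 4);
      SplitElementCount(3, Lo, Hi); assert(Lo == 2 && Hi == 1);
      SplitElementCount(7, Lo, Hi); assert(Lo == 4 && Hi == 3);
      return 0;
    }
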
+
+
 //===----------------------------------------------------------------------===//
 //  Entry Point
 //===----------------------------------------------------------------------===//
@@ -562,6 +668,6 @@
 /// the graph.
 void SelectionDAG::LegalizeTypes() {
   if (ViewLegalizeTypesDAGs) viewGraph();
-  
+
   DAGTypeLegalizer(*this).run();
 }

Modified: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypes.h?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypes.h (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypes.h Sun Jul  6 15:45:41 2008
@@ -47,55 +47,58 @@
     /// ReadyToProcess - All operands have been processed, so this node is ready
     /// to be handled.
     ReadyToProcess = 0,
-    
+
     /// NewNode - This is a new node that was created in the process of
     /// legalizing some other node.
     NewNode = -1,
-    
+
     /// Processed - This is a node that has already been processed.
     Processed = -2
-    
+
     // 1+ - This is a node which has this many unlegalized operands.
   };
 private:
   enum LegalizeAction {
-    Legal,      // The target natively supports this type.
-    Promote,    // This type should be executed in a larger type.
-    Expand,     // This type should be split into two types of half the size.
-    FloatToInt, // Convert a floating point type to an integer of the same size.
-    Scalarize,  // Replace this one-element vector type with its element type.
-    Split       // This vector type should be split into smaller vectors.
+    Legal,          // The target natively supports this type.
+    PromoteInteger, // Replace this integer type with a larger one.
+    ExpandInteger,  // Split this integer type into two of half the size.
+    SoftenFloat,    // Convert this float type to a same size integer type.
+    ExpandFloat,    // Split this float type into two of half the size.
+    Scalarize,      // Replace this one-element vector type with its element type.
+    Split           // This vector type should be split into smaller vectors.
   };
 
   /// ValueTypeActions - This is a bitvector that contains two bits for each
   /// simple value type, where the two bits correspond to the LegalizeAction
   /// enum from TargetLowering.  This can be queried with "getTypeAction(VT)".
   TargetLowering::ValueTypeActionImpl ValueTypeActions;
-  
+
   /// getTypeAction - Return how we should legalize values of this type, either
   /// it is already legal, or we need to promote it to a larger integer type, or
   /// we need to expand it into multiple registers of a smaller integer type, or
   /// we need to scalarize a one-element vector type into the element type, or
   /// we need to split a vector type into smaller vector types.
-  LegalizeAction getTypeAction(MVT::ValueType VT) const {
+  LegalizeAction getTypeAction(MVT VT) const {
     switch (ValueTypeActions.getTypeAction(VT)) {
     default:
       assert(false && "Unknown legalize action!");
     case TargetLowering::Legal:
       return Legal;
     case TargetLowering::Promote:
-      return Promote;
+      return PromoteInteger;
     case TargetLowering::Expand:
       // Expand can mean
       // 1) split scalar in half, 2) convert a float to an integer,
       // 3) scalarize a single-element vector, 4) split a vector in two.
-      if (!MVT::isVector(VT)) {
-        if (MVT::getSizeInBits(VT) ==
-            MVT::getSizeInBits(TLI.getTypeToTransformTo(VT)))
-          return FloatToInt;
+      if (!VT.isVector()) {
+        if (VT.isInteger())
+          return ExpandInteger;
+        else if (VT.getSizeInBits() ==
+                 TLI.getTypeToTransformTo(VT).getSizeInBits())
+          return SoftenFloat;
         else
-          return Expand;
-      } else if (MVT::getVectorNumElements(VT) == 1) {
+          return ExpandFloat;
+      } else if (VT.getVectorNumElements() == 1) {
         return Scalarize;
       } else {
         return Split;
@@ -104,30 +107,34 @@
   }
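
The Expand case is disambiguated purely from properties of the type: illegal
integers are expanded, floats whose transformed type has the same size are
softened to integers, other floats are expanded, one-element vectors are
scalarized, and wider vectors are split. A standalone sketch of that decision,
using a simplified type descriptor in place of MVT (names are illustrative):

    // Just the properties the decision needs.
    struct SimpleVT {
      bool IsVector;
      bool IsInteger;    // ignored for vectors
      unsigned Bits;     // scalar size in bits
      unsigned NumElts;  // vector element count
    };

    enum Action { ExpandInteger, SoftenFloat, ExpandFloat, Scalarize, Split };

    // Classify a type that TargetLowering marked Expand, the same way
    // getTypeAction does.  TransformedBits plays the role of
    // TLI.getTypeToTransformTo(VT).getSizeInBits().
    static Action ClassifyExpand(const SimpleVT &VT, unsigned TransformedBits) {
      if (!VT.IsVector) {
        if (VT.IsInteger)
          return ExpandInteger;            // e.g. i64 on a 32-bit target
        if (VT.Bits == TransformedBits)
          return SoftenFloat;              // e.g. f32 -> i32 soft float
        return ExpandFloat;                // e.g. ppcf128 -> two f64
      }
      return VT.NumElts == 1 ? Scalarize : Split;
    }

    int main() {
      SimpleVT I64 = {false, true, 64, 1};
      SimpleVT F32 = {false, false, 32, 1};
      return (ClassifyExpand(I64, 32) == ExpandInteger &&
              ClassifyExpand(F32, 32) == SoftenFloat) ? 0 : 1;
    }
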
 
   /// isTypeLegal - Return true if this type is legal on this target.
-  bool isTypeLegal(MVT::ValueType VT) const {
+  bool isTypeLegal(MVT VT) const {
     return ValueTypeActions.getTypeAction(VT) == TargetLowering::Legal;
   }
 
-  /// PromotedNodes - For nodes that are below legal width, this map indicates
-  /// what promoted value to use.
-  DenseMap<SDOperand, SDOperand> PromotedNodes;
-  
-  /// ExpandedNodes - For nodes that need to be expanded this map indicates
-  /// which operands are the expanded version of the input.
-  DenseMap<SDOperand, std::pair<SDOperand, SDOperand> > ExpandedNodes;
+  /// PromotedIntegers - For integer nodes that are below legal width, this map
+  /// indicates what promoted value to use.
+  DenseMap<SDOperand, SDOperand> PromotedIntegers;
+
+  /// ExpandedIntegers - For integer nodes that need to be expanded this map
+  /// indicates which operands are the expanded version of the input.
+  DenseMap<SDOperand, std::pair<SDOperand, SDOperand> > ExpandedIntegers;
 
-  /// FloatToIntedNodes - For floating point nodes converted to integers of
+  /// SoftenedFloats - For floating point nodes converted to integers of
   /// the same size, this map indicates the converted value to use.
-  DenseMap<SDOperand, SDOperand> FloatToIntedNodes;
+  DenseMap<SDOperand, SDOperand> SoftenedFloats;
+
+  /// ExpandedFloats - For float nodes that need to be expanded this map
+  /// indicates which operands are the expanded version of the input.
+  DenseMap<SDOperand, std::pair<SDOperand, SDOperand> > ExpandedFloats;
 
-  /// ScalarizedNodes - For nodes that are <1 x ty>, this map indicates the
+  /// ScalarizedVectors - For nodes that are <1 x ty>, this map indicates the
   /// scalar value of type 'ty' to use.
-  DenseMap<SDOperand, SDOperand> ScalarizedNodes;
+  DenseMap<SDOperand, SDOperand> ScalarizedVectors;
 
-  /// SplitNodes - For nodes that need to be split this map indicates
+  /// SplitVectors - For nodes that need to be split this map indicates
   /// which operands are the expanded version of the input.
-  DenseMap<SDOperand, std::pair<SDOperand, SDOperand> > SplitNodes;
-  
+  DenseMap<SDOperand, std::pair<SDOperand, SDOperand> > SplitVectors;
+
   /// ReplacedNodes - For nodes that have been replaced with another,
   /// indicates the replacement node to use.
   DenseMap<SDOperand, SDOperand> ReplacedNodes;
@@ -136,17 +143,17 @@
   /// pushed onto this worklist, all operands of a node must have already been
   /// processed.
   SmallVector<SDNode*, 128> Worklist;
-  
+
 public:
   explicit DAGTypeLegalizer(SelectionDAG &dag)
     : TLI(dag.getTargetLoweringInfo()), DAG(dag),
     ValueTypeActions(TLI.getValueTypeActions()) {
     assert(MVT::LAST_VALUETYPE <= 32 &&
            "Too many value types for ValueTypeActions to hold!");
-  }      
-  
+  }
+
   void run();
-  
+
   /// ReanalyzeNode - Recompute the NodeID and correct processed operands
   /// for the specified node, adding it to the worklist if ready.
   void ReanalyzeNode(SDNode *N) {
@@ -154,6 +161,13 @@
     AnalyzeNewNode(N);
   }
 
+  void NoteDeletion(SDNode *Old, SDNode *New) {
+    ExpungeNode(Old);
+    ExpungeNode(New);
+    for (unsigned i = 0, e = Old->getNumValues(); i != e; ++i)
+      ReplacedNodes[SDOperand(Old, i)] = SDOperand(New, i);
+  }
+
 private:
   void AnalyzeNewNode(SDNode *&N);
 
@@ -161,240 +175,329 @@
   void ReplaceNodeWith(SDNode *From, SDNode *To);
 
   void RemapNode(SDOperand &N);
+  void ExpungeNode(SDNode *N);
 
   // Common routines.
+  SDOperand CreateStackStoreLoad(SDOperand Op, MVT DestVT);
+  SDOperand MakeLibCall(RTLIB::Libcall LC, MVT RetVT,
+                        const SDOperand *Ops, unsigned NumOps, bool isSigned);
+
   SDOperand BitConvertToInteger(SDOperand Op);
-  SDOperand CreateStackStoreLoad(SDOperand Op, MVT::ValueType DestVT);
   SDOperand JoinIntegers(SDOperand Lo, SDOperand Hi);
   void SplitInteger(SDOperand Op, SDOperand &Lo, SDOperand &Hi);
-  void SplitInteger(SDOperand Op, MVT::ValueType LoVT, MVT::ValueType HiVT,
+  void SplitInteger(SDOperand Op, MVT LoVT, MVT HiVT,
                     SDOperand &Lo, SDOperand &Hi);
-  SDOperand MakeLibCall(RTLIB::Libcall LC, MVT::ValueType RetVT,
-                        const SDOperand *Ops, unsigned NumOps, bool isSigned);
+
+  SDOperand GetVectorElementPointer(SDOperand VecPtr, MVT EltVT,
+                                    SDOperand Index);
 
   //===--------------------------------------------------------------------===//
-  // Promotion Support: LegalizeTypesPromote.cpp
+  // Integer Promotion Support: LegalizeIntegerTypes.cpp
   //===--------------------------------------------------------------------===//
-  
-  SDOperand GetPromotedOp(SDOperand Op) {
-    SDOperand &PromotedOp = PromotedNodes[Op];
+
+  SDOperand GetPromotedInteger(SDOperand Op) {
+    SDOperand &PromotedOp = PromotedIntegers[Op];
     RemapNode(PromotedOp);
     assert(PromotedOp.Val && "Operand wasn't promoted?");
     return PromotedOp;
   }
-  void SetPromotedOp(SDOperand Op, SDOperand Result);
-  
-  /// GetPromotedZExtOp - Get a promoted operand and zero extend it to the final
-  /// size.
-  SDOperand GetPromotedZExtOp(SDOperand Op) {
-    MVT::ValueType OldVT = Op.getValueType();
-    Op = GetPromotedOp(Op);
+  void SetPromotedInteger(SDOperand Op, SDOperand Result);
+
+  /// ZExtPromotedInteger - Get a promoted operand and zero extend it to the
+  /// final size.
+  SDOperand ZExtPromotedInteger(SDOperand Op) {
+    MVT OldVT = Op.getValueType();
+    Op = GetPromotedInteger(Op);
     return DAG.getZeroExtendInReg(Op, OldVT);
-  }    
-    
-  // Result Promotion.
-  void PromoteResult(SDNode *N, unsigned ResNo);
-  SDOperand PromoteResult_BIT_CONVERT(SDNode *N);
-  SDOperand PromoteResult_BUILD_PAIR(SDNode *N);
-  SDOperand PromoteResult_Constant(SDNode *N);
-  SDOperand PromoteResult_CTLZ(SDNode *N);
-  SDOperand PromoteResult_CTPOP(SDNode *N);
-  SDOperand PromoteResult_CTTZ(SDNode *N);
-  SDOperand PromoteResult_EXTRACT_VECTOR_ELT(SDNode *N);
-  SDOperand PromoteResult_FP_ROUND(SDNode *N);
-  SDOperand PromoteResult_FP_TO_XINT(SDNode *N);
-  SDOperand PromoteResult_INT_EXTEND(SDNode *N);
-  SDOperand PromoteResult_LOAD(LoadSDNode *N);
-  SDOperand PromoteResult_SDIV(SDNode *N);
-  SDOperand PromoteResult_SELECT   (SDNode *N);
-  SDOperand PromoteResult_SELECT_CC(SDNode *N);
-  SDOperand PromoteResult_SETCC(SDNode *N);
-  SDOperand PromoteResult_SHL(SDNode *N);
-  SDOperand PromoteResult_SimpleIntBinOp(SDNode *N);
-  SDOperand PromoteResult_SRA(SDNode *N);
-  SDOperand PromoteResult_SRL(SDNode *N);
-  SDOperand PromoteResult_TRUNCATE(SDNode *N);
-  SDOperand PromoteResult_UDIV(SDNode *N);
-  SDOperand PromoteResult_UNDEF(SDNode *N);
-
-  // Operand Promotion.
-  bool PromoteOperand(SDNode *N, unsigned OperandNo);
-  SDOperand PromoteOperand_ANY_EXTEND(SDNode *N);
-  SDOperand PromoteOperand_BUILD_PAIR(SDNode *N);
-  SDOperand PromoteOperand_BR_CC(SDNode *N, unsigned OpNo);
-  SDOperand PromoteOperand_BRCOND(SDNode *N, unsigned OpNo);
-  SDOperand PromoteOperand_BUILD_VECTOR(SDNode *N);
-  SDOperand PromoteOperand_FP_EXTEND(SDNode *N);
-  SDOperand PromoteOperand_FP_ROUND(SDNode *N);
-  SDOperand PromoteOperand_INT_TO_FP(SDNode *N);
-  SDOperand PromoteOperand_INSERT_VECTOR_ELT(SDNode *N, unsigned OpNo);
-  SDOperand PromoteOperand_MEMBARRIER(SDNode *N);
-  SDOperand PromoteOperand_RET(SDNode *N, unsigned OpNo);
-  SDOperand PromoteOperand_SELECT(SDNode *N, unsigned OpNo);
-  SDOperand PromoteOperand_SETCC(SDNode *N, unsigned OpNo);
-  SDOperand PromoteOperand_SIGN_EXTEND(SDNode *N);
-  SDOperand PromoteOperand_STORE(StoreSDNode *N, unsigned OpNo);
-  SDOperand PromoteOperand_TRUNCATE(SDNode *N);
-  SDOperand PromoteOperand_ZERO_EXTEND(SDNode *N);
+  }
+
+  // Integer Result Promotion.
+  void PromoteIntegerResult(SDNode *N, unsigned ResNo);
+  SDOperand PromoteIntRes_BIT_CONVERT(SDNode *N);
+  SDOperand PromoteIntRes_BUILD_PAIR(SDNode *N);
+  SDOperand PromoteIntRes_Constant(SDNode *N);
+  SDOperand PromoteIntRes_CTLZ(SDNode *N);
+  SDOperand PromoteIntRes_CTPOP(SDNode *N);
+  SDOperand PromoteIntRes_CTTZ(SDNode *N);
+  SDOperand PromoteIntRes_EXTRACT_VECTOR_ELT(SDNode *N);
+  SDOperand PromoteIntRes_FP_ROUND(SDNode *N);
+  SDOperand PromoteIntRes_FP_TO_XINT(SDNode *N);
+  SDOperand PromoteIntRes_INT_EXTEND(SDNode *N);
+  SDOperand PromoteIntRes_LOAD(LoadSDNode *N);
+  SDOperand PromoteIntRes_SDIV(SDNode *N);
+  SDOperand PromoteIntRes_SELECT   (SDNode *N);
+  SDOperand PromoteIntRes_SELECT_CC(SDNode *N);
+  SDOperand PromoteIntRes_SETCC(SDNode *N);
+  SDOperand PromoteIntRes_SHL(SDNode *N);
+  SDOperand PromoteIntRes_SimpleIntBinOp(SDNode *N);
+  SDOperand PromoteIntRes_SRA(SDNode *N);
+  SDOperand PromoteIntRes_SRL(SDNode *N);
+  SDOperand PromoteIntRes_TRUNCATE(SDNode *N);
+  SDOperand PromoteIntRes_UDIV(SDNode *N);
+  SDOperand PromoteIntRes_UNDEF(SDNode *N);
+  SDOperand PromoteIntRes_VAARG(SDNode *N);
+
+  // Integer Operand Promotion.
+  bool PromoteIntegerOperand(SDNode *N, unsigned OperandNo);
+  SDOperand PromoteIntOp_ANY_EXTEND(SDNode *N);
+  SDOperand PromoteIntOp_BUILD_PAIR(SDNode *N);
+  SDOperand PromoteIntOp_BR_CC(SDNode *N, unsigned OpNo);
+  SDOperand PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo);
+  SDOperand PromoteIntOp_BUILD_VECTOR(SDNode *N);
+  SDOperand PromoteIntOp_FP_EXTEND(SDNode *N);
+  SDOperand PromoteIntOp_FP_ROUND(SDNode *N);
+  SDOperand PromoteIntOp_INT_TO_FP(SDNode *N);
+  SDOperand PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N, unsigned OpNo);
+  SDOperand PromoteIntOp_MEMBARRIER(SDNode *N);
+  SDOperand PromoteIntOp_SELECT(SDNode *N, unsigned OpNo);
+  SDOperand PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo);
+  SDOperand PromoteIntOp_SETCC(SDNode *N, unsigned OpNo);
+  SDOperand PromoteIntOp_SIGN_EXTEND(SDNode *N);
+  SDOperand PromoteIntOp_STORE(StoreSDNode *N, unsigned OpNo);
+  SDOperand PromoteIntOp_TRUNCATE(SDNode *N);
+  SDOperand PromoteIntOp_ZERO_EXTEND(SDNode *N);
 
   void PromoteSetCCOperands(SDOperand &LHS,SDOperand &RHS, ISD::CondCode Code);
 
   //===--------------------------------------------------------------------===//
-  // Expansion Support: LegalizeTypesExpand.cpp
+  // Integer Expansion Support: LegalizeIntegerTypes.cpp
   //===--------------------------------------------------------------------===//
-  
-  void GetExpandedOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi);
-  void SetExpandedOp(SDOperand Op, SDOperand Lo, SDOperand Hi);
-    
-  // Result Expansion.
-  void ExpandResult(SDNode *N, unsigned ResNo);
-  void ExpandResult_ANY_EXTEND (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_AssertZext (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_BIT_CONVERT(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_BUILD_PAIR (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_Constant   (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_CTLZ       (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_CTPOP      (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_CTTZ       (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_EXTRACT_VECTOR_ELT(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_LOAD       (LoadSDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_MERGE_VALUES(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_SIGN_EXTEND(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_SIGN_EXTEND_INREG(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_TRUNCATE   (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_UNDEF      (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_ZERO_EXTEND(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_FP_TO_SINT (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_FP_TO_UINT (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-
-  void ExpandResult_Logical    (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_BSWAP      (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_ADDSUB     (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_ADDSUBC    (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_ADDSUBE    (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_SELECT     (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_SELECT_CC  (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_MUL        (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_SDIV       (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_SREM       (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_UDIV       (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_UREM       (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandResult_Shift      (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  
-  void ExpandShiftByConstant(SDNode *N, unsigned Amt, 
+
+  void GetExpandedInteger(SDOperand Op, SDOperand &Lo, SDOperand &Hi);
+  void SetExpandedInteger(SDOperand Op, SDOperand Lo, SDOperand Hi);
+
+  // Integer Result Expansion.
+  void ExpandIntegerResult(SDNode *N, unsigned ResNo);
+  void ExpandIntRes_ANY_EXTEND        (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_AssertZext        (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_Constant          (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_CTLZ              (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_CTPOP             (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_CTTZ              (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_LOAD          (LoadSDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_SIGN_EXTEND       (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_SIGN_EXTEND_INREG (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_TRUNCATE          (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_ZERO_EXTEND       (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_FP_TO_SINT        (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_FP_TO_UINT        (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+
+  void ExpandIntRes_Logical           (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_ADDSUB            (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_ADDSUBC           (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_ADDSUBE           (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_BSWAP             (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_MUL               (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_SDIV              (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_SREM              (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_UDIV              (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_UREM              (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_Shift             (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+
+  void ExpandShiftByConstant(SDNode *N, unsigned Amt,
                              SDOperand &Lo, SDOperand &Hi);
   bool ExpandShiftWithKnownAmountBit(SDNode *N, SDOperand &Lo, SDOperand &Hi);
 
-  // Operand Expansion.
-  bool ExpandOperand(SDNode *N, unsigned OperandNo);
-  SDOperand ExpandOperand_BIT_CONVERT(SDNode *N);
-  SDOperand ExpandOperand_BR_CC(SDNode *N);
-  SDOperand ExpandOperand_BUILD_VECTOR(SDNode *N);
-  SDOperand ExpandOperand_EXTRACT_ELEMENT(SDNode *N);
-  SDOperand ExpandOperand_SETCC(SDNode *N);
-  SDOperand ExpandOperand_SINT_TO_FP(SDOperand Source, MVT::ValueType DestTy);
-  SDOperand ExpandOperand_STORE(StoreSDNode *N, unsigned OpNo);
-  SDOperand ExpandOperand_TRUNCATE(SDNode *N);
-  SDOperand ExpandOperand_UINT_TO_FP(SDOperand Source, MVT::ValueType DestTy);
+  // Integer Operand Expansion.
+  bool ExpandIntegerOperand(SDNode *N, unsigned OperandNo);
+  SDOperand ExpandIntOp_BIT_CONVERT(SDNode *N);
+  SDOperand ExpandIntOp_BR_CC(SDNode *N);
+  SDOperand ExpandIntOp_BUILD_VECTOR(SDNode *N);
+  SDOperand ExpandIntOp_EXTRACT_ELEMENT(SDNode *N);
+  SDOperand ExpandIntOp_SELECT_CC(SDNode *N);
+  SDOperand ExpandIntOp_SETCC(SDNode *N);
+  SDOperand ExpandIntOp_SINT_TO_FP(SDOperand Source, MVT DestTy);
+  SDOperand ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo);
+  SDOperand ExpandIntOp_TRUNCATE(SDNode *N);
+  SDOperand ExpandIntOp_UINT_TO_FP(SDOperand Source, MVT DestTy);
+
+  void IntegerExpandSetCCOperands(SDOperand &NewLHS, SDOperand &NewRHS,
+                                  ISD::CondCode &CCCode);
 
-  void ExpandSetCCOperands(SDOperand &NewLHS, SDOperand &NewRHS,
-                           ISD::CondCode &CCCode);
-  
   //===--------------------------------------------------------------------===//
-  // Float to Integer Conversion Support: LegalizeTypesFloatToInt.cpp
+  // Float to Integer Conversion Support: LegalizeFloatTypes.cpp
   //===--------------------------------------------------------------------===//
 
-  SDOperand GetIntegerOp(SDOperand Op) {
-    SDOperand &IntegerOp = FloatToIntedNodes[Op];
-    RemapNode(IntegerOp);
-    assert(IntegerOp.Val && "Operand wasn't converted to integer?");
-    return IntegerOp;
+  SDOperand GetSoftenedFloat(SDOperand Op) {
+    SDOperand &SoftenedOp = SoftenedFloats[Op];
+    RemapNode(SoftenedOp);
+    assert(SoftenedOp.Val && "Operand wasn't converted to integer?");
+    return SoftenedOp;
   }
-  void SetIntegerOp(SDOperand Op, SDOperand Result);
+  void SetSoftenedFloat(SDOperand Op, SDOperand Result);
 
   // Result Float to Integer Conversion.
-  void FloatToIntResult(SDNode *N, unsigned OpNo);
-  SDOperand FloatToIntRes_BIT_CONVERT(SDNode *N);
-  SDOperand FloatToIntRes_BUILD_PAIR(SDNode *N);
-  SDOperand FloatToIntRes_ConstantFP(ConstantFPSDNode *N);
-  SDOperand FloatToIntRes_FADD(SDNode *N);
-  SDOperand FloatToIntRes_FCOPYSIGN(SDNode *N);
-  SDOperand FloatToIntRes_FMUL(SDNode *N);
-  SDOperand FloatToIntRes_FSUB(SDNode *N);
-  SDOperand FloatToIntRes_LOAD(SDNode *N);
-  SDOperand FloatToIntRes_XINT_TO_FP(SDNode *N);
+  void SoftenFloatResult(SDNode *N, unsigned OpNo);
+  SDOperand SoftenFloatRes_BIT_CONVERT(SDNode *N);
+  SDOperand SoftenFloatRes_BUILD_PAIR(SDNode *N);
+  SDOperand SoftenFloatRes_ConstantFP(ConstantFPSDNode *N);
+  SDOperand SoftenFloatRes_FADD(SDNode *N);
+  SDOperand SoftenFloatRes_FCOPYSIGN(SDNode *N);
+  SDOperand SoftenFloatRes_FMUL(SDNode *N);
+  SDOperand SoftenFloatRes_FSUB(SDNode *N);
+  SDOperand SoftenFloatRes_LOAD(SDNode *N);
+  SDOperand SoftenFloatRes_XINT_TO_FP(SDNode *N);
 
   // Operand Float to Integer Conversion.
-  bool FloatToIntOperand(SDNode *N, unsigned OpNo);
-  SDOperand FloatToIntOp_BIT_CONVERT(SDNode *N);
+  bool SoftenFloatOperand(SDNode *N, unsigned OpNo);
+  SDOperand SoftenFloatOp_BIT_CONVERT(SDNode *N);
+  SDOperand SoftenFloatOp_BR_CC(SDNode *N);
+  SDOperand SoftenFloatOp_SELECT_CC(SDNode *N);
+  SDOperand SoftenFloatOp_SETCC(SDNode *N);
+
+  void SoftenSetCCOperands(SDOperand &NewLHS, SDOperand &NewRHS,
+                           ISD::CondCode &CCCode);
+
+  //===--------------------------------------------------------------------===//
+  // Float Expansion Support: LegalizeFloatTypes.cpp
+  //===--------------------------------------------------------------------===//
+
+  void GetExpandedFloat(SDOperand Op, SDOperand &Lo, SDOperand &Hi);
+  void SetExpandedFloat(SDOperand Op, SDOperand Lo, SDOperand Hi);
+
+  // Float Result Expansion.
+  void ExpandFloatResult(SDNode *N, unsigned ResNo);
+  void ExpandFloatRes_ConstantFP(SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandFloatRes_FADD      (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandFloatRes_FDIV      (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandFloatRes_FMUL      (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandFloatRes_FSUB      (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandFloatRes_LOAD      (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandFloatRes_XINT_TO_FP(SDNode *N, SDOperand &Lo, SDOperand &Hi);
+
+  // Float Operand Expansion.
+  bool ExpandFloatOperand(SDNode *N, unsigned OperandNo);
+  SDOperand ExpandFloatOp_BR_CC(SDNode *N);
+  SDOperand ExpandFloatOp_FP_ROUND(SDNode *N);
+  SDOperand ExpandFloatOp_FP_TO_SINT(SDNode *N);
+  SDOperand ExpandFloatOp_FP_TO_UINT(SDNode *N);
+  SDOperand ExpandFloatOp_SELECT_CC(SDNode *N);
+  SDOperand ExpandFloatOp_SETCC(SDNode *N);
+  SDOperand ExpandFloatOp_STORE(SDNode *N, unsigned OpNo);
+
+  void FloatExpandSetCCOperands(SDOperand &NewLHS, SDOperand &NewRHS,
+                                ISD::CondCode &CCCode);
 
   //===--------------------------------------------------------------------===//
-  // Scalarization Support: LegalizeTypesScalarize.cpp
+  // Scalarization Support: LegalizeVectorTypes.cpp
   //===--------------------------------------------------------------------===//
-  
-  SDOperand GetScalarizedOp(SDOperand Op) {
-    SDOperand &ScalarOp = ScalarizedNodes[Op];
-    RemapNode(ScalarOp);
-    assert(ScalarOp.Val && "Operand wasn't scalarized?");
-    return ScalarOp;
-  }
-  void SetScalarizedOp(SDOperand Op, SDOperand Result);
-    
-  // Result Vector Scalarization: <1 x ty> -> ty.
+
+  SDOperand GetScalarizedVector(SDOperand Op) {
+    SDOperand &ScalarizedOp = ScalarizedVectors[Op];
+    RemapNode(ScalarizedOp);
+    assert(ScalarizedOp.Val && "Operand wasn't scalarized?");
+    return ScalarizedOp;
+  }
+  void SetScalarizedVector(SDOperand Op, SDOperand Result);
+
+  // Vector Result Scalarization: <1 x ty> -> ty.
   void ScalarizeResult(SDNode *N, unsigned OpNo);
-  SDOperand ScalarizeRes_BinOp(SDNode *N);
-  SDOperand ScalarizeRes_UnaryOp(SDNode *N);
+  SDOperand ScalarizeVecRes_BinOp(SDNode *N);
+  SDOperand ScalarizeVecRes_UnaryOp(SDNode *N);
 
-  SDOperand ScalarizeRes_BIT_CONVERT(SDNode *N);
-  SDOperand ScalarizeRes_FPOWI(SDNode *N);
-  SDOperand ScalarizeRes_INSERT_VECTOR_ELT(SDNode *N);
-  SDOperand ScalarizeRes_LOAD(LoadSDNode *N);
-  SDOperand ScalarizeRes_SELECT(SDNode *N);
-  SDOperand ScalarizeRes_UNDEF(SDNode *N);
-  SDOperand ScalarizeRes_VECTOR_SHUFFLE(SDNode *N);
+  SDOperand ScalarizeVecRes_BIT_CONVERT(SDNode *N);
+  SDOperand ScalarizeVecRes_FPOWI(SDNode *N);
+  SDOperand ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N);
+  SDOperand ScalarizeVecRes_LOAD(LoadSDNode *N);
+  SDOperand ScalarizeVecRes_SELECT(SDNode *N);
+  SDOperand ScalarizeVecRes_UNDEF(SDNode *N);
+  SDOperand ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N);
 
-  // Operand Vector Scalarization: <1 x ty> -> ty.
+  // Vector Operand Scalarization: <1 x ty> -> ty.
   bool ScalarizeOperand(SDNode *N, unsigned OpNo);
-  SDOperand ScalarizeOp_BIT_CONVERT(SDNode *N);
-  SDOperand ScalarizeOp_EXTRACT_VECTOR_ELT(SDNode *N);
-  SDOperand ScalarizeOp_STORE(StoreSDNode *N, unsigned OpNo);
+  SDOperand ScalarizeVecOp_BIT_CONVERT(SDNode *N);
+  SDOperand ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
+  SDOperand ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo);
 
   //===--------------------------------------------------------------------===//
-  // Vector Splitting Support: LegalizeTypesSplit.cpp
-  //===--------------------------------------------------------------------===//
-  
-  void GetSplitOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi);
-  void SetSplitOp(SDOperand Op, SDOperand Lo, SDOperand Hi);
-  
-  // Result Vector Splitting: <128 x ty> -> 2 x <64 x ty>.
+  // Vector Splitting Support: LegalizeVectorTypes.cpp
+  //===--------------------------------------------------------------------===//
+
+  void GetSplitVector(SDOperand Op, SDOperand &Lo, SDOperand &Hi);
+  void SetSplitVector(SDOperand Op, SDOperand Lo, SDOperand Hi);
+
+  // Vector Result Splitting: <128 x ty> -> 2 x <64 x ty>.
   void SplitResult(SDNode *N, unsigned OpNo);
 
-  void SplitRes_UNDEF(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void SplitRes_LOAD(LoadSDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void SplitRes_BUILD_PAIR(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void SplitRes_INSERT_VECTOR_ELT(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void SplitRes_VECTOR_SHUFFLE(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-
-  void SplitRes_BUILD_VECTOR(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void SplitRes_CONCAT_VECTORS(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void SplitRes_BIT_CONVERT(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void SplitRes_UnOp(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void SplitRes_BinOp(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void SplitRes_FPOWI(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void SplitRes_SELECT(SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  
-  // Operand Vector Splitting: <128 x ty> -> 2 x <64 x ty>.
+  void SplitVecRes_UNDEF(SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void SplitVecRes_LOAD(LoadSDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void SplitVecRes_BUILD_PAIR(SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void SplitVecRes_VECTOR_SHUFFLE(SDNode *N, SDOperand &Lo, SDOperand &Hi);
+
+  void SplitVecRes_BUILD_VECTOR(SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void SplitVecRes_CONCAT_VECTORS(SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void SplitVecRes_BIT_CONVERT(SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void SplitVecRes_UnOp(SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void SplitVecRes_BinOp(SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void SplitVecRes_FPOWI(SDNode *N, SDOperand &Lo, SDOperand &Hi);
+
+  // Vector Operand Splitting: <128 x ty> -> 2 x <64 x ty>.
   bool SplitOperand(SDNode *N, unsigned OpNo);
 
-  SDOperand SplitOp_BIT_CONVERT(SDNode *N);
-  SDOperand SplitOp_EXTRACT_SUBVECTOR(SDNode *N);
-  SDOperand SplitOp_EXTRACT_VECTOR_ELT(SDNode *N);
-  SDOperand SplitOp_RET(SDNode *N, unsigned OpNo);
-  SDOperand SplitOp_STORE(StoreSDNode *N, unsigned OpNo);
-  SDOperand SplitOp_VECTOR_SHUFFLE(SDNode *N, unsigned OpNo);
+  SDOperand SplitVecOp_BIT_CONVERT(SDNode *N);
+  SDOperand SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N);
+  SDOperand SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
+  SDOperand SplitVecOp_RET(SDNode *N, unsigned OpNo);
+  SDOperand SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo);
+  SDOperand SplitVecOp_VECTOR_SHUFFLE(SDNode *N, unsigned OpNo);
+
+  //===--------------------------------------------------------------------===//
+  // Generic Splitting: LegalizeTypesGeneric.cpp
+  //===--------------------------------------------------------------------===//
+
+  // Legalization methods which rely only on the fact that the illegal type is
+  // split into two not necessarily identical types.  As such they can be used
+  // for splitting vectors and for expanding integers and floats.
+
+  void GetSplitOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi) {
+    if (Op.getValueType().isVector())
+      GetSplitVector(Op, Lo, Hi);
+    else if (Op.getValueType().isInteger())
+      GetExpandedInteger(Op, Lo, Hi);
+    else
+      GetExpandedFloat(Op, Lo, Hi);
+  }
+
+  /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
+  /// which is split (or expanded) into two not necessarily identical pieces.
+  void GetSplitDestVTs(MVT InVT, MVT &LoVT, MVT &HiVT);
+
+  // Generic Result Splitting.
+  void SplitRes_MERGE_VALUES(SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void SplitRes_SELECT      (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void SplitRes_SELECT_CC   (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void SplitRes_UNDEF       (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+
+  //===--------------------------------------------------------------------===//
+  // Generic Expansion: LegalizeTypesGeneric.cpp
+  //===--------------------------------------------------------------------===//
+
+  // Legalization methods which rely only on the fact that the illegal type is
+  // split into two identical types of half the size, and that the Lo/Hi part
+  // is stored first in memory on little/big-endian machines, followed by the
+  // Hi/Lo part.  As such they can be used for expanding integers and floats.
+
+  void GetExpandedOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi) {
+    if (Op.getValueType().isInteger())
+      GetExpandedInteger(Op, Lo, Hi);
+    else
+      GetExpandedFloat(Op, Lo, Hi);
+  }
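
The memory-layout assumption stated above -- Lo at the lower address on
little-endian targets, Hi at the lower address on big-endian targets -- is
what lets a single load or store of an illegal type be rewritten as two
operations on the half type. A standalone sketch of the store pattern for a
64-bit value split into 32-bit halves; only the placement of the halves is
modeled, the byte order within each half is simply the host's:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>
    #include <utility>

    // Store a 64-bit value as two 32-bit halves, putting whichever half
    // belongs at the lower address first.
    static void StoreExpanded(uint64_t Val, unsigned char *Mem, bool BigEndian) {
      uint32_t Lo = static_cast<uint32_t>(Val);
      uint32_t Hi = static_cast<uint32_t>(Val >> 32);
      if (BigEndian)
        std::swap(Lo, Hi);           // big-endian: Hi half at the low address
      std::memcpy(Mem, &Lo, 4);      // half at offset 0
      std::memcpy(Mem + 4, &Hi, 4);  // half at offset 4
    }

    int main() {
      unsigned char Mem[8];
      StoreExpanded(0x0123456789abcdefULL, Mem, /*BigEndian=*/false);
      std::printf("%02x %02x\n", Mem[0], Mem[4]);  // low half first
      return 0;
    }
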
+
+  // Generic Result Expansion.
+  void ExpandRes_BIT_CONVERT       (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandRes_BUILD_PAIR        (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandRes_EXTRACT_ELEMENT   (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandRes_NormalLoad        (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+
+  // Generic Operand Expansion.
+  SDOperand ExpandOp_BIT_CONVERT    (SDNode *N);
+  SDOperand ExpandOp_BUILD_VECTOR   (SDNode *N);
+  SDOperand ExpandOp_EXTRACT_ELEMENT(SDNode *N);
+  SDOperand ExpandOp_NormalStore    (SDNode *N, unsigned OpNo);
 
-public:
-  void SanityCheck(SDNode *N);
 };
 
 } // end namespace llvm.

Removed: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesExpand.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesExpand.cpp?rev=53162&view=auto

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesExpand.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesExpand.cpp (removed)
@@ -1,1466 +0,0 @@
-//===-- LegalizeTypesExpand.cpp - Expansion for LegalizeTypes -------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements expansion support for LegalizeTypes.  Expansion is the
-// act of changing a computation in an invalid type to be a computation in
-// multiple registers of a smaller type.  For example, implementing i64
-// arithmetic in two i32 registers (as is often needed on 32-bit targets).
-//
-//===----------------------------------------------------------------------===//
-
-#include "LegalizeTypes.h"
-#include "llvm/Constants.h"
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-//  Result Expansion
-//===----------------------------------------------------------------------===//
-
-/// ExpandResult - This method is called when the specified result of the
-/// specified node is found to need expansion.  At this point, the node may also
-/// have invalid operands or may have other results that need promotion; we just
-/// know that (at least) one result needs expansion.
-void DAGTypeLegalizer::ExpandResult(SDNode *N, unsigned ResNo) {
-  DEBUG(cerr << "Expand node result: "; N->dump(&DAG); cerr << "\n");
-  SDOperand Lo, Hi;
-  Lo = Hi = SDOperand();
-
-  // See if the target wants to custom expand this node.
-  if (TLI.getOperationAction(N->getOpcode(), N->getValueType(0)) == 
-          TargetLowering::Custom) {
-    // If the target wants to, allow it to lower this itself.
-    if (SDNode *P = TLI.ExpandOperationResult(N, DAG)) {
-      // Everything that once used N now uses P.  We are guaranteed that the
-      // result value types of N and the result value types of P match.
-      ReplaceNodeWith(N, P);
-      return;
-    }
-  }
-
-  switch (N->getOpcode()) {
-  default:
-#ifndef NDEBUG
-    cerr << "ExpandResult #" << ResNo << ": ";
-    N->dump(&DAG); cerr << "\n";
-#endif
-    assert(0 && "Do not know how to expand the result of this operator!");
-    abort();
-      
-  case ISD::UNDEF:       ExpandResult_UNDEF(N, Lo, Hi); break;
-  case ISD::Constant:    ExpandResult_Constant(N, Lo, Hi); break;
-  case ISD::BUILD_PAIR:  ExpandResult_BUILD_PAIR(N, Lo, Hi); break;
-  case ISD::MERGE_VALUES: ExpandResult_MERGE_VALUES(N, Lo, Hi); break;
-  case ISD::ANY_EXTEND:  ExpandResult_ANY_EXTEND(N, Lo, Hi); break;
-  case ISD::ZERO_EXTEND: ExpandResult_ZERO_EXTEND(N, Lo, Hi); break;
-  case ISD::SIGN_EXTEND: ExpandResult_SIGN_EXTEND(N, Lo, Hi); break;
-  case ISD::AssertZext:  ExpandResult_AssertZext(N, Lo, Hi); break;
-  case ISD::TRUNCATE:    ExpandResult_TRUNCATE(N, Lo, Hi); break;
-  case ISD::BIT_CONVERT: ExpandResult_BIT_CONVERT(N, Lo, Hi); break;
-  case ISD::SIGN_EXTEND_INREG: ExpandResult_SIGN_EXTEND_INREG(N, Lo, Hi); break;
-  case ISD::FP_TO_SINT:  ExpandResult_FP_TO_SINT(N, Lo, Hi); break;
-  case ISD::FP_TO_UINT:  ExpandResult_FP_TO_UINT(N, Lo, Hi); break;
-  case ISD::LOAD:        ExpandResult_LOAD(cast<LoadSDNode>(N), Lo, Hi); break;
-    
-  case ISD::AND:
-  case ISD::OR:
-  case ISD::XOR:         ExpandResult_Logical(N, Lo, Hi); break;
-  case ISD::BSWAP:       ExpandResult_BSWAP(N, Lo, Hi); break;
-  case ISD::ADD:
-  case ISD::SUB:         ExpandResult_ADDSUB(N, Lo, Hi); break;
-  case ISD::ADDC:
-  case ISD::SUBC:        ExpandResult_ADDSUBC(N, Lo, Hi); break;
-  case ISD::ADDE:
-  case ISD::SUBE:        ExpandResult_ADDSUBE(N, Lo, Hi); break;
-  case ISD::SELECT:      ExpandResult_SELECT(N, Lo, Hi); break;
-  case ISD::SELECT_CC:   ExpandResult_SELECT_CC(N, Lo, Hi); break;
-  case ISD::MUL:         ExpandResult_MUL(N, Lo, Hi); break;
-  case ISD::SDIV:        ExpandResult_SDIV(N, Lo, Hi); break;
-  case ISD::SREM:        ExpandResult_SREM(N, Lo, Hi); break;
-  case ISD::UDIV:        ExpandResult_UDIV(N, Lo, Hi); break;
-  case ISD::UREM:        ExpandResult_UREM(N, Lo, Hi); break;
-  case ISD::SHL:
-  case ISD::SRA:
-  case ISD::SRL:         ExpandResult_Shift(N, Lo, Hi); break;
-
-  case ISD::CTLZ:        ExpandResult_CTLZ(N, Lo, Hi); break;
-  case ISD::CTPOP:       ExpandResult_CTPOP(N, Lo, Hi); break;
-  case ISD::CTTZ:        ExpandResult_CTTZ(N, Lo, Hi); break;
-
-  case ISD::EXTRACT_VECTOR_ELT:
-    ExpandResult_EXTRACT_VECTOR_ELT(N, Lo, Hi);
-    break;
-  }
-
-  // If Lo/Hi is null, the sub-method took care of registering results etc.
-  if (Lo.Val)
-    SetExpandedOp(SDOperand(N, ResNo), Lo, Hi);
-}
-
-void DAGTypeLegalizer::ExpandResult_UNDEF(SDNode *N,
-                                          SDOperand &Lo, SDOperand &Hi) {
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0));
-  Lo = Hi = DAG.getNode(ISD::UNDEF, NVT);
-}
-
-void DAGTypeLegalizer::ExpandResult_Constant(SDNode *N,
-                                             SDOperand &Lo, SDOperand &Hi) {
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0));
-  unsigned NBitWidth = MVT::getSizeInBits(NVT);
-  const APInt &Cst = cast<ConstantSDNode>(N)->getAPIntValue();
-  Lo = DAG.getConstant(APInt(Cst).trunc(NBitWidth), NVT);
-  Hi = DAG.getConstant(Cst.lshr(NBitWidth).trunc(NBitWidth), NVT);
-}
-
-void DAGTypeLegalizer::ExpandResult_BUILD_PAIR(SDNode *N,
-                                               SDOperand &Lo, SDOperand &Hi) {
-  // Return the operands.
-  Lo = N->getOperand(0);
-  Hi = N->getOperand(1);
-}
-
-void DAGTypeLegalizer::ExpandResult_MERGE_VALUES(SDNode *N,
-                                                 SDOperand &Lo, SDOperand &Hi) {
-  // A MERGE_VALUES node can produce any number of values.  We know that the
-  // first illegal one needs to be expanded into Lo/Hi.
-  unsigned i;
-  
-  // The string of legal results gets turned into the input operands, which have
-  // the same type.
-  for (i = 0; isTypeLegal(N->getValueType(i)); ++i)
-    ReplaceValueWith(SDOperand(N, i), SDOperand(N->getOperand(i)));
-
-  // The first illegal result must be the one that needs to be expanded.
-  GetExpandedOp(N->getOperand(i), Lo, Hi);
-
-  // Legalize the rest of the results into the input operands whether they are
-  // legal or not.
-  unsigned e = N->getNumValues();
-  for (++i; i != e; ++i)
-    ReplaceValueWith(SDOperand(N, i), SDOperand(N->getOperand(i)));
-}
-
-void DAGTypeLegalizer::ExpandResult_ANY_EXTEND(SDNode *N,
-                                               SDOperand &Lo, SDOperand &Hi) {
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0));
-  SDOperand Op = N->getOperand(0);
-  if (MVT::getSizeInBits(Op.getValueType()) <= MVT::getSizeInBits(NVT)) {
-    // The low part is any extension of the input (which degenerates to a copy).
-    Lo = DAG.getNode(ISD::ANY_EXTEND, NVT, Op);
-    Hi = DAG.getNode(ISD::UNDEF, NVT);   // The high part is undefined.
-  } else {
-    // For example, extension of an i48 to an i64.  The operand type necessarily
-    // promotes to the result type, so will end up being expanded too.
-    assert(getTypeAction(Op.getValueType()) == Promote &&
-           "Only know how to promote this result!");
-    SDOperand Res = GetPromotedOp(Op);
-    assert(Res.getValueType() == N->getValueType(0) &&
-           "Operand over promoted?");
-    // Split the promoted operand.  This will simplify when it is expanded.
-    SplitInteger(Res, Lo, Hi);
-  }
-}
-
-void DAGTypeLegalizer::ExpandResult_ZERO_EXTEND(SDNode *N,
-                                                SDOperand &Lo, SDOperand &Hi) {
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0));
-  SDOperand Op = N->getOperand(0);
-  if (MVT::getSizeInBits(Op.getValueType()) <= MVT::getSizeInBits(NVT)) {
-    // The low part is zero extension of the input (which degenerates to a copy).
-    Lo = DAG.getNode(ISD::ZERO_EXTEND, NVT, N->getOperand(0));
-    Hi = DAG.getConstant(0, NVT);   // The high part is just a zero.
-  } else {
-    // For example, extension of an i48 to an i64.  The operand type necessarily
-    // promotes to the result type, so will end up being expanded too.
-    assert(getTypeAction(Op.getValueType()) == Promote &&
-           "Only know how to promote this result!");
-    SDOperand Res = GetPromotedOp(Op);
-    assert(Res.getValueType() == N->getValueType(0) &&
-           "Operand over promoted?");
-    // Split the promoted operand.  This will simplify when it is expanded.
-    SplitInteger(Res, Lo, Hi);
-    unsigned ExcessBits =
-      MVT::getSizeInBits(Op.getValueType()) - MVT::getSizeInBits(NVT);
-    Hi = DAG.getZeroExtendInReg(Hi, MVT::getIntegerType(ExcessBits));
-  }
-}
-
-void DAGTypeLegalizer::ExpandResult_SIGN_EXTEND(SDNode *N,
-                                                SDOperand &Lo, SDOperand &Hi) {
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0));
-  SDOperand Op = N->getOperand(0);
-  if (MVT::getSizeInBits(Op.getValueType()) <= MVT::getSizeInBits(NVT)) {
-    // The low part is sign extension of the input (which degenerates to a copy).
-    Lo = DAG.getNode(ISD::SIGN_EXTEND, NVT, N->getOperand(0));
-    // The high part is obtained by SRA'ing all but one of the bits of the low part.
-    unsigned LoSize = MVT::getSizeInBits(NVT);
-    Hi = DAG.getNode(ISD::SRA, NVT, Lo,
-                     DAG.getConstant(LoSize-1, TLI.getShiftAmountTy()));
-  } else {
-    // For example, extension of an i48 to an i64.  The operand type necessarily
-    // promotes to the result type, so will end up being expanded too.
-    assert(getTypeAction(Op.getValueType()) == Promote &&
-           "Only know how to promote this result!");
-    SDOperand Res = GetPromotedOp(Op);
-    assert(Res.getValueType() == N->getValueType(0) &&
-           "Operand over promoted?");
-    // Split the promoted operand.  This will simplify when it is expanded.
-    SplitInteger(Res, Lo, Hi);
-    unsigned ExcessBits =
-      MVT::getSizeInBits(Op.getValueType()) - MVT::getSizeInBits(NVT);
-    Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, Hi.getValueType(), Hi,
-                     DAG.getValueType(MVT::getIntegerType(ExcessBits)));
-  }
-}
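
The high half of a sign extension is obtained by arithmetic-shifting the low
half by all but one of its bits, which replicates the sign bit across the
entire high word. A standalone sketch with 32-bit halves (two's-complement
arithmetic shift assumed, as on the usual targets):

    #include <cstdint>
    #include <cassert>

    // Sign-extend a 32-bit value into Lo/Hi 32-bit halves of an i64: the
    // high half is the low half's sign bit replicated, i.e. an arithmetic
    // shift right by 31.
    static void SignExtendToHalves(int32_t Op, uint32_t &Lo, uint32_t &Hi) {
      Lo = static_cast<uint32_t>(Op);
      Hi = static_cast<uint32_t>(Op >> 31);  // SRA by LoSize-1
    }

    int main() {
      uint32_t Lo, Hi;
      SignExtendToHalves(-2, Lo, Hi);
      assert(Lo == 0xfffffffeu && Hi == 0xffffffffu);
      SignExtendToHalves(5, Lo, Hi);
      assert(Lo == 5u && Hi == 0u);
      return 0;
    }
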
-
-void DAGTypeLegalizer::ExpandResult_AssertZext(SDNode *N,
-                                               SDOperand &Lo, SDOperand &Hi) {
-  GetExpandedOp(N->getOperand(0), Lo, Hi);
-  MVT::ValueType NVT = Lo.getValueType();
-  MVT::ValueType EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
-  unsigned NVTBits = MVT::getSizeInBits(NVT);
-  unsigned EVTBits = MVT::getSizeInBits(EVT);
-
-  if (NVTBits < EVTBits) {
-    Hi = DAG.getNode(ISD::AssertZext, NVT, Hi,
-                     DAG.getValueType(MVT::getIntegerType(EVTBits - NVTBits)));
-  } else {
-    Lo = DAG.getNode(ISD::AssertZext, NVT, Lo, DAG.getValueType(EVT));
-    // The high part must be zero, make it explicit.
-    Hi = DAG.getConstant(0, NVT);
-  }
-}
-
-void DAGTypeLegalizer::ExpandResult_TRUNCATE(SDNode *N,
-                                             SDOperand &Lo, SDOperand &Hi) {
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0));
-  Lo = DAG.getNode(ISD::TRUNCATE, NVT, N->getOperand(0));
-  Hi = DAG.getNode(ISD::SRL, N->getOperand(0).getValueType(), N->getOperand(0),
-                   DAG.getConstant(MVT::getSizeInBits(NVT),
-                                   TLI.getShiftAmountTy()));
-  Hi = DAG.getNode(ISD::TRUNCATE, NVT, Hi);
-}
-
-void DAGTypeLegalizer::ExpandResult_BIT_CONVERT(SDNode *N,
-                                                SDOperand &Lo, SDOperand &Hi) {
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0));
-  SDOperand InOp = N->getOperand(0);
-  MVT::ValueType InVT = InOp.getValueType();
-
-  // Handle some special cases efficiently.
-  switch (getTypeAction(InVT)) {
-    default:
-      assert(false && "Unknown type action!");
-    case Legal:
-    case Promote:
-      break;
-    case Expand:
-      // Convert the expanded pieces of the input.
-      GetExpandedOp(InOp, Lo, Hi);
-      Lo = DAG.getNode(ISD::BIT_CONVERT, NVT, Lo);
-      Hi = DAG.getNode(ISD::BIT_CONVERT, NVT, Hi);
-      return;
-    case FloatToInt:
-      // Convert the integer operand instead.
-      SplitInteger(GetIntegerOp(InOp), Lo, Hi);
-      Lo = DAG.getNode(ISD::BIT_CONVERT, NVT, Lo);
-      Hi = DAG.getNode(ISD::BIT_CONVERT, NVT, Hi);
-      return;
-    case Split:
-      // Convert the split parts of the input if it was split in two.
-      GetSplitOp(InOp, Lo, Hi);
-      if (Lo.getValueType() == Hi.getValueType()) {
-        if (TLI.isBigEndian())
-          std::swap(Lo, Hi);
-        Lo = DAG.getNode(ISD::BIT_CONVERT, NVT, Lo);
-        Hi = DAG.getNode(ISD::BIT_CONVERT, NVT, Hi);
-        return;
-      }
-      break;
-    case Scalarize:
-      // Convert the element instead.
-      SplitInteger(BitConvertToInteger(GetScalarizedOp(InOp)), Lo, Hi);
-      Lo = DAG.getNode(ISD::BIT_CONVERT, NVT, Lo);
-      Hi = DAG.getNode(ISD::BIT_CONVERT, NVT, Hi);
-      return;
-  }
-
-  // Lower the bit-convert to a store/load from the stack, then expand the load.
-  SDOperand Op = CreateStackStoreLoad(InOp, N->getValueType(0));
-  ExpandResult_LOAD(cast<LoadSDNode>(Op.Val), Lo, Hi);
-}
-
-void DAGTypeLegalizer::
-ExpandResult_SIGN_EXTEND_INREG(SDNode *N, SDOperand &Lo, SDOperand &Hi) {
-  GetExpandedOp(N->getOperand(0), Lo, Hi);
-  MVT::ValueType EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
-
-  if (MVT::getSizeInBits(EVT) <= MVT::getSizeInBits(Lo.getValueType())) {
-    // sext_inreg the low part if needed.
-    Lo = DAG.getNode(ISD::SIGN_EXTEND_INREG, Lo.getValueType(), Lo,
-                     N->getOperand(1));
-
-    // The high part gets the sign extension from the lo-part.  This handles
-    // things like sextinreg V:i64 from i8.
-    Hi = DAG.getNode(ISD::SRA, Hi.getValueType(), Lo,
-                     DAG.getConstant(MVT::getSizeInBits(Hi.getValueType())-1,
-                                     TLI.getShiftAmountTy()));
-  } else {
-    // For example, extension of an i48 to an i64.  Leave the low part alone,
-    // sext_inreg the high part.
-    unsigned ExcessBits =
-      MVT::getSizeInBits(EVT) - MVT::getSizeInBits(Lo.getValueType());
-    Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, Hi.getValueType(), Hi,
-                     DAG.getValueType(MVT::getIntegerType(ExcessBits)));
-  }
-}
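
On scalars, sext_inreg from a narrow width is a shift left followed by an
arithmetic shift right by the same amount. A standalone sketch, assuming sign
extension from the low FromBits bits within a 32-bit register (two's-complement
wrap on the intermediate cast assumed):

    #include <cstdint>
    #include <cassert>

    // Sign-extend the low FromBits bits of Val in place within a 32-bit
    // word -- the scalar analogue of ISD::SIGN_EXTEND_INREG.
    static int32_t SignExtendInReg(uint32_t Val, unsigned FromBits) {
      unsigned Shift = 32 - FromBits;
      return static_cast<int32_t>(Val << Shift) >> Shift;  // shl, then sra
    }

    int main() {
      assert(SignExtendInReg(0x000000ffu, 8) == -1);   // i8 -1 widened to i32
      assert(SignExtendInReg(0x0000007fu, 8) == 127);  // i8 127 stays positive
      return 0;
    }
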
-
-void DAGTypeLegalizer::ExpandResult_FP_TO_SINT(SDNode *N, SDOperand &Lo,
-                                               SDOperand &Hi) {
-  MVT::ValueType VT = N->getValueType(0);
-  SDOperand Op = N->getOperand(0);
-  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
-  if (VT == MVT::i64) {
-    if (Op.getValueType() == MVT::f32)
-      LC = RTLIB::FPTOSINT_F32_I64;
-    else if (Op.getValueType() == MVT::f64)
-      LC = RTLIB::FPTOSINT_F64_I64;
-    else if (Op.getValueType() == MVT::f80)
-      LC = RTLIB::FPTOSINT_F80_I64;
-    else if (Op.getValueType() == MVT::ppcf128)
-      LC = RTLIB::FPTOSINT_PPCF128_I64;
-  } else if (VT == MVT::i128) {
-    if (Op.getValueType() == MVT::f32)
-      LC = RTLIB::FPTOSINT_F32_I128;
-    else if (Op.getValueType() == MVT::f64)
-      LC = RTLIB::FPTOSINT_F64_I128;
-    else if (Op.getValueType() == MVT::f80)
-      LC = RTLIB::FPTOSINT_F80_I128;
-    else if (Op.getValueType() == MVT::ppcf128)
-      LC = RTLIB::FPTOSINT_PPCF128_I128;
-  } else {
-    assert(0 && "Unexpected fp-to-sint conversion!");
-  }
-  SplitInteger(MakeLibCall(LC, VT, &Op, 1, true/*sign irrelevant*/), Lo, Hi);
-}
-
-void DAGTypeLegalizer::ExpandResult_FP_TO_UINT(SDNode *N, SDOperand &Lo,
-                                               SDOperand &Hi) {
-  MVT::ValueType VT = N->getValueType(0);
-  SDOperand Op = N->getOperand(0);
-  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
-  if (VT == MVT::i64) {
-    if (Op.getValueType() == MVT::f32)
-      LC = RTLIB::FPTOUINT_F32_I64;
-    else if (Op.getValueType() == MVT::f64)
-      LC = RTLIB::FPTOUINT_F64_I64;
-    else if (Op.getValueType() == MVT::f80)
-      LC = RTLIB::FPTOUINT_F80_I64;
-    else if (Op.getValueType() == MVT::ppcf128)
-      LC = RTLIB::FPTOUINT_PPCF128_I64;
-  } else if (VT == MVT::i128) {
-    if (Op.getValueType() == MVT::f32)
-      LC = RTLIB::FPTOUINT_F32_I128;
-    else if (Op.getValueType() == MVT::f64)
-      LC = RTLIB::FPTOUINT_F64_I128;
-    else if (Op.getValueType() == MVT::f80)
-      LC = RTLIB::FPTOUINT_F80_I128;
-    else if (Op.getValueType() == MVT::ppcf128)
-      LC = RTLIB::FPTOUINT_PPCF128_I128;
-  } else {
-    assert(0 && "Unexpected fp-to-uint conversion!");
-  }
-  SplitInteger(MakeLibCall(LC, VT, &Op, 1, false/*sign irrelevant*/), Lo, Hi);
-}
-
-void DAGTypeLegalizer::ExpandResult_LOAD(LoadSDNode *N,
-                                         SDOperand &Lo, SDOperand &Hi) {
-  // FIXME: Add support for indexed loads.
-  MVT::ValueType VT = N->getValueType(0);
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(VT);
-  SDOperand Ch  = N->getChain();    // Legalize the chain.
-  SDOperand Ptr = N->getBasePtr();  // Legalize the pointer.
-  ISD::LoadExtType ExtType = N->getExtensionType();
-  int SVOffset = N->getSrcValueOffset();
-  unsigned Alignment = N->getAlignment();
-  bool isVolatile = N->isVolatile();
-
-  assert(!(MVT::getSizeInBits(NVT) & 7) && "Expanded type not byte sized!");
-
-  if (ExtType == ISD::NON_EXTLOAD) {
-    Lo = DAG.getLoad(NVT, Ch, Ptr, N->getSrcValue(), SVOffset,
-                     isVolatile, Alignment);
-    // Increment the pointer to the other half.
-    unsigned IncrementSize = MVT::getSizeInBits(NVT)/8;
-    Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
-                      DAG.getIntPtrConstant(IncrementSize));
-    Hi = DAG.getLoad(NVT, Ch, Ptr, N->getSrcValue(), SVOffset+IncrementSize,
-                     isVolatile, MinAlign(Alignment, IncrementSize));
-
-    // Build a factor node to remember that this load is independent of the
-    // other one.
-    Ch = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1),
-                     Hi.getValue(1));
-
-    // Handle endianness of the load.
-    if (TLI.isBigEndian())
-      std::swap(Lo, Hi);
-  } else if (MVT::getSizeInBits(N->getMemoryVT()) <= MVT::getSizeInBits(NVT)) {
-    MVT::ValueType EVT = N->getMemoryVT();
-
-    Lo = DAG.getExtLoad(ExtType, NVT, Ch, Ptr, N->getSrcValue(), SVOffset, EVT,
-                        isVolatile, Alignment);
-
-    // Remember the chain.
-    Ch = Lo.getValue(1);
-
-    if (ExtType == ISD::SEXTLOAD) {
-      // The high part is obtained by SRA'ing all but one of the bits of the
-      // lo part.
-      unsigned LoSize = MVT::getSizeInBits(Lo.getValueType());
-      Hi = DAG.getNode(ISD::SRA, NVT, Lo,
-                       DAG.getConstant(LoSize-1, TLI.getShiftAmountTy()));
-    } else if (ExtType == ISD::ZEXTLOAD) {
-      // The high part is just a zero.
-      Hi = DAG.getConstant(0, NVT);
-    } else {
-      assert(ExtType == ISD::EXTLOAD && "Unknown extload!");
-      // The high part is undefined.
-      Hi = DAG.getNode(ISD::UNDEF, NVT);
-    }
-  } else if (TLI.isLittleEndian()) {
-    // Little-endian - low bits are at low addresses.
-    Lo = DAG.getLoad(NVT, Ch, Ptr, N->getSrcValue(), SVOffset,
-                     isVolatile, Alignment);
-
-    unsigned ExcessBits =
-      MVT::getSizeInBits(N->getMemoryVT()) - MVT::getSizeInBits(NVT);
-    MVT::ValueType NEVT = MVT::getIntegerType(ExcessBits);
-
-    // Increment the pointer to the other half.
-    unsigned IncrementSize = MVT::getSizeInBits(NVT)/8;
-    Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
-                      DAG.getIntPtrConstant(IncrementSize));
-    Hi = DAG.getExtLoad(ExtType, NVT, Ch, Ptr, N->getSrcValue(),
-                        SVOffset+IncrementSize, NEVT,
-                        isVolatile, MinAlign(Alignment, IncrementSize));
-
-    // Build a factor node to remember that this load is independent of the
-    // other one.
-    Ch = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1),
-                     Hi.getValue(1));
-  } else {
-    // Big-endian - high bits are at low addresses.  Favor aligned loads at
-    // the cost of some bit-fiddling.
-    MVT::ValueType EVT = N->getMemoryVT();
-    unsigned EBytes = MVT::getStoreSizeInBits(EVT)/8;
-    unsigned IncrementSize = MVT::getSizeInBits(NVT)/8;
-    unsigned ExcessBits = (EBytes - IncrementSize)*8;
-
-    // Load both the high bits and maybe some of the low bits.
-    Hi = DAG.getExtLoad(ExtType, NVT, Ch, Ptr, N->getSrcValue(), SVOffset,
-                        MVT::getIntegerType(MVT::getSizeInBits(EVT)-ExcessBits),
-                        isVolatile, Alignment);
-
-    // Increment the pointer to the other half.
-    Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
-                      DAG.getIntPtrConstant(IncrementSize));
-    // Load the rest of the low bits.
-    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, NVT, Ch, Ptr, N->getSrcValue(),
-                        SVOffset+IncrementSize, MVT::getIntegerType(ExcessBits),
-                        isVolatile, MinAlign(Alignment, IncrementSize));
-
-    // Build a factor node to remember that this load is independent of the
-    // other one.
-    Ch = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1),
-                     Hi.getValue(1));
-
-    if (ExcessBits < MVT::getSizeInBits(NVT)) {
-      // Transfer low bits from the bottom of Hi to the top of Lo.
-      Lo = DAG.getNode(ISD::OR, NVT, Lo,
-                       DAG.getNode(ISD::SHL, NVT, Hi,
-                                   DAG.getConstant(ExcessBits,
-                                                   TLI.getShiftAmountTy())));
-      // Move high bits to the right position in Hi.
-      Hi = DAG.getNode(ExtType == ISD::SEXTLOAD ? ISD::SRA : ISD::SRL, NVT, Hi,
-                       DAG.getConstant(MVT::getSizeInBits(NVT) - ExcessBits,
-                                       TLI.getShiftAmountTy()));
-    }
-  }
-
-  // Legalized the chain result - switch anything that used the old chain to
-  // use the new one.
-  ReplaceValueWith(SDOperand(N, 1), Ch);
-}
-
-void DAGTypeLegalizer::ExpandResult_Logical(SDNode *N,
-                                            SDOperand &Lo, SDOperand &Hi) {
-  SDOperand LL, LH, RL, RH;
-  GetExpandedOp(N->getOperand(0), LL, LH);
-  GetExpandedOp(N->getOperand(1), RL, RH);
-  Lo = DAG.getNode(N->getOpcode(), LL.getValueType(), LL, RL);
-  Hi = DAG.getNode(N->getOpcode(), LL.getValueType(), LH, RH);
-}
-
-void DAGTypeLegalizer::ExpandResult_BSWAP(SDNode *N,
-                                          SDOperand &Lo, SDOperand &Hi) {
-  GetExpandedOp(N->getOperand(0), Hi, Lo);  // Note swapped operands.
-  Lo = DAG.getNode(ISD::BSWAP, Lo.getValueType(), Lo);
-  Hi = DAG.getNode(ISD::BSWAP, Hi.getValueType(), Hi);
-}
-
-void DAGTypeLegalizer::ExpandResult_SELECT(SDNode *N,
-                                           SDOperand &Lo, SDOperand &Hi) {
-  SDOperand LL, LH, RL, RH;
-  GetExpandedOp(N->getOperand(1), LL, LH);
-  GetExpandedOp(N->getOperand(2), RL, RH);
-  Lo = DAG.getNode(ISD::SELECT, LL.getValueType(), N->getOperand(0), LL, RL);
-  Hi = DAG.getNode(ISD::SELECT, LL.getValueType(), N->getOperand(0), LH, RH);
-}
-
-void DAGTypeLegalizer::ExpandResult_SELECT_CC(SDNode *N,
-                                              SDOperand &Lo, SDOperand &Hi) {
-  SDOperand LL, LH, RL, RH;
-  GetExpandedOp(N->getOperand(2), LL, LH);
-  GetExpandedOp(N->getOperand(3), RL, RH);
-  Lo = DAG.getNode(ISD::SELECT_CC, LL.getValueType(), N->getOperand(0), 
-                   N->getOperand(1), LL, RL, N->getOperand(4));
-  Hi = DAG.getNode(ISD::SELECT_CC, LL.getValueType(), N->getOperand(0), 
-                   N->getOperand(1), LH, RH, N->getOperand(4));
-}
-
-void DAGTypeLegalizer::ExpandResult_ADDSUB(SDNode *N,
-                                           SDOperand &Lo, SDOperand &Hi) {
-  // Expand the subcomponents.
-  SDOperand LHSL, LHSH, RHSL, RHSH;
-  GetExpandedOp(N->getOperand(0), LHSL, LHSH);
-  GetExpandedOp(N->getOperand(1), RHSL, RHSH);
-  SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag);
-  SDOperand LoOps[2] = { LHSL, RHSL };
-  SDOperand HiOps[3] = { LHSH, RHSH };
-
-  if (N->getOpcode() == ISD::ADD) {
-    Lo = DAG.getNode(ISD::ADDC, VTList, LoOps, 2);
-    HiOps[2] = Lo.getValue(1);
-    Hi = DAG.getNode(ISD::ADDE, VTList, HiOps, 3);
-  } else {
-    Lo = DAG.getNode(ISD::SUBC, VTList, LoOps, 2);
-    HiOps[2] = Lo.getValue(1);
-    Hi = DAG.getNode(ISD::SUBE, VTList, HiOps, 3);
-  }
-}
-
-void DAGTypeLegalizer::ExpandResult_ADDSUBC(SDNode *N,
-                                            SDOperand &Lo, SDOperand &Hi) {
-  // Expand the subcomponents.
-  SDOperand LHSL, LHSH, RHSL, RHSH;
-  GetExpandedOp(N->getOperand(0), LHSL, LHSH);
-  GetExpandedOp(N->getOperand(1), RHSL, RHSH);
-  SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag);
-  SDOperand LoOps[2] = { LHSL, RHSL };
-  SDOperand HiOps[3] = { LHSH, RHSH };
-
-  if (N->getOpcode() == ISD::ADDC) {
-    Lo = DAG.getNode(ISD::ADDC, VTList, LoOps, 2);
-    HiOps[2] = Lo.getValue(1);
-    Hi = DAG.getNode(ISD::ADDE, VTList, HiOps, 3);
-  } else {
-    Lo = DAG.getNode(ISD::SUBC, VTList, LoOps, 2);
-    HiOps[2] = Lo.getValue(1);
-    Hi = DAG.getNode(ISD::SUBE, VTList, HiOps, 3);
-  }
-
-  // Legalized the flag result - switch anything that used the old flag to
-  // use the new one.
-  ReplaceValueWith(SDOperand(N, 1), Hi.getValue(1));
-}
-
-void DAGTypeLegalizer::ExpandResult_ADDSUBE(SDNode *N,
-                                            SDOperand &Lo, SDOperand &Hi) {
-  // Expand the subcomponents.
-  SDOperand LHSL, LHSH, RHSL, RHSH;
-  GetExpandedOp(N->getOperand(0), LHSL, LHSH);
-  GetExpandedOp(N->getOperand(1), RHSL, RHSH);
-  SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag);
-  SDOperand LoOps[3] = { LHSL, RHSL, N->getOperand(2) };
-  SDOperand HiOps[3] = { LHSH, RHSH };
-
-  Lo = DAG.getNode(N->getOpcode(), VTList, LoOps, 3);
-  HiOps[2] = Lo.getValue(1);
-  Hi = DAG.getNode(N->getOpcode(), VTList, HiOps, 3);
-
-  // Legalized the flag result - switch anything that used the old flag to
-  // use the new one.
-  ReplaceValueWith(SDOperand(N, 1), Hi.getValue(1));
-}
-
-void DAGTypeLegalizer::ExpandResult_MUL(SDNode *N,
-                                        SDOperand &Lo, SDOperand &Hi) {
-  MVT::ValueType VT = N->getValueType(0);
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(VT);
-  
-  bool HasMULHS = TLI.isOperationLegal(ISD::MULHS, NVT);
-  bool HasMULHU = TLI.isOperationLegal(ISD::MULHU, NVT);
-  bool HasSMUL_LOHI = TLI.isOperationLegal(ISD::SMUL_LOHI, NVT);
-  bool HasUMUL_LOHI = TLI.isOperationLegal(ISD::UMUL_LOHI, NVT);
-  if (HasMULHU || HasMULHS || HasUMUL_LOHI || HasSMUL_LOHI) {
-    SDOperand LL, LH, RL, RH;
-    GetExpandedOp(N->getOperand(0), LL, LH);
-    GetExpandedOp(N->getOperand(1), RL, RH);
-    unsigned OuterBitSize = MVT::getSizeInBits(VT);
-    unsigned BitSize = MVT::getSizeInBits(NVT);
-    unsigned LHSSB = DAG.ComputeNumSignBits(N->getOperand(0));
-    unsigned RHSSB = DAG.ComputeNumSignBits(N->getOperand(1));
-    
-    if (DAG.MaskedValueIsZero(N->getOperand(0),
-                              APInt::getHighBitsSet(OuterBitSize, LHSSB)) &&
-        DAG.MaskedValueIsZero(N->getOperand(1),
-                              APInt::getHighBitsSet(OuterBitSize, RHSSB))) {
-      // The inputs are both zero-extended.
-      if (HasUMUL_LOHI) {
-        // We can emit a umul_lohi.
-        Lo = DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(NVT, NVT), LL, RL);
-        Hi = SDOperand(Lo.Val, 1);
-        return;
-      }
-      if (HasMULHU) {
-        // We can emit a mulhu+mul.
-        Lo = DAG.getNode(ISD::MUL, NVT, LL, RL);
-        Hi = DAG.getNode(ISD::MULHU, NVT, LL, RL);
-        return;
-      }
-    }
-    if (LHSSB > BitSize && RHSSB > BitSize) {
-      // The input values are both sign-extended.
-      if (HasSMUL_LOHI) {
-        // We can emit a smul_lohi.
-        Lo = DAG.getNode(ISD::SMUL_LOHI, DAG.getVTList(NVT, NVT), LL, RL);
-        Hi = SDOperand(Lo.Val, 1);
-        return;
-      }
-      if (HasMULHS) {
-        // We can emit a mulhs+mul.
-        Lo = DAG.getNode(ISD::MUL, NVT, LL, RL);
-        Hi = DAG.getNode(ISD::MULHS, NVT, LL, RL);
-        return;
-      }
-    }
-    if (HasUMUL_LOHI) {
-      // Lo,Hi = umul LHS, RHS.
-      SDOperand UMulLOHI = DAG.getNode(ISD::UMUL_LOHI,
-                                       DAG.getVTList(NVT, NVT), LL, RL);
-      Lo = UMulLOHI;
-      Hi = UMulLOHI.getValue(1);
-      RH = DAG.getNode(ISD::MUL, NVT, LL, RH);
-      LH = DAG.getNode(ISD::MUL, NVT, LH, RL);
-      Hi = DAG.getNode(ISD::ADD, NVT, Hi, RH);
-      Hi = DAG.getNode(ISD::ADD, NVT, Hi, LH);
-      return;
-    }
-  }
-
-  // If nothing else, we can make a libcall.
-  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
-  SplitInteger(MakeLibCall(RTLIB::MUL_I64, VT, Ops, 2, true/*sign irrelevant*/),
-               Lo, Hi);
-}
-
-void DAGTypeLegalizer::ExpandResult_SDIV(SDNode *N,
-                                         SDOperand &Lo, SDOperand &Hi) {
-  assert(N->getValueType(0) == MVT::i64 && "Unsupported sdiv!");
-  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
-  SplitInteger(MakeLibCall(RTLIB::SDIV_I64, N->getValueType(0), Ops, 2, true),
-               Lo, Hi);
-}
-
-void DAGTypeLegalizer::ExpandResult_SREM(SDNode *N,
-                                         SDOperand &Lo, SDOperand &Hi) {
-  assert(N->getValueType(0) == MVT::i64 && "Unsupported srem!");
-  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
-  SplitInteger(MakeLibCall(RTLIB::SREM_I64, N->getValueType(0), Ops, 2, true),
-               Lo, Hi);
-}
-
-void DAGTypeLegalizer::ExpandResult_UDIV(SDNode *N,
-                                         SDOperand &Lo, SDOperand &Hi) {
-  assert(N->getValueType(0) == MVT::i64 && "Unsupported udiv!");
-  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
-  SplitInteger(MakeLibCall(RTLIB::UDIV_I64, N->getValueType(0), Ops, 2, false),
-               Lo, Hi);
-}
-
-void DAGTypeLegalizer::ExpandResult_UREM(SDNode *N,
-                                         SDOperand &Lo, SDOperand &Hi) {
-  assert(N->getValueType(0) == MVT::i64 && "Unsupported urem!");
-  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
-  SplitInteger(MakeLibCall(RTLIB::UREM_I64, N->getValueType(0), Ops, 2, false),
-               Lo, Hi);
-}
-
-void DAGTypeLegalizer::ExpandResult_Shift(SDNode *N,
-                                          SDOperand &Lo, SDOperand &Hi) {
-  MVT::ValueType VT = N->getValueType(0);
-  
-  // If we can emit an efficient shift operation, do so now.  Check to see if 
-  // the RHS is a constant.
-  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
-    return ExpandShiftByConstant(N, CN->getValue(), Lo, Hi);
-
-  // If we can determine that the high bit of the shift is zero or one, even if
-  // the low bits are variable, emit this shift in an optimized form.
-  if (ExpandShiftWithKnownAmountBit(N, Lo, Hi))
-    return;
-  
-  // If this target supports shift_PARTS, use it.  First, map to the _PARTS opc.
-  unsigned PartsOpc;
-  if (N->getOpcode() == ISD::SHL) {
-    PartsOpc = ISD::SHL_PARTS;
-  } else if (N->getOpcode() == ISD::SRL) {
-    PartsOpc = ISD::SRL_PARTS;
-  } else {
-    assert(N->getOpcode() == ISD::SRA && "Unknown shift!");
-    PartsOpc = ISD::SRA_PARTS;
-  }
-  
-  // Next check to see if the target supports this SHL_PARTS operation or if it
-  // will custom expand it.
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(VT);
-  TargetLowering::LegalizeAction Action = TLI.getOperationAction(PartsOpc, NVT);
-  if ((Action == TargetLowering::Legal && TLI.isTypeLegal(NVT)) ||
-      Action == TargetLowering::Custom) {
-    // Expand the subcomponents.
-    SDOperand LHSL, LHSH;
-    GetExpandedOp(N->getOperand(0), LHSL, LHSH);
-    
-    SDOperand Ops[] = { LHSL, LHSH, N->getOperand(1) };
-    MVT::ValueType VT = LHSL.getValueType();
-    Lo = DAG.getNode(PartsOpc, DAG.getNodeValueTypes(VT, VT), 2, Ops, 3);
-    Hi = Lo.getValue(1);
-    return;
-  }
-
-  // Otherwise, emit a libcall.
-  assert(VT == MVT::i64 && "Unsupported shift!");
-
-  RTLIB::Libcall LC;
-  bool isSigned;
-  if (N->getOpcode() == ISD::SHL) {
-    LC = RTLIB::SHL_I64;
-    isSigned = false; /*sign irrelevant*/
-  } else if (N->getOpcode() == ISD::SRL) {
-    LC = RTLIB::SRL_I64;
-    isSigned = false;
-  } else {
-    assert(N->getOpcode() == ISD::SRA && "Unknown shift!");
-    LC = RTLIB::SRA_I64;
-    isSigned = true;
-  }
-
-  SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) };
-  SplitInteger(MakeLibCall(LC, VT, Ops, 2, isSigned), Lo, Hi);
-}
-
-void DAGTypeLegalizer::ExpandResult_CTLZ(SDNode *N,
-                                         SDOperand &Lo, SDOperand &Hi) {
-  // ctlz (HiLo) -> Hi != 0 ? ctlz(Hi) : (ctlz(Lo)+32)
-  GetExpandedOp(N->getOperand(0), Lo, Hi);
-  MVT::ValueType NVT = Lo.getValueType();
-
-  SDOperand HiNotZero = DAG.getSetCC(TLI.getSetCCResultType(Hi), Hi,
-                                     DAG.getConstant(0, NVT), ISD::SETNE);
-
-  SDOperand LoLZ = DAG.getNode(ISD::CTLZ, NVT, Lo);
-  SDOperand HiLZ = DAG.getNode(ISD::CTLZ, NVT, Hi);
-
-  Lo = DAG.getNode(ISD::SELECT, NVT, HiNotZero, HiLZ,
-                   DAG.getNode(ISD::ADD, NVT, LoLZ,
-                               DAG.getConstant(MVT::getSizeInBits(NVT), NVT)));
-  Hi = DAG.getConstant(0, NVT);
-}
-
-void DAGTypeLegalizer::ExpandResult_CTPOP(SDNode *N,
-                                          SDOperand &Lo, SDOperand &Hi) {
-  // ctpop(HiLo) -> ctpop(Hi)+ctpop(Lo)
-  GetExpandedOp(N->getOperand(0), Lo, Hi);
-  MVT::ValueType NVT = Lo.getValueType();
-  Lo = DAG.getNode(ISD::ADD, NVT, DAG.getNode(ISD::CTPOP, NVT, Lo),
-                   DAG.getNode(ISD::CTPOP, NVT, Hi));
-  Hi = DAG.getConstant(0, NVT);
-}
-
-void DAGTypeLegalizer::ExpandResult_CTTZ(SDNode *N,
-                                         SDOperand &Lo, SDOperand &Hi) {
-  // cttz (HiLo) -> Lo != 0 ? cttz(Lo) : (cttz(Hi)+32)
-  GetExpandedOp(N->getOperand(0), Lo, Hi);
-  MVT::ValueType NVT = Lo.getValueType();
-
-  SDOperand LoNotZero = DAG.getSetCC(TLI.getSetCCResultType(Lo), Lo,
-                                     DAG.getConstant(0, NVT), ISD::SETNE);
-
-  SDOperand LoLZ = DAG.getNode(ISD::CTTZ, NVT, Lo);
-  SDOperand HiLZ = DAG.getNode(ISD::CTTZ, NVT, Hi);
-
-  Lo = DAG.getNode(ISD::SELECT, NVT, LoNotZero, LoLZ,
-                   DAG.getNode(ISD::ADD, NVT, HiLZ,
-                               DAG.getConstant(MVT::getSizeInBits(NVT), NVT)));
-  Hi = DAG.getConstant(0, NVT);
-}
-
-void DAGTypeLegalizer::ExpandResult_EXTRACT_VECTOR_ELT(SDNode *N,
-                                                       SDOperand &Lo,
-                                                       SDOperand &Hi) {
-  SDOperand OldVec = N->getOperand(0);
-  unsigned OldElts = MVT::getVectorNumElements(OldVec.getValueType());
-
-  // Convert to a vector of the expanded element type, for example
-  // <2 x i64> -> <4 x i32>.
-  MVT::ValueType OldVT = N->getValueType(0);
-  MVT::ValueType NewVT = TLI.getTypeToTransformTo(OldVT);
-  assert(MVT::getSizeInBits(OldVT) == 2 * MVT::getSizeInBits(NewVT) &&
-         "Do not know how to handle this expansion!");
-
-  SDOperand NewVec = DAG.getNode(ISD::BIT_CONVERT,
-                                 MVT::getVectorType(NewVT, 2 * OldElts),
-                                 OldVec);
-
-  // Extract the elements at 2 * Idx and 2 * Idx + 1 from the new vector.
-  SDOperand Idx = N->getOperand(1);
-
-  // Make sure the type of Idx is big enough to hold the new values.
-  if (MVT::getSizeInBits(Idx.getValueType()) <
-      MVT::getSizeInBits(TLI.getPointerTy()))
-    Idx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), Idx);
-
-  Idx = DAG.getNode(ISD::ADD, Idx.getValueType(), Idx, Idx);
-  Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, NewVT, NewVec, Idx);
-
-  Idx = DAG.getNode(ISD::ADD, Idx.getValueType(), Idx,
-                    DAG.getConstant(1, Idx.getValueType()));
-  Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, NewVT, NewVec, Idx);
-
-  if (TLI.isBigEndian())
-    std::swap(Lo, Hi);
-}
-
-/// ExpandShiftByConstant - N is a shift by a value that needs to be expanded,
-/// and the shift amount is a constant 'Amt'.  Expand the operation.
-void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt, 
-                                             SDOperand &Lo, SDOperand &Hi) {
-  // Expand the incoming operand to be shifted, so that we have its parts.
-  SDOperand InL, InH;
-  GetExpandedOp(N->getOperand(0), InL, InH);
-  
-  MVT::ValueType NVT = InL.getValueType();
-  unsigned VTBits = MVT::getSizeInBits(N->getValueType(0));
-  unsigned NVTBits = MVT::getSizeInBits(NVT);
-  MVT::ValueType ShTy = N->getOperand(1).getValueType();
-
-  if (N->getOpcode() == ISD::SHL) {
-    if (Amt > VTBits) {
-      Lo = Hi = DAG.getConstant(0, NVT);
-    } else if (Amt > NVTBits) {
-      Lo = DAG.getConstant(0, NVT);
-      Hi = DAG.getNode(ISD::SHL, NVT, InL, DAG.getConstant(Amt-NVTBits,ShTy));
-    } else if (Amt == NVTBits) {
-      Lo = DAG.getConstant(0, NVT);
-      Hi = InL;
-    } else {
-      Lo = DAG.getNode(ISD::SHL, NVT, InL, DAG.getConstant(Amt, ShTy));
-      Hi = DAG.getNode(ISD::OR, NVT,
-                       DAG.getNode(ISD::SHL, NVT, InH,
-                                   DAG.getConstant(Amt, ShTy)),
-                       DAG.getNode(ISD::SRL, NVT, InL,
-                                   DAG.getConstant(NVTBits-Amt, ShTy)));
-    }
-    return;
-  }
-  
-  if (N->getOpcode() == ISD::SRL) {
-    if (Amt > VTBits) {
-      Lo = DAG.getConstant(0, NVT);
-      Hi = DAG.getConstant(0, NVT);
-    } else if (Amt > NVTBits) {
-      Lo = DAG.getNode(ISD::SRL, NVT, InH, DAG.getConstant(Amt-NVTBits,ShTy));
-      Hi = DAG.getConstant(0, NVT);
-    } else if (Amt == NVTBits) {
-      Lo = InH;
-      Hi = DAG.getConstant(0, NVT);
-    } else {
-      Lo = DAG.getNode(ISD::OR, NVT,
-                       DAG.getNode(ISD::SRL, NVT, InL,
-                                   DAG.getConstant(Amt, ShTy)),
-                       DAG.getNode(ISD::SHL, NVT, InH,
-                                   DAG.getConstant(NVTBits-Amt, ShTy)));
-      Hi = DAG.getNode(ISD::SRL, NVT, InH, DAG.getConstant(Amt, ShTy));
-    }
-    return;
-  }
-  
-  assert(N->getOpcode() == ISD::SRA && "Unknown shift!");
-  if (Amt > VTBits) {
-    Hi = Lo = DAG.getNode(ISD::SRA, NVT, InH,
-                          DAG.getConstant(NVTBits-1, ShTy));
-  } else if (Amt > NVTBits) {
-    Lo = DAG.getNode(ISD::SRA, NVT, InH,
-                     DAG.getConstant(Amt-NVTBits, ShTy));
-    Hi = DAG.getNode(ISD::SRA, NVT, InH,
-                     DAG.getConstant(NVTBits-1, ShTy));
-  } else if (Amt == NVTBits) {
-    Lo = InH;
-    Hi = DAG.getNode(ISD::SRA, NVT, InH,
-                     DAG.getConstant(NVTBits-1, ShTy));
-  } else {
-    Lo = DAG.getNode(ISD::OR, NVT,
-                     DAG.getNode(ISD::SRL, NVT, InL,
-                                 DAG.getConstant(Amt, ShTy)),
-                     DAG.getNode(ISD::SHL, NVT, InH,
-                                 DAG.getConstant(NVTBits-Amt, ShTy)));
-    Hi = DAG.getNode(ISD::SRA, NVT, InH, DAG.getConstant(Amt, ShTy));
-  }
-}
-
-/// ExpandShiftWithKnownAmountBit - Try to determine whether we can simplify
-/// this shift based on knowledge of the high bit of the shift amount.  If we
-/// can tell this, we know that it is >= 32 or < 32, without knowing the actual
-/// shift amount.
-bool DAGTypeLegalizer::
-ExpandShiftWithKnownAmountBit(SDNode *N, SDOperand &Lo, SDOperand &Hi) {
-  SDOperand Amt = N->getOperand(1);
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0));
-  MVT::ValueType ShTy = Amt.getValueType();
-  MVT::ValueType ShBits = MVT::getSizeInBits(ShTy);
-  unsigned NVTBits = MVT::getSizeInBits(NVT);
-  assert(isPowerOf2_32(NVTBits) &&
-         "Expanded integer type size not a power of two!");
-
-  APInt HighBitMask = APInt::getHighBitsSet(ShBits, ShBits - Log2_32(NVTBits));
-  APInt KnownZero, KnownOne;
-  DAG.ComputeMaskedBits(N->getOperand(1), HighBitMask, KnownZero, KnownOne);
-  
-  // If we don't know anything about the high bits, exit.
-  if (((KnownZero|KnownOne) & HighBitMask) == 0)
-    return false;
-
-  // Get the incoming operand to be shifted.
-  SDOperand InL, InH;
-  GetExpandedOp(N->getOperand(0), InL, InH);
-
-  // If we know that any of the high bits of the shift amount are one, then we
-  // can do this as a couple of simple shifts.
-  if (KnownOne.intersects(HighBitMask)) {
-    // Mask out the high bit, which we know is set.
-    Amt = DAG.getNode(ISD::AND, ShTy, Amt,
-                      DAG.getConstant(~HighBitMask, ShTy));
-    
-    switch (N->getOpcode()) {
-    default: assert(0 && "Unknown shift");
-    case ISD::SHL:
-      Lo = DAG.getConstant(0, NVT);              // Low part is zero.
-      Hi = DAG.getNode(ISD::SHL, NVT, InL, Amt); // High part from Lo part.
-      return true;
-    case ISD::SRL:
-      Hi = DAG.getConstant(0, NVT);              // Hi part is zero.
-      Lo = DAG.getNode(ISD::SRL, NVT, InH, Amt); // Lo part from Hi part.
-      return true;
-    case ISD::SRA:
-      Hi = DAG.getNode(ISD::SRA, NVT, InH,       // Sign extend high part.
-                       DAG.getConstant(NVTBits-1, ShTy));
-      Lo = DAG.getNode(ISD::SRA, NVT, InH, Amt); // Lo part from Hi part.
-      return true;
-    }
-  }
-  
-  // If we know that all of the high bits of the shift amount are zero, then we
-  // can do this as a couple of simple shifts.
-  if ((KnownZero & HighBitMask) == HighBitMask) {
-    // Compute 32-amt.
-    SDOperand Amt2 = DAG.getNode(ISD::SUB, ShTy,
-                                 DAG.getConstant(NVTBits, ShTy),
-                                 Amt);
-    unsigned Op1, Op2;
-    switch (N->getOpcode()) {
-    default: assert(0 && "Unknown shift");
-    case ISD::SHL:  Op1 = ISD::SHL; Op2 = ISD::SRL; break;
-    case ISD::SRL:
-    case ISD::SRA:  Op1 = ISD::SRL; Op2 = ISD::SHL; break;
-    }
-    
-    Lo = DAG.getNode(N->getOpcode(), NVT, InL, Amt);
-    Hi = DAG.getNode(ISD::OR, NVT,
-                     DAG.getNode(Op1, NVT, InH, Amt),
-                     DAG.getNode(Op2, NVT, InL, Amt2));
-    return true;
-  }
-
-  return false;
-}
-
-
-//===----------------------------------------------------------------------===//
-//  Operand Expansion
-//===----------------------------------------------------------------------===//
-
-/// ExpandOperand - This method is called when the specified operand of the
-/// specified node is found to need expansion.  At this point, all of the result
-/// types of the node are known to be legal, but other operands of the node may
-/// need promotion or expansion as well as the specified one.
-bool DAGTypeLegalizer::ExpandOperand(SDNode *N, unsigned OpNo) {
-  DEBUG(cerr << "Expand node operand: "; N->dump(&DAG); cerr << "\n");
-  SDOperand Res(0, 0);
-  
-  if (TLI.getOperationAction(N->getOpcode(), N->getOperand(OpNo).getValueType())
-      == TargetLowering::Custom)
-    Res = TLI.LowerOperation(SDOperand(N, 0), DAG);
-  
-  if (Res.Val == 0) {
-    switch (N->getOpcode()) {
-    default:
-  #ifndef NDEBUG
-      cerr << "ExpandOperand Op #" << OpNo << ": ";
-      N->dump(&DAG); cerr << "\n";
-  #endif
-      assert(0 && "Do not know how to expand this operator's operand!");
-      abort();
-      
-    case ISD::TRUNCATE:        Res = ExpandOperand_TRUNCATE(N); break;
-    case ISD::BIT_CONVERT:     Res = ExpandOperand_BIT_CONVERT(N); break;
-
-    case ISD::SINT_TO_FP:
-      Res = ExpandOperand_SINT_TO_FP(N->getOperand(0), N->getValueType(0));
-      break;
-    case ISD::UINT_TO_FP:
-      Res = ExpandOperand_UINT_TO_FP(N->getOperand(0), N->getValueType(0)); 
-      break;
-    case ISD::EXTRACT_ELEMENT: Res = ExpandOperand_EXTRACT_ELEMENT(N); break;
-
-    case ISD::BR_CC:           Res = ExpandOperand_BR_CC(N); break;
-    case ISD::SETCC:           Res = ExpandOperand_SETCC(N); break;
-
-    case ISD::STORE:
-      Res = ExpandOperand_STORE(cast<StoreSDNode>(N), OpNo);
-      break;
-
-    case ISD::BUILD_VECTOR: Res = ExpandOperand_BUILD_VECTOR(N); break;
-    }
-  }
-  
-  // If the result is null, the sub-method took care of registering results etc.
-  if (!Res.Val) return false;
-  // If the result is N, the sub-method updated N in place.  Check to see if any
-  // operands are new, and if so, mark them.
-  if (Res.Val == N) {
-    // Mark N as new and remark N and its operands.  This allows us to correctly
-    // revisit N if it needs another step of promotion and allows us to visit
-    // any new operands to N.
-    ReanalyzeNode(N);
-    return true;
-  }
-
-  assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
-         "Invalid operand expansion");
-  
-  ReplaceValueWith(SDOperand(N, 0), Res);
-  return false;
-}
-
-SDOperand DAGTypeLegalizer::ExpandOperand_TRUNCATE(SDNode *N) {
-  SDOperand InL, InH;
-  GetExpandedOp(N->getOperand(0), InL, InH);
-  // Just truncate the low part of the source.
-  return DAG.getNode(ISD::TRUNCATE, N->getValueType(0), InL);
-}
-
-SDOperand DAGTypeLegalizer::ExpandOperand_BIT_CONVERT(SDNode *N) {
-  if (MVT::isVector(N->getValueType(0))) {
-    // An illegal integer type is being converted to a legal vector type.
-    // Make a two element vector out of the expanded parts and convert that
-    // instead, but only if the new vector type is legal (otherwise there
-    // is no point, and it might create expansion loops).  For example, on
-    // x86 this turns v1i64 = BIT_CONVERT i64 into v1i64 = BIT_CONVERT v2i32.
-    MVT::ValueType OVT = N->getOperand(0).getValueType();
-    MVT::ValueType NVT = MVT::getVectorType(TLI.getTypeToTransformTo(OVT), 2);
-
-    if (isTypeLegal(NVT)) {
-      SDOperand Parts[2];
-      GetExpandedOp(N->getOperand(0), Parts[0], Parts[1]);
-
-      if (TLI.isBigEndian())
-        std::swap(Parts[0], Parts[1]);
-
-      SDOperand Vec = DAG.getNode(ISD::BUILD_VECTOR, NVT, Parts, 2);
-      return DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0), Vec);
-    }
-  }
-
-  // Otherwise, store to a temporary and load out again as the new type.
-  return CreateStackStoreLoad(N->getOperand(0), N->getValueType(0));
-}
-
-SDOperand DAGTypeLegalizer::ExpandOperand_SINT_TO_FP(SDOperand Source, 
-                                                     MVT::ValueType DestTy) {
-  // We know the destination is legal, but that the input needs to be expanded.
-  MVT::ValueType SourceVT = Source.getValueType();
-  
-  // Check to see if the target has a custom way to lower this.  If so, use it.
-  switch (TLI.getOperationAction(ISD::SINT_TO_FP, SourceVT)) {
-  default: assert(0 && "This action not implemented for this operation!");
-  case TargetLowering::Legal:
-  case TargetLowering::Expand:
-    break;   // This case is handled below.
-  case TargetLowering::Custom:
-    SDOperand NV = TLI.LowerOperation(DAG.getNode(ISD::SINT_TO_FP, DestTy,
-                                                  Source), DAG);
-    if (NV.Val) return NV;
-    break;   // The target lowered this.
-  }
-  
-  RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
-  if (SourceVT == MVT::i64) {
-    if (DestTy == MVT::f32)
-      LC = RTLIB::SINTTOFP_I64_F32;
-    else {
-      assert(DestTy == MVT::f64 && "Unknown fp value type!");
-      LC = RTLIB::SINTTOFP_I64_F64;
-    }
-  } else if (SourceVT == MVT::i128) {
-    if (DestTy == MVT::f32)
-      LC = RTLIB::SINTTOFP_I128_F32;
-    else if (DestTy == MVT::f64)
-      LC = RTLIB::SINTTOFP_I128_F64;
-    else if (DestTy == MVT::f80)
-      LC = RTLIB::SINTTOFP_I128_F80;
-    else {
-      assert(DestTy == MVT::ppcf128 && "Unknown fp value type!");
-      LC = RTLIB::SINTTOFP_I128_PPCF128;
-    }
-  } else {
-    assert(0 && "Unknown int value type!");
-  }
-
-  assert(LC != RTLIB::UNKNOWN_LIBCALL &&
-         "Don't know how to expand this SINT_TO_FP!");
-  return MakeLibCall(LC, DestTy, &Source, 1, true);
-}
-
-SDOperand DAGTypeLegalizer::ExpandOperand_UINT_TO_FP(SDOperand Source, 
-                                                     MVT::ValueType DestTy) {
-  // We know the destination is legal, but that the input needs to be expanded.
-  assert(getTypeAction(Source.getValueType()) == Expand &&
-         "This is not an expansion!");
-  
-  // If this is unsigned, and not supported, first perform the conversion to
-  // signed, then adjust the result if the sign bit is set.
-  SDOperand SignedConv = ExpandOperand_SINT_TO_FP(Source, DestTy);
-
-  // The 64-bit value loaded will be incorrect if the 'sign bit' of the
-  // incoming integer is set.  To handle this, we dynamically test to see if
-  // it is set, and, if so, add a fudge factor.
-  SDOperand Lo, Hi;
-  GetExpandedOp(Source, Lo, Hi);
-  
-  SDOperand SignSet = DAG.getSetCC(TLI.getSetCCResultType(Hi), Hi,
-                                   DAG.getConstant(0, Hi.getValueType()),
-                                   ISD::SETLT);
-  SDOperand Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4);
-  SDOperand CstOffset = DAG.getNode(ISD::SELECT, Zero.getValueType(),
-                                    SignSet, Four, Zero);
-  uint64_t FF = 0x5f800000ULL;
-  if (TLI.isLittleEndian()) FF <<= 32;
-  Constant *FudgeFactor = ConstantInt::get((Type*)Type::Int64Ty, FF);
-  
-  SDOperand CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy());
-  CPIdx = DAG.getNode(ISD::ADD, TLI.getPointerTy(), CPIdx, CstOffset);
-  SDOperand FudgeInReg;
-  if (DestTy == MVT::f32)
-    FudgeInReg = DAG.getLoad(MVT::f32, DAG.getEntryNode(), CPIdx, NULL, 0);
-  else if (MVT::getSizeInBits(DestTy) > MVT::getSizeInBits(MVT::f32))
-    // FIXME: Avoid the extend by constructing the right constantpool?
-    FudgeInReg = DAG.getExtLoad(ISD::EXTLOAD, DestTy, DAG.getEntryNode(),
-                                CPIdx, NULL, 0, MVT::f32);
-  else 
-    assert(0 && "Unexpected conversion");
-  
-  return DAG.getNode(ISD::FADD, DestTy, SignedConv, FudgeInReg);
-}
-
-SDOperand DAGTypeLegalizer::ExpandOperand_EXTRACT_ELEMENT(SDNode *N) {
-  SDOperand Lo, Hi;
-  GetExpandedOp(N->getOperand(0), Lo, Hi);
-  return cast<ConstantSDNode>(N->getOperand(1))->getValue() ? Hi : Lo;
-}
-
-SDOperand DAGTypeLegalizer::ExpandOperand_BR_CC(SDNode *N) {
-  SDOperand NewLHS = N->getOperand(2), NewRHS = N->getOperand(3);
-  ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(1))->get();
-  ExpandSetCCOperands(NewLHS, NewRHS, CCCode);
-
-  // If ExpandSetCCOperands returned a scalar, we need to compare the result
-  // against zero to select between true and false values.
-  if (NewRHS.Val == 0) {
-    NewRHS = DAG.getConstant(0, NewLHS.getValueType());
-    CCCode = ISD::SETNE;
-  }
-
-  // Update N to have the operands specified.
-  return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0),
-                                DAG.getCondCode(CCCode), NewLHS, NewRHS,
-                                N->getOperand(4));
-}
-
-SDOperand DAGTypeLegalizer::ExpandOperand_SETCC(SDNode *N) {
-  SDOperand NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
-  ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(2))->get();
-  ExpandSetCCOperands(NewLHS, NewRHS, CCCode);
-  
-  // If ExpandSetCCOperands returned a scalar, use it.
-  if (NewRHS.Val == 0) return NewLHS;
-
-  // Otherwise, update N to have the operands specified.
-  return DAG.UpdateNodeOperands(SDOperand(N, 0), NewLHS, NewRHS,
-                                DAG.getCondCode(CCCode));
-}
-
-/// ExpandSetCCOperands - Expand the operands of a comparison.  This code is
-/// shared among BR_CC, SELECT_CC, and SETCC handlers.
-void DAGTypeLegalizer::ExpandSetCCOperands(SDOperand &NewLHS, SDOperand &NewRHS,
-                                           ISD::CondCode &CCCode) {
-  SDOperand LHSLo, LHSHi, RHSLo, RHSHi;
-  GetExpandedOp(NewLHS, LHSLo, LHSHi);
-  GetExpandedOp(NewRHS, RHSLo, RHSHi);
-
-  MVT::ValueType VT = NewLHS.getValueType();
-  if (VT == MVT::ppcf128) {
-    // FIXME:  This generated code sucks.  We want to generate
-    //         FCMP crN, hi1, hi2
-    //         BNE crN, L:
-    //         FCMP crN, lo1, lo2
-    // The following can be improved, but not that much.
-    SDOperand Tmp1, Tmp2, Tmp3;
-    Tmp1 = DAG.getSetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi, ISD::SETEQ);
-    Tmp2 = DAG.getSetCC(TLI.getSetCCResultType(LHSLo), LHSLo, RHSLo, CCCode);
-    Tmp3 = DAG.getNode(ISD::AND, Tmp1.getValueType(), Tmp1, Tmp2);
-    Tmp1 = DAG.getSetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi, ISD::SETNE);
-    Tmp2 = DAG.getSetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi, CCCode);
-    Tmp1 = DAG.getNode(ISD::AND, Tmp1.getValueType(), Tmp1, Tmp2);
-    NewLHS = DAG.getNode(ISD::OR, Tmp1.getValueType(), Tmp1, Tmp3);
-    NewRHS = SDOperand();   // LHS is the result, not a compare.
-    return;
-  }
-
-  if (CCCode == ISD::SETEQ || CCCode == ISD::SETNE) {
-    if (RHSLo == RHSHi)
-      if (ConstantSDNode *RHSCST = dyn_cast<ConstantSDNode>(RHSLo))
-        if (RHSCST->isAllOnesValue()) {
-          // Equality comparison to -1.
-          NewLHS = DAG.getNode(ISD::AND, LHSLo.getValueType(), LHSLo, LHSHi);
-          NewRHS = RHSLo;
-          return;
-        }
-          
-    NewLHS = DAG.getNode(ISD::XOR, LHSLo.getValueType(), LHSLo, RHSLo);
-    NewRHS = DAG.getNode(ISD::XOR, LHSLo.getValueType(), LHSHi, RHSHi);
-    NewLHS = DAG.getNode(ISD::OR, NewLHS.getValueType(), NewLHS, NewRHS);
-    NewRHS = DAG.getConstant(0, NewLHS.getValueType());
-    return;
-  }
-  
-  // If this is a comparison of the sign bit, just look at the top part.
-  // X > -1,  x < 0
-  if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(NewRHS))
-    if ((CCCode == ISD::SETLT && CST->isNullValue()) ||     // X < 0
-        (CCCode == ISD::SETGT && CST->isAllOnesValue())) {  // X > -1
-      NewLHS = LHSHi;
-      NewRHS = RHSHi;
-      return;
-    }
-      
-  // FIXME: This generated code sucks.
-  ISD::CondCode LowCC;
-  switch (CCCode) {
-  default: assert(0 && "Unknown integer setcc!");
-  case ISD::SETLT:
-  case ISD::SETULT: LowCC = ISD::SETULT; break;
-  case ISD::SETGT:
-  case ISD::SETUGT: LowCC = ISD::SETUGT; break;
-  case ISD::SETLE:
-  case ISD::SETULE: LowCC = ISD::SETULE; break;
-  case ISD::SETGE:
-  case ISD::SETUGE: LowCC = ISD::SETUGE; break;
-  }
-  
-  // Tmp1 = lo(op1) < lo(op2)   // Always unsigned comparison
-  // Tmp2 = hi(op1) < hi(op2)   // Signedness depends on operands
-  // dest = hi(op1) == hi(op2) ? Tmp1 : Tmp2;
-  
-  // NOTE: on targets without efficient SELECT of bools, we can always use
-  // this identity: (B1 ? B2 : B3) --> (B1 & B2)|(!B1&B3)
-  TargetLowering::DAGCombinerInfo DagCombineInfo(DAG, false, true, NULL);
-  SDOperand Tmp1, Tmp2;
-  Tmp1 = TLI.SimplifySetCC(TLI.getSetCCResultType(LHSLo), LHSLo, RHSLo, LowCC,
-                           false, DagCombineInfo);
-  if (!Tmp1.Val)
-    Tmp1 = DAG.getSetCC(TLI.getSetCCResultType(LHSLo), LHSLo, RHSLo, LowCC);
-  Tmp2 = TLI.SimplifySetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi,
-                           CCCode, false, DagCombineInfo);
-  if (!Tmp2.Val)
-    Tmp2 = DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi,
-                       DAG.getCondCode(CCCode));
-  
-  ConstantSDNode *Tmp1C = dyn_cast<ConstantSDNode>(Tmp1.Val);
-  ConstantSDNode *Tmp2C = dyn_cast<ConstantSDNode>(Tmp2.Val);
-  if ((Tmp1C && Tmp1C->isNullValue()) ||
-      (Tmp2C && Tmp2C->isNullValue() &&
-       (CCCode == ISD::SETLE || CCCode == ISD::SETGE ||
-        CCCode == ISD::SETUGE || CCCode == ISD::SETULE)) ||
-      (Tmp2C && Tmp2C->getAPIntValue() == 1 &&
-       (CCCode == ISD::SETLT || CCCode == ISD::SETGT ||
-        CCCode == ISD::SETUGT || CCCode == ISD::SETULT))) {
-    // If the low part is known false, return the high part.
-    // For LE / GE, if high part is known false, ignore the low part.
-    // For LT / GT, if high part is known true, ignore the low part.
-    NewLHS = Tmp2;
-    NewRHS = SDOperand();
-    return;
-  }
-  
-  NewLHS = TLI.SimplifySetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi,
-                             ISD::SETEQ, false, DagCombineInfo);
-  if (!NewLHS.Val)
-    NewLHS = DAG.getSetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi,
-                          ISD::SETEQ);
-  NewLHS = DAG.getNode(ISD::SELECT, Tmp1.getValueType(),
-                       NewLHS, Tmp1, Tmp2);
-  NewRHS = SDOperand();
-}
-
-SDOperand DAGTypeLegalizer::ExpandOperand_STORE(StoreSDNode *N, unsigned OpNo) {
-  // FIXME: Add support for indexed stores.
-  assert(OpNo == 1 && "Can only expand the stored value so far");
-
-  MVT::ValueType VT = N->getOperand(1).getValueType();
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(VT);
-  SDOperand Ch  = N->getChain();
-  SDOperand Ptr = N->getBasePtr();
-  int SVOffset = N->getSrcValueOffset();
-  unsigned Alignment = N->getAlignment();
-  bool isVolatile = N->isVolatile();
-  SDOperand Lo, Hi;
-
-  assert(!(MVT::getSizeInBits(NVT) & 7) && "Expanded type not byte sized!");
-
-  if (!N->isTruncatingStore()) {
-    unsigned IncrementSize = 0;
-    GetExpandedOp(N->getValue(), Lo, Hi);
-    IncrementSize = MVT::getSizeInBits(Hi.getValueType())/8;
-
-    if (TLI.isBigEndian())
-      std::swap(Lo, Hi);
-
-    Lo = DAG.getStore(Ch, Lo, Ptr, N->getSrcValue(),
-                      SVOffset, isVolatile, Alignment);
-
-    Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
-                      DAG.getIntPtrConstant(IncrementSize));
-    assert(isTypeLegal(Ptr.getValueType()) && "Pointers must be legal!");
-    Hi = DAG.getStore(Ch, Hi, Ptr, N->getSrcValue(), SVOffset+IncrementSize,
-                      isVolatile, MinAlign(Alignment, IncrementSize));
-    return DAG.getNode(ISD::TokenFactor, MVT::Other, Lo, Hi);
-  } else if (MVT::getSizeInBits(N->getMemoryVT()) <= MVT::getSizeInBits(NVT)) {
-    GetExpandedOp(N->getValue(), Lo, Hi);
-    return DAG.getTruncStore(Ch, Lo, Ptr, N->getSrcValue(), SVOffset,
-                             N->getMemoryVT(), isVolatile, Alignment);
-  } else if (TLI.isLittleEndian()) {
-    // Little-endian - low bits are at low addresses.
-    GetExpandedOp(N->getValue(), Lo, Hi);
-
-    Lo = DAG.getStore(Ch, Lo, Ptr, N->getSrcValue(), SVOffset,
-                      isVolatile, Alignment);
-
-    unsigned ExcessBits =
-      MVT::getSizeInBits(N->getMemoryVT()) - MVT::getSizeInBits(NVT);
-    MVT::ValueType NEVT = MVT::getIntegerType(ExcessBits);
-
-    // Increment the pointer to the other half.
-    unsigned IncrementSize = MVT::getSizeInBits(NVT)/8;
-    Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
-                      DAG.getIntPtrConstant(IncrementSize));
-    Hi = DAG.getTruncStore(Ch, Hi, Ptr, N->getSrcValue(),
-                           SVOffset+IncrementSize, NEVT,
-                           isVolatile, MinAlign(Alignment, IncrementSize));
-    return DAG.getNode(ISD::TokenFactor, MVT::Other, Lo, Hi);
-  } else {
-    // Big-endian - high bits are at low addresses.  Favor aligned stores at
-    // the cost of some bit-fiddling.
-    GetExpandedOp(N->getValue(), Lo, Hi);
-
-    MVT::ValueType EVT = N->getMemoryVT();
-    unsigned EBytes = MVT::getStoreSizeInBits(EVT)/8;
-    unsigned IncrementSize = MVT::getSizeInBits(NVT)/8;
-    unsigned ExcessBits = (EBytes - IncrementSize)*8;
-    MVT::ValueType HiVT =
-      MVT::getIntegerType(MVT::getSizeInBits(EVT)-ExcessBits);
-
-    if (ExcessBits < MVT::getSizeInBits(NVT)) {
-      // Transfer high bits from the top of Lo to the bottom of Hi.
-      Hi = DAG.getNode(ISD::SHL, NVT, Hi,
-                       DAG.getConstant(MVT::getSizeInBits(NVT) - ExcessBits,
-                                       TLI.getShiftAmountTy()));
-      Hi = DAG.getNode(ISD::OR, NVT, Hi,
-                       DAG.getNode(ISD::SRL, NVT, Lo,
-                                   DAG.getConstant(ExcessBits,
-                                                   TLI.getShiftAmountTy())));
-    }
-
-    // Store both the high bits and maybe some of the low bits.
-    Hi = DAG.getTruncStore(Ch, Hi, Ptr, N->getSrcValue(),
-                           SVOffset, HiVT, isVolatile, Alignment);
-
-    // Increment the pointer to the other half.
-    Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
-                      DAG.getIntPtrConstant(IncrementSize));
-    // Store the lowest ExcessBits bits in the second half.
-    Lo = DAG.getTruncStore(Ch, Lo, Ptr, N->getSrcValue(),
-                           SVOffset+IncrementSize,
-                           MVT::getIntegerType(ExcessBits),
-                           isVolatile, MinAlign(Alignment, IncrementSize));
-    return DAG.getNode(ISD::TokenFactor, MVT::Other, Lo, Hi);
-  }
-}
-
-SDOperand DAGTypeLegalizer::ExpandOperand_BUILD_VECTOR(SDNode *N) {
-  // The vector type is legal but the element type needs expansion.
-  MVT::ValueType VecVT = N->getValueType(0);
-  unsigned NumElts = MVT::getVectorNumElements(VecVT);
-  MVT::ValueType OldVT = N->getOperand(0).getValueType();
-  MVT::ValueType NewVT = TLI.getTypeToTransformTo(OldVT);
-
-  assert(MVT::getSizeInBits(OldVT) == 2 * MVT::getSizeInBits(NewVT) &&
-         "Do not know how to expand this operand!");
-
-  // Build a vector of twice the length out of the expanded elements.
-  // For example <2 x i64> -> <4 x i32>.
-  std::vector<SDOperand> NewElts;
-  NewElts.reserve(NumElts*2);
-
-  for (unsigned i = 0; i < NumElts; ++i) {
-    SDOperand Lo, Hi;
-    GetExpandedOp(N->getOperand(i), Lo, Hi);
-    if (TLI.isBigEndian())
-      std::swap(Lo, Hi);
-    NewElts.push_back(Lo);
-    NewElts.push_back(Hi);
-  }
-
-  SDOperand NewVec = DAG.getNode(ISD::BUILD_VECTOR,
-                                 MVT::getVectorType(NewVT, NewElts.size()),
-                                 &NewElts[0], NewElts.size());
-
-  // Convert the new vector to the old vector type.
-  return DAG.getNode(ISD::BIT_CONVERT, VecVT, NewVec);
-}
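
As context for the integer-expansion routines removed above: ExpandShiftByConstant
splits a shift in an illegal wide type into shifts and an OR on the two legal
halves.  The following standalone sketch is not part of this patch; the function
name and the fixed 32-bit half width are illustrative only, and the out-of-range
boundary cases are simplified to keep the C++ well defined.  It shows the same
arithmetic for a 64-bit left shift expressed on two 32-bit words:

#include <cassert>
#include <cstdint>

// Mirror of the constant-amount SHL expansion: InL/InH are the low and high
// 32-bit halves of the 64-bit value, Amt is the shift amount.
static void expandShl64(uint32_t InL, uint32_t InH, unsigned Amt,
                        uint32_t &Lo, uint32_t &Hi) {
  const unsigned NVTBits = 32;
  if (Amt == 0) {                      // Nothing to shift.
    Lo = InL; Hi = InH;
  } else if (Amt >= 2 * NVTBits) {     // Everything is shifted out.
    Lo = Hi = 0;
  } else if (Amt > NVTBits) {          // Only part of the low half survives, in Hi.
    Lo = 0;
    Hi = InL << (Amt - NVTBits);
  } else if (Amt == NVTBits) {         // Low word moves up unchanged.
    Lo = 0;
    Hi = InL;
  } else {                             // General case: combine both words.
    Lo = InL << Amt;
    Hi = (InH << Amt) | (InL >> (NVTBits - Amt));
  }
}

int main() {
  // 0x0123456789abcdef << 8 == 0x23456789abcdef00.
  uint32_t Lo, Hi;
  expandShl64(0x89abcdefu, 0x01234567u, 8, Lo, Hi);
  assert(Lo == 0xabcdef00u && Hi == 0x23456789u);
  return 0;
}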

Removed: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesFloatToInt.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesFloatToInt.cpp?rev=53162&view=auto

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesFloatToInt.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesFloatToInt.cpp (removed)
@@ -1,356 +0,0 @@
-//===-- LegalizeTypesFloatToInt.cpp - LegalizeTypes float to int support --===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements float to integer conversion for LegalizeTypes.  This
-// is the act of turning a computation in an invalid floating point type into
-// a computation in an integer type of the same size.  For example, turning
-// f32 arithmetic into operations using i32.  Also known as "soft float".
-// The result is equivalent to bitcasting the float value to the integer type.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/DerivedTypes.h"
-#include "LegalizeTypes.h"
-using namespace llvm;
-
-/// GetFPLibCall - Return the right libcall for the given floating point type.
-static RTLIB::Libcall GetFPLibCall(MVT::ValueType VT,
-                                   RTLIB::Libcall Call_F32,
-                                   RTLIB::Libcall Call_F64,
-                                   RTLIB::Libcall Call_F80,
-                                   RTLIB::Libcall Call_PPCF128) {
-  return
-    VT == MVT::f32 ? Call_F32 :
-    VT == MVT::f64 ? Call_F64 :
-    VT == MVT::f80 ? Call_F80 :
-    VT == MVT::ppcf128 ? Call_PPCF128 :
-    RTLIB::UNKNOWN_LIBCALL;
-}
-
-//===----------------------------------------------------------------------===//
-//  Result Float to Integer Conversion.
-//===----------------------------------------------------------------------===//
-
-void DAGTypeLegalizer::FloatToIntResult(SDNode *N, unsigned ResNo) {
-  DEBUG(cerr << "FloatToInt node result " << ResNo << ": "; N->dump(&DAG);
-        cerr << "\n");
-  SDOperand R = SDOperand();
-
-  // FIXME: Custom lowering for float-to-int?
-#if 0
-  // See if the target wants to custom convert this node to an integer.
-  if (TLI.getOperationAction(N->getOpcode(), N->getValueType(0)) ==
-      TargetLowering::Custom) {
-    // If the target wants to, allow it to lower this itself.
-    if (SDNode *P = TLI.FloatToIntOperationResult(N, DAG)) {
-      // Everything that once used N now uses P.  We are guaranteed that the
-      // result value types of N and the result value types of P match.
-      ReplaceNodeWith(N, P);
-      return;
-    }
-  }
-#endif
-
-  switch (N->getOpcode()) {
-  default:
-#ifndef NDEBUG
-    cerr << "FloatToIntResult #" << ResNo << ": ";
-    N->dump(&DAG); cerr << "\n";
-#endif
-    assert(0 && "Do not know how to convert the result of this operator!");
-    abort();
-
-    case ISD::BIT_CONVERT: R = FloatToIntRes_BIT_CONVERT(N); break;
-    case ISD::BUILD_PAIR:  R = FloatToIntRes_BUILD_PAIR(N); break;
-    case ISD::ConstantFP:
-      R = FloatToIntRes_ConstantFP(cast<ConstantFPSDNode>(N));
-      break;
-    case ISD::FCOPYSIGN:   R = FloatToIntRes_FCOPYSIGN(N); break;
-    case ISD::LOAD:        R = FloatToIntRes_LOAD(N); break;
-    case ISD::SINT_TO_FP:
-    case ISD::UINT_TO_FP:  R = FloatToIntRes_XINT_TO_FP(N); break;
-
-    case ISD::FADD: R = FloatToIntRes_FADD(N); break;
-    case ISD::FMUL: R = FloatToIntRes_FMUL(N); break;
-    case ISD::FSUB: R = FloatToIntRes_FSUB(N); break;
-  }
-
-  // If R is null, the sub-method took care of registering the result.
-  if (R.Val)
-    SetIntegerOp(SDOperand(N, ResNo), R);
-}
-
-SDOperand DAGTypeLegalizer::FloatToIntRes_BIT_CONVERT(SDNode *N) {
-  return BitConvertToInteger(N->getOperand(0));
-}
-
-SDOperand DAGTypeLegalizer::FloatToIntRes_BUILD_PAIR(SDNode *N) {
-  // Convert the inputs to integers, and build a new pair out of them.
-  return DAG.getNode(ISD::BUILD_PAIR,
-                     TLI.getTypeToTransformTo(N->getValueType(0)),
-                     BitConvertToInteger(N->getOperand(0)),
-                     BitConvertToInteger(N->getOperand(1)));
-}
-
-SDOperand DAGTypeLegalizer::FloatToIntRes_ConstantFP(ConstantFPSDNode *N) {
-  return DAG.getConstant(N->getValueAPF().convertToAPInt(),
-                         TLI.getTypeToTransformTo(N->getValueType(0)));
-}
-
-SDOperand DAGTypeLegalizer::FloatToIntRes_FADD(SDNode *N) {
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0));
-  SDOperand Ops[2] = { GetIntegerOp(N->getOperand(0)),
-                       GetIntegerOp(N->getOperand(1)) };
-  return MakeLibCall(GetFPLibCall(N->getValueType(0),
-                                  RTLIB::ADD_F32,
-                                  RTLIB::ADD_F64,
-                                  RTLIB::ADD_F80,
-                                  RTLIB::ADD_PPCF128),
-                     NVT, Ops, 2, false/*sign irrelevant*/);
-}
-
-SDOperand DAGTypeLegalizer::FloatToIntRes_FCOPYSIGN(SDNode *N) {
-  SDOperand LHS = GetIntegerOp(N->getOperand(0));
-  SDOperand RHS = BitConvertToInteger(N->getOperand(1));
-
-  MVT::ValueType LVT = LHS.getValueType();
-  MVT::ValueType RVT = RHS.getValueType();
-
-  unsigned LSize = MVT::getSizeInBits(LVT);
-  unsigned RSize = MVT::getSizeInBits(RVT);
-
-  // First get the sign bit of second operand.
-  SDOperand SignBit = DAG.getNode(ISD::SHL, RVT, DAG.getConstant(1, RVT),
-                                  DAG.getConstant(RSize - 1,
-                                                  TLI.getShiftAmountTy()));
-  SignBit = DAG.getNode(ISD::AND, RVT, RHS, SignBit);
-
-  // Shift right or sign-extend it if the two operands have different types.
-  int SizeDiff = MVT::getSizeInBits(RVT) - MVT::getSizeInBits(LVT);
-  if (SizeDiff > 0) {
-    SignBit = DAG.getNode(ISD::SRL, RVT, SignBit,
-                          DAG.getConstant(SizeDiff, TLI.getShiftAmountTy()));
-    SignBit = DAG.getNode(ISD::TRUNCATE, LVT, SignBit);
-  } else if (SizeDiff < 0) {
-    SignBit = DAG.getNode(ISD::ANY_EXTEND, LVT, SignBit);
-    SignBit = DAG.getNode(ISD::SHL, LVT, SignBit,
-                          DAG.getConstant(-SizeDiff, TLI.getShiftAmountTy()));
-  }
-
-  // Clear the sign bit of the first operand.
-  SDOperand Mask = DAG.getNode(ISD::SHL, LVT, DAG.getConstant(1, LVT),
-                               DAG.getConstant(LSize - 1,
-                                               TLI.getShiftAmountTy()));
-  Mask = DAG.getNode(ISD::SUB, LVT, Mask, DAG.getConstant(1, LVT));
-  LHS = DAG.getNode(ISD::AND, LVT, LHS, Mask);
-
-  // Or the value with the sign bit.
-  return DAG.getNode(ISD::OR, LVT, LHS, SignBit);
-}
-
-SDOperand DAGTypeLegalizer::FloatToIntRes_FMUL(SDNode *N) {
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0));
-  SDOperand Ops[2] = { GetIntegerOp(N->getOperand(0)),
-                       GetIntegerOp(N->getOperand(1)) };
-  return MakeLibCall(GetFPLibCall(N->getValueType(0),
-                                  RTLIB::MUL_F32,
-                                  RTLIB::MUL_F64,
-                                  RTLIB::MUL_F80,
-                                  RTLIB::MUL_PPCF128),
-                     NVT, Ops, 2, false/*sign irrelevant*/);
-}
-
-SDOperand DAGTypeLegalizer::FloatToIntRes_FSUB(SDNode *N) {
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0));
-  SDOperand Ops[2] = { GetIntegerOp(N->getOperand(0)),
-                       GetIntegerOp(N->getOperand(1)) };
-  return MakeLibCall(GetFPLibCall(N->getValueType(0),
-                                  RTLIB::SUB_F32,
-                                  RTLIB::SUB_F64,
-                                  RTLIB::SUB_F80,
-                                  RTLIB::SUB_PPCF128),
-                     NVT, Ops, 2, false/*sign irrelevant*/);
-}
-
-SDOperand DAGTypeLegalizer::FloatToIntRes_LOAD(SDNode *N) {
-  LoadSDNode *L = cast<LoadSDNode>(N);
-  MVT::ValueType VT = N->getValueType(0);
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(VT);
-
-  if (L->getExtensionType() == ISD::NON_EXTLOAD)
-     return DAG.getLoad(L->getAddressingMode(), L->getExtensionType(),
-                        NVT, L->getChain(), L->getBasePtr(), L->getOffset(),
-                        L->getSrcValue(), L->getSrcValueOffset(), NVT,
-                        L->isVolatile(), L->getAlignment());
-
-  // Do a non-extending load followed by FP_EXTEND.
-  SDOperand NL = DAG.getLoad(L->getAddressingMode(), ISD::NON_EXTLOAD,
-                             L->getMemoryVT(), L->getChain(),
-                             L->getBasePtr(), L->getOffset(),
-                             L->getSrcValue(), L->getSrcValueOffset(),
-                             L->getMemoryVT(),
-                             L->isVolatile(), L->getAlignment());
-  return BitConvertToInteger(DAG.getNode(ISD::FP_EXTEND, VT, NL));
-}
-
-SDOperand DAGTypeLegalizer::FloatToIntRes_XINT_TO_FP(SDNode *N) {
-  bool isSigned = N->getOpcode() == ISD::SINT_TO_FP;
-  MVT::ValueType DestVT = N->getValueType(0);
-  SDOperand Op = N->getOperand(0);
-
-  if (Op.getValueType() == MVT::i32) {
-    // simple 32-bit [signed|unsigned] integer to float/double expansion
-
-    // Get the stack frame index of an 8-byte buffer.
-    SDOperand StackSlot = DAG.CreateStackTemporary(MVT::f64);
-
-    // word offset constant for Hi/Lo address computation
-    SDOperand Offset = DAG.getConstant(MVT::getSizeInBits(MVT::i32) / 8,
-                                       TLI.getPointerTy());
-    // set up Hi and Lo (into buffer) address based on endian
-    SDOperand Hi = StackSlot;
-    SDOperand Lo = DAG.getNode(ISD::ADD, TLI.getPointerTy(), StackSlot, Offset);
-    if (TLI.isLittleEndian())
-      std::swap(Hi, Lo);
-
-    // if signed map to unsigned space
-    SDOperand OpMapped;
-    if (isSigned) {
-      // constant used to invert sign bit (signed to unsigned mapping)
-      SDOperand SignBit = DAG.getConstant(0x80000000u, MVT::i32);
-      OpMapped = DAG.getNode(ISD::XOR, MVT::i32, Op, SignBit);
-    } else {
-      OpMapped = Op;
-    }
-    // store the lo of the constructed double - based on integer input
-    SDOperand Store1 = DAG.getStore(DAG.getEntryNode(),
-                                    OpMapped, Lo, NULL, 0);
-    // initial hi portion of constructed double
-    SDOperand InitialHi = DAG.getConstant(0x43300000u, MVT::i32);
-    // store the hi of the constructed double - biased exponent
-    SDOperand Store2=DAG.getStore(Store1, InitialHi, Hi, NULL, 0);
-    // load the constructed double
-    SDOperand Load = DAG.getLoad(MVT::f64, Store2, StackSlot, NULL, 0);
-    // FP constant to bias correct the final result
-    SDOperand Bias = DAG.getConstantFP(isSigned ?
-                                            BitsToDouble(0x4330000080000000ULL)
-                                          : BitsToDouble(0x4330000000000000ULL),
-                                     MVT::f64);
-    // subtract the bias
-    SDOperand Sub = DAG.getNode(ISD::FSUB, MVT::f64, Load, Bias);
-    // final result
-    SDOperand Result;
-    // handle final rounding
-    if (DestVT == MVT::f64) {
-      // do nothing
-      Result = Sub;
-    } else if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(MVT::f64)) {
-      Result = DAG.getNode(ISD::FP_ROUND, DestVT, Sub,
-                           DAG.getIntPtrConstant(0));
-    } else if (MVT::getSizeInBits(DestVT) > MVT::getSizeInBits(MVT::f64)) {
-      Result = DAG.getNode(ISD::FP_EXTEND, DestVT, Sub);
-    }
-    return BitConvertToInteger(Result);
-  }
-  assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet");
-  SDOperand Tmp1 = DAG.getNode(ISD::SINT_TO_FP, DestVT, Op);
-
-  SDOperand SignSet = DAG.getSetCC(TLI.getSetCCResultType(Op), Op,
-                                   DAG.getConstant(0, Op.getValueType()),
-                                   ISD::SETLT);
-  SDOperand Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4);
-  SDOperand CstOffset = DAG.getNode(ISD::SELECT, Zero.getValueType(),
-                                    SignSet, Four, Zero);
-
-  // If the sign bit of the integer is set, the large number will be treated
-  // as a negative number.  To counteract this, the dynamic code adds an
-  // offset depending on the data type.
-  uint64_t FF;
-  switch (Op.getValueType()) {
-  default: assert(0 && "Unsupported integer type!");
-  case MVT::i8 : FF = 0x43800000ULL; break;  // 2^8  (as a float)
-  case MVT::i16: FF = 0x47800000ULL; break;  // 2^16 (as a float)
-  case MVT::i32: FF = 0x4F800000ULL; break;  // 2^32 (as a float)
-  case MVT::i64: FF = 0x5F800000ULL; break;  // 2^64 (as a float)
-  }
-  if (TLI.isLittleEndian()) FF <<= 32;
-  static Constant *FudgeFactor = ConstantInt::get(Type::Int64Ty, FF);
-
-  SDOperand CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy());
-  CPIdx = DAG.getNode(ISD::ADD, TLI.getPointerTy(), CPIdx, CstOffset);
-  SDOperand FudgeInReg;
-  if (DestVT == MVT::f32)
-    FudgeInReg = DAG.getLoad(MVT::f32, DAG.getEntryNode(), CPIdx,
-                             PseudoSourceValue::getConstantPool(), 0);
-  else {
-    FudgeInReg = DAG.getExtLoad(ISD::EXTLOAD, DestVT,
-                                DAG.getEntryNode(), CPIdx,
-                                PseudoSourceValue::getConstantPool(), 0,
-                                MVT::f32);
-  }
-
-  return BitConvertToInteger(DAG.getNode(ISD::FADD, DestVT, Tmp1, FudgeInReg));
-}
-
-
-//===----------------------------------------------------------------------===//
-//  Operand Float to Integer Conversion.
-//===----------------------------------------------------------------------===//
-
-bool DAGTypeLegalizer::FloatToIntOperand(SDNode *N, unsigned OpNo) {
-  DEBUG(cerr << "FloatToInt node operand " << OpNo << ": "; N->dump(&DAG);
-        cerr << "\n");
-  SDOperand Res(0, 0);
-
-  // FIXME: Custom lowering for float-to-int?
-#if 0
-  if (TLI.getOperationAction(N->getOpcode(), N->getOperand(OpNo).getValueType())
-      == TargetLowering::Custom)
-    Res = TLI.LowerOperation(SDOperand(N, 0), DAG);
-#endif
-
-  if (Res.Val == 0) {
-    switch (N->getOpcode()) {
-    default:
-#ifndef NDEBUG
-      cerr << "FloatToIntOperand Op #" << OpNo << ": ";
-      N->dump(&DAG); cerr << "\n";
-#endif
-      assert(0 && "Do not know how to convert this operator's operand!");
-      abort();
-
-      case ISD::BIT_CONVERT: Res = FloatToIntOp_BIT_CONVERT(N); break;
-    }
-  }
-
-  // If the result is null, the sub-method took care of registering results etc.
-  if (!Res.Val) return false;
-
-  // If the result is N, the sub-method updated N in place.  Check to see if any
-  // operands are new, and if so, mark them.
-  if (Res.Val == N) {
-    // Mark N as new and remark N and its operands.  This allows us to correctly
-    // revisit N if it needs another step of promotion and allows us to visit
-    // any new operands to N.
-    ReanalyzeNode(N);
-    return true;
-  }
-
-  assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
-         "Invalid operand expansion");
-
-  ReplaceValueWith(SDOperand(N, 0), Res);
-  return false;
-}
-
-SDOperand DAGTypeLegalizer::FloatToIntOp_BIT_CONVERT(SDNode *N) {
-  return DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0),
-                     GetIntegerOp(N->getOperand(0)));
-}
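
A note for readers of the hunk above: the removed int-to-fp expansion does a signed conversion and then adds back 2^N when the sign bit of the unsigned input was set; the 0x43800000/0x47800000/0x4F800000/0x5F800000 constants are 2^8, 2^16, 2^32 and 2^64 encoded as 32-bit floats. A minimal standalone C++ sketch of the same computation for a 64-bit input (not part of the patch):

#include <cstdint>
#include <cstdio>

// Convert u64 -> double using only a signed conversion, mirroring the
// fudge-factor sequence in the removed code above.
static double U64ToDouble(uint64_t X) {
  // Signed convert first; inputs with the top bit set come out negative.
  double D = static_cast<double>(static_cast<int64_t>(X));
  // 2^64 as a double; plays the role of the 0x5F800000 constant-pool value.
  const double TwoTo64 = 18446744073709551616.0;
  if (static_cast<int64_t>(X) < 0)
    D += TwoTo64;   // add the offset selected by the sign-bit SETCC/SELECT
  return D;
}

int main() {
  std::printf("%g\n", U64ToDouble(~0ULL));  // ~1.84467e+19, not -1.0
  return 0;
}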

Added: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp?rev=53163&view=auto

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp (added)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp Sun Jul  6 15:45:41 2008
@@ -0,0 +1,330 @@
+//===-------- LegalizeTypesGeneric.cpp - Generic type legalization --------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements generic type expansion and splitting for LegalizeTypes.
+// The routines here perform legalization when the details of the type (such as
+// whether it is an integer or a float) do not matter.
+// Expansion is the act of changing a computation in an illegal type to be a
+// computation in two identical registers of a smaller type.
+// Splitting is the act of changing a computation in an illegal type to be a
+// computation in two not necessarily identical registers of a smaller type.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LegalizeTypes.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Generic Result Expansion.
+//===----------------------------------------------------------------------===//
+
+// These routines assume that the Lo/Hi part is stored first in memory on
+// little/big-endian machines, followed by the Hi/Lo part.  This means that
+// they cannot be used as is on vectors, for which Lo is always stored first.
+
+void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDOperand &Lo,
+                                             SDOperand &Hi) {
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+  SDOperand InOp = N->getOperand(0);
+  MVT InVT = InOp.getValueType();
+
+  // Handle some special cases efficiently.
+  switch (getTypeAction(InVT)) {
+    default:
+      assert(false && "Unknown type action!");
+    case Legal:
+    case PromoteInteger:
+      break;
+    case SoftenFloat:
+      // Convert the integer operand instead.
+      SplitInteger(GetSoftenedFloat(InOp), Lo, Hi);
+      Lo = DAG.getNode(ISD::BIT_CONVERT, NVT, Lo);
+      Hi = DAG.getNode(ISD::BIT_CONVERT, NVT, Hi);
+      return;
+    case ExpandInteger:
+    case ExpandFloat:
+      // Convert the expanded pieces of the input.
+      GetExpandedOp(InOp, Lo, Hi);
+      Lo = DAG.getNode(ISD::BIT_CONVERT, NVT, Lo);
+      Hi = DAG.getNode(ISD::BIT_CONVERT, NVT, Hi);
+      return;
+    case Split:
+      // Convert the split parts of the input if it was split in two.
+      GetSplitVector(InOp, Lo, Hi);
+      if (Lo.getValueType() == Hi.getValueType()) {
+        if (TLI.isBigEndian())
+          std::swap(Lo, Hi);
+        Lo = DAG.getNode(ISD::BIT_CONVERT, NVT, Lo);
+        Hi = DAG.getNode(ISD::BIT_CONVERT, NVT, Hi);
+        return;
+      }
+      break;
+    case Scalarize:
+      // Convert the element instead.
+      SplitInteger(BitConvertToInteger(GetScalarizedVector(InOp)), Lo, Hi);
+      Lo = DAG.getNode(ISD::BIT_CONVERT, NVT, Lo);
+      Hi = DAG.getNode(ISD::BIT_CONVERT, NVT, Hi);
+      return;
+  }
+
+  // Lower the bit-convert to a store/load from the stack, then expand the load.
+  SDOperand Op = CreateStackStoreLoad(InOp, N->getValueType(0));
+  ExpandRes_NormalLoad(Op.Val, Lo, Hi);
+}
+
+void DAGTypeLegalizer::ExpandRes_BUILD_PAIR(SDNode *N, SDOperand &Lo,
+                                            SDOperand &Hi) {
+  // Return the operands.
+  Lo = N->getOperand(0);
+  Hi = N->getOperand(1);
+}
+
+void DAGTypeLegalizer::ExpandRes_EXTRACT_ELEMENT(SDNode *N, SDOperand &Lo,
+                                                 SDOperand &Hi) {
+  GetExpandedOp(N->getOperand(0), Lo, Hi);
+  SDOperand Part = cast<ConstantSDNode>(N->getOperand(1))->getValue() ? Hi : Lo;
+
+  assert(Part.getValueType() == N->getValueType(0) &&
+         "Type twice as big as expanded type not itself expanded!");
+  MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
+
+  Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Part,
+                   DAG.getConstant(0, TLI.getPointerTy()));
+  Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Part,
+                   DAG.getConstant(1, TLI.getPointerTy()));
+}
+
+void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDOperand &Lo,
+                                                    SDOperand &Hi) {
+  SDOperand OldVec = N->getOperand(0);
+  unsigned OldElts = OldVec.getValueType().getVectorNumElements();
+
+  // Convert to a vector of the expanded element type, for example
+  // <3 x i64> -> <6 x i32>.
+  MVT OldVT = N->getValueType(0);
+  MVT NewVT = TLI.getTypeToTransformTo(OldVT);
+
+  SDOperand NewVec = DAG.getNode(ISD::BIT_CONVERT,
+                                 MVT::getVectorVT(NewVT, 2*OldElts),
+                                 OldVec);
+
+  // Extract the elements at 2 * Idx and 2 * Idx + 1 from the new vector.
+  SDOperand Idx = N->getOperand(1);
+
+  // Make sure the type of Idx is big enough to hold the new values.
+  if (Idx.getValueType().bitsLT(TLI.getPointerTy()))
+    Idx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), Idx);
+
+  Idx = DAG.getNode(ISD::ADD, Idx.getValueType(), Idx, Idx);
+  Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, NewVT, NewVec, Idx);
+
+  Idx = DAG.getNode(ISD::ADD, Idx.getValueType(), Idx,
+                    DAG.getConstant(1, Idx.getValueType()));
+  Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, NewVT, NewVec, Idx);
+
+  if (TLI.isBigEndian())
+    std::swap(Lo, Hi);
+}
+
+void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDOperand &Lo,
+                                            SDOperand &Hi) {
+  assert(ISD::isNormalLoad(N) && "This routine only for normal loads!");
+
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  MVT NVT = TLI.getTypeToTransformTo(LD->getValueType(0));
+  SDOperand Chain = LD->getChain();
+  SDOperand Ptr = LD->getBasePtr();
+  int SVOffset = LD->getSrcValueOffset();
+  unsigned Alignment = LD->getAlignment();
+  bool isVolatile = LD->isVolatile();
+
+  assert(NVT.isByteSized() && "Expanded type not byte sized!");
+
+  Lo = DAG.getLoad(NVT, Chain, Ptr, LD->getSrcValue(), SVOffset,
+                   isVolatile, Alignment);
+
+  // Increment the pointer to the other half.
+  unsigned IncrementSize = NVT.getSizeInBits() / 8;
+  Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
+                    DAG.getIntPtrConstant(IncrementSize));
+  Hi = DAG.getLoad(NVT, Chain, Ptr, LD->getSrcValue(), SVOffset+IncrementSize,
+                   isVolatile, MinAlign(Alignment, IncrementSize));
+
+  // Build a factor node to remember that this load is independent of the
+  // other one.
+  Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1),
+                      Hi.getValue(1));
+
+  // Handle endianness of the load.
+  if (TLI.isBigEndian())
+    std::swap(Lo, Hi);
+
+  // Modified the chain - switch anything that used the old chain to use
+  // the new one.
+  ReplaceValueWith(SDOperand(N, 1), Chain);
+}
+
+//===--------------------------------------------------------------------===//
+// Generic Operand Expansion.
+//===--------------------------------------------------------------------===//
+
+SDOperand DAGTypeLegalizer::ExpandOp_BIT_CONVERT(SDNode *N) {
+  if (N->getValueType(0).isVector()) {
+    // An illegal expanding type is being converted to a legal vector type.
+    // Make a two element vector out of the expanded parts and convert that
+    // instead, but only if the new vector type is legal (otherwise there
+    // is no point, and it might create expansion loops).  For example, on
+    // x86 this turns v1i64 = BIT_CONVERT i64 into v1i64 = BIT_CONVERT v2i32.
+    MVT OVT = N->getOperand(0).getValueType();
+    MVT NVT = MVT::getVectorVT(TLI.getTypeToTransformTo(OVT), 2);
+
+    if (isTypeLegal(NVT)) {
+      SDOperand Parts[2];
+      GetExpandedOp(N->getOperand(0), Parts[0], Parts[1]);
+
+      if (TLI.isBigEndian())
+        std::swap(Parts[0], Parts[1]);
+
+      SDOperand Vec = DAG.getNode(ISD::BUILD_VECTOR, NVT, Parts, 2);
+      return DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0), Vec);
+    }
+  }
+
+  // Otherwise, store to a temporary and load out again as the new type.
+  return CreateStackStoreLoad(N->getOperand(0), N->getValueType(0));
+}
+
+SDOperand DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) {
+  // The vector type is legal but the element type needs expansion.
+  MVT VecVT = N->getValueType(0);
+  unsigned NumElts = VecVT.getVectorNumElements();
+  MVT OldVT = N->getOperand(0).getValueType();
+  MVT NewVT = TLI.getTypeToTransformTo(OldVT);
+
+  // Build a vector of twice the length out of the expanded elements.
+  // For example <3 x i64> -> <6 x i32>.
+  std::vector<SDOperand> NewElts;
+  NewElts.reserve(NumElts*2);
+
+  for (unsigned i = 0; i < NumElts; ++i) {
+    SDOperand Lo, Hi;
+    GetExpandedOp(N->getOperand(i), Lo, Hi);
+    if (TLI.isBigEndian())
+      std::swap(Lo, Hi);
+    NewElts.push_back(Lo);
+    NewElts.push_back(Hi);
+  }
+
+  SDOperand NewVec = DAG.getNode(ISD::BUILD_VECTOR,
+                                 MVT::getVectorVT(NewVT, NewElts.size()),
+                                 &NewElts[0], NewElts.size());
+
+  // Convert the new vector to the old vector type.
+  return DAG.getNode(ISD::BIT_CONVERT, VecVT, NewVec);
+}
+
+SDOperand DAGTypeLegalizer::ExpandOp_EXTRACT_ELEMENT(SDNode *N) {
+  SDOperand Lo, Hi;
+  GetExpandedOp(N->getOperand(0), Lo, Hi);
+  return cast<ConstantSDNode>(N->getOperand(1))->getValue() ? Hi : Lo;
+}
+
+SDOperand DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) {
+  assert(ISD::isNormalStore(N) && "This routine only for normal stores!");
+  assert(OpNo == 1 && "Can only expand the stored value so far");
+
+  StoreSDNode *St = cast<StoreSDNode>(N);
+  MVT NVT = TLI.getTypeToTransformTo(St->getValue().getValueType());
+  SDOperand Chain = St->getChain();
+  SDOperand Ptr = St->getBasePtr();
+  int SVOffset = St->getSrcValueOffset();
+  unsigned Alignment = St->getAlignment();
+  bool isVolatile = St->isVolatile();
+
+  assert(NVT.isByteSized() && "Expanded type not byte sized!");
+  unsigned IncrementSize = NVT.getSizeInBits() / 8;
+
+  SDOperand Lo, Hi;
+  GetExpandedOp(St->getValue(), Lo, Hi);
+
+  if (TLI.isBigEndian())
+    std::swap(Lo, Hi);
+
+  Lo = DAG.getStore(Chain, Lo, Ptr, St->getSrcValue(), SVOffset,
+                    isVolatile, Alignment);
+
+  Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
+                    DAG.getIntPtrConstant(IncrementSize));
+  assert(isTypeLegal(Ptr.getValueType()) && "Pointers must be legal!");
+  Hi = DAG.getStore(Chain, Hi, Ptr, St->getSrcValue(), SVOffset + IncrementSize,
+                    isVolatile, MinAlign(Alignment, IncrementSize));
+
+  return DAG.getNode(ISD::TokenFactor, MVT::Other, Lo, Hi);
+}
+
+
+//===--------------------------------------------------------------------===//
+// Generic Result Splitting.
+//===--------------------------------------------------------------------===//
+
+// Be careful to make no assumptions about which of Lo/Hi is stored first in
+// memory (for vectors it is always Lo first followed by Hi in the following
+// bytes; for integers and floats it is Lo first if and only if the machine is
+// little-endian).
+
+void DAGTypeLegalizer::SplitRes_MERGE_VALUES(SDNode *N,
+                                             SDOperand &Lo, SDOperand &Hi) {
+  // A MERGE_VALUES node can produce any number of values.  We know that the
+  // first illegal one needs to be expanded into Lo/Hi.
+  unsigned i;
+
+  // The string of legal results gets turned into the input operands, which have
+  // the same type.
+  for (i = 0; isTypeLegal(N->getValueType(i)); ++i)
+    ReplaceValueWith(SDOperand(N, i), SDOperand(N->getOperand(i)));
+
+  // The first illegal result must be the one that needs to be expanded.
+  GetSplitOp(N->getOperand(i), Lo, Hi);
+
+  // Legalize the rest of the results into the input operands whether they are
+  // legal or not.
+  unsigned e = N->getNumValues();
+  for (++i; i != e; ++i)
+    ReplaceValueWith(SDOperand(N, i), SDOperand(N->getOperand(i)));
+}
+
+void DAGTypeLegalizer::SplitRes_SELECT(SDNode *N, SDOperand &Lo,
+                                       SDOperand &Hi) {
+  SDOperand LL, LH, RL, RH;
+  GetSplitOp(N->getOperand(1), LL, LH);
+  GetSplitOp(N->getOperand(2), RL, RH);
+
+  SDOperand Cond = N->getOperand(0);
+  Lo = DAG.getNode(ISD::SELECT, LL.getValueType(), Cond, LL, RL);
+  Hi = DAG.getNode(ISD::SELECT, LH.getValueType(), Cond, LH, RH);
+}
+
+void DAGTypeLegalizer::SplitRes_SELECT_CC(SDNode *N, SDOperand &Lo,
+                                          SDOperand &Hi) {
+  SDOperand LL, LH, RL, RH;
+  GetSplitOp(N->getOperand(2), LL, LH);
+  GetSplitOp(N->getOperand(3), RL, RH);
+
+  Lo = DAG.getNode(ISD::SELECT_CC, LL.getValueType(), N->getOperand(0),
+                   N->getOperand(1), LL, RL, N->getOperand(4));
+  Hi = DAG.getNode(ISD::SELECT_CC, LH.getValueType(), N->getOperand(0),
+                   N->getOperand(1), LH, RH, N->getOperand(4));
+}
+
+void DAGTypeLegalizer::SplitRes_UNDEF(SDNode *N, SDOperand &Lo, SDOperand &Hi) {
+  MVT LoVT, HiVT;
+  GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
+  Lo = DAG.getNode(ISD::UNDEF, LoVT);
+  Hi = DAG.getNode(ISD::UNDEF, HiVT);
+}
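
For reference, a minimal standalone C++ sketch (not part of the patch) of what ExpandRes_NormalLoad / ExpandOp_NormalStore above amount to at run time, assuming a little-endian target where i64 expands into two i32 halves (IncrementSize == 4; on a big-endian target Lo and Hi swap):

#include <cstdint>
#include <cstdio>
#include <cstring>

struct ExpandedU64 { uint32_t Lo, Hi; };   // an i64 kept as two i32 registers

// Two loads at offsets 0 and 4, as in ExpandRes_NormalLoad.
static ExpandedU64 ExpandedLoad(const unsigned char *Ptr) {
  ExpandedU64 R;
  std::memcpy(&R.Lo, Ptr, 4);
  std::memcpy(&R.Hi, Ptr + 4, 4);
  return R;
}

// Two stores at offsets 0 and 4, as in ExpandOp_NormalStore.
static void ExpandedStore(unsigned char *Ptr, ExpandedU64 V) {
  std::memcpy(Ptr, &V.Lo, 4);
  std::memcpy(Ptr + 4, &V.Hi, 4);
}

int main() {
  unsigned char Mem[8];
  ExpandedStore(Mem, ExpandedU64{0xDDCCBBAAu, 0x44332211u});
  ExpandedU64 V = ExpandedLoad(Mem);
  std::printf("Lo=%08X Hi=%08X\n", V.Lo, V.Hi);  // Lo=DDCCBBAA Hi=44332211
  return 0;
}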

Removed: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesPromote.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesPromote.cpp?rev=53162&view=auto

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesPromote.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesPromote.cpp (removed)
@@ -1,748 +0,0 @@
-//===-- LegalizeTypesPromote.cpp - Promotion for LegalizeTypes ------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements promotion support for LegalizeTypes.  Promotion is the
-// act of changing a computation in an invalid type to be a computation in a 
-// larger type.  For example, implementing i8 arithmetic in an i32 register (as
-// is often needed on powerpc for example).
-//
-//===----------------------------------------------------------------------===//
-
-#include "LegalizeTypes.h"
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-//  Result Promotion
-//===----------------------------------------------------------------------===//
-
-/// PromoteResult - This method is called when a result of a node is found to be
-/// in need of promotion to a larger type.  At this point, the node may also
-/// have invalid operands or may have other results that need expansion, we just
-/// know that (at least) one result needs promotion.
-void DAGTypeLegalizer::PromoteResult(SDNode *N, unsigned ResNo) {
-  DEBUG(cerr << "Promote node result: "; N->dump(&DAG); cerr << "\n");
-  SDOperand Result = SDOperand();
-  
-  switch (N->getOpcode()) {
-  default:
-#ifndef NDEBUG
-    cerr << "PromoteResult #" << ResNo << ": ";
-    N->dump(&DAG); cerr << "\n";
-#endif
-    assert(0 && "Do not know how to promote this operator!");
-    abort();
-  case ISD::UNDEF:    Result = PromoteResult_UNDEF(N); break;
-  case ISD::Constant: Result = PromoteResult_Constant(N); break;
-
-  case ISD::TRUNCATE:    Result = PromoteResult_TRUNCATE(N); break;
-  case ISD::SIGN_EXTEND:
-  case ISD::ZERO_EXTEND:
-  case ISD::ANY_EXTEND:  Result = PromoteResult_INT_EXTEND(N); break;
-  case ISD::FP_ROUND:    Result = PromoteResult_FP_ROUND(N); break;
-  case ISD::FP_TO_SINT:
-  case ISD::FP_TO_UINT:  Result = PromoteResult_FP_TO_XINT(N); break;
-  case ISD::SETCC:    Result = PromoteResult_SETCC(N); break;
-  case ISD::LOAD:     Result = PromoteResult_LOAD(cast<LoadSDNode>(N)); break;
-  case ISD::BUILD_PAIR:  Result = PromoteResult_BUILD_PAIR(N); break;
-  case ISD::BIT_CONVERT: Result = PromoteResult_BIT_CONVERT(N); break;
-
-  case ISD::AND:
-  case ISD::OR:
-  case ISD::XOR:
-  case ISD::ADD:
-  case ISD::SUB:
-  case ISD::MUL:      Result = PromoteResult_SimpleIntBinOp(N); break;
-
-  case ISD::SDIV:
-  case ISD::SREM:     Result = PromoteResult_SDIV(N); break;
-
-  case ISD::UDIV:
-  case ISD::UREM:     Result = PromoteResult_UDIV(N); break;
-
-  case ISD::SHL:      Result = PromoteResult_SHL(N); break;
-  case ISD::SRA:      Result = PromoteResult_SRA(N); break;
-  case ISD::SRL:      Result = PromoteResult_SRL(N); break;
-
-  case ISD::SELECT:    Result = PromoteResult_SELECT(N); break;
-  case ISD::SELECT_CC: Result = PromoteResult_SELECT_CC(N); break;
-
-  case ISD::CTLZ:     Result = PromoteResult_CTLZ(N); break;
-  case ISD::CTPOP:    Result = PromoteResult_CTPOP(N); break;
-  case ISD::CTTZ:     Result = PromoteResult_CTTZ(N); break;
-
-  case ISD::EXTRACT_VECTOR_ELT:
-    Result = PromoteResult_EXTRACT_VECTOR_ELT(N);
-    break;
-  }      
-
-  // If Result is null, the sub-method took care of registering the result.
-  if (Result.Val)
-    SetPromotedOp(SDOperand(N, ResNo), Result);
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_UNDEF(SDNode *N) {
-  return DAG.getNode(ISD::UNDEF, TLI.getTypeToTransformTo(N->getValueType(0)));
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_Constant(SDNode *N) {
-  MVT::ValueType VT = N->getValueType(0);
-  // Zero extend things like i1, sign extend everything else.  It shouldn't
-  // matter in theory which one we pick, but this tends to give better code?
-  unsigned Opc = VT != MVT::i1 ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
-  SDOperand Result = DAG.getNode(Opc, TLI.getTypeToTransformTo(VT),
-                                 SDOperand(N, 0));
-  assert(isa<ConstantSDNode>(Result) && "Didn't constant fold ext?");
-  return Result;
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_TRUNCATE(SDNode *N) {
-  SDOperand Res;
-
-  switch (getTypeAction(N->getOperand(0).getValueType())) {
-  default: assert(0 && "Unknown type action!");
-  case Legal:
-  case Expand:
-    Res = N->getOperand(0);
-    break;
-  case Promote:
-    Res = GetPromotedOp(N->getOperand(0));
-    break;
-  }
-
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0));
-  assert(MVT::getSizeInBits(Res.getValueType()) >= MVT::getSizeInBits(NVT) &&
-         "Truncation doesn't make sense!");
-  if (Res.getValueType() == NVT)
-    return Res;
-
-  // Truncate to NVT instead of VT
-  return DAG.getNode(ISD::TRUNCATE, NVT, Res);
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_INT_EXTEND(SDNode *N) {
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0));
-
-  if (getTypeAction(N->getOperand(0).getValueType()) == Promote) {
-    SDOperand Res = GetPromotedOp(N->getOperand(0));
-    assert(MVT::getSizeInBits(Res.getValueType()) <= MVT::getSizeInBits(NVT) &&
-           "Extension doesn't make sense!");
-
-    // If the result and operand types are the same after promotion, simplify
-    // to an in-register extension.
-    if (NVT == Res.getValueType()) {
-      // The high bits are not guaranteed to be anything.  Insert an extend.
-      if (N->getOpcode() == ISD::SIGN_EXTEND)
-        return DAG.getNode(ISD::SIGN_EXTEND_INREG, NVT, Res,
-                           DAG.getValueType(N->getOperand(0).getValueType()));
-      if (N->getOpcode() == ISD::ZERO_EXTEND)
-        return DAG.getZeroExtendInReg(Res, N->getOperand(0).getValueType());
-      assert(N->getOpcode() == ISD::ANY_EXTEND && "Unknown integer extension!");
-      return Res;
-    }
-  }
-
-  // Otherwise, just extend the original operand all the way to the larger type.
-  return DAG.getNode(N->getOpcode(), NVT, N->getOperand(0));
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_FP_ROUND(SDNode *N) {
-  // NOTE: Assumes input is legal.
-  if (N->getConstantOperandVal(1) == 0) 
-    return DAG.getNode(ISD::FP_ROUND_INREG, N->getOperand(0).getValueType(),
-                       N->getOperand(0), DAG.getValueType(N->getValueType(0)));
-  // If the precision discard isn't needed, just return the operand unrounded.
-  return N->getOperand(0);
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_FP_TO_XINT(SDNode *N) {
-  SDOperand Op = N->getOperand(0);
-  // If the operand needed to be promoted, do so now.
-  if (getTypeAction(Op.getValueType()) == Promote)
-    // The input result is prerounded, so we don't have to do anything special.
-    Op = GetPromotedOp(Op);
-  
-  unsigned NewOpc = N->getOpcode();
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0));
-  
-  // If we're promoting a UINT to a larger size, check to see if the new node
-  // will be legal.  If it isn't, check to see if FP_TO_SINT is legal, since
-  // we can use that instead.  This allows us to generate better code for
-  // FP_TO_UINT for small destination sizes on targets where FP_TO_UINT is not
-  // legal, such as PowerPC.
-  if (N->getOpcode() == ISD::FP_TO_UINT) {
-    if (!TLI.isOperationLegal(ISD::FP_TO_UINT, NVT) &&
-        (TLI.isOperationLegal(ISD::FP_TO_SINT, NVT) ||
-         TLI.getOperationAction(ISD::FP_TO_SINT, NVT)==TargetLowering::Custom))
-      NewOpc = ISD::FP_TO_SINT;
-  }
-
-  return DAG.getNode(NewOpc, NVT, Op);
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_SETCC(SDNode *N) {
-  assert(isTypeLegal(TLI.getSetCCResultType(N->getOperand(0)))
-         && "SetCC type is not legal??");
-  return DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(N->getOperand(0)),
-                     N->getOperand(0), N->getOperand(1), N->getOperand(2));
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_LOAD(LoadSDNode *N) {
-  // FIXME: Add support for indexed loads.
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(N->getValueType(0));
-  ISD::LoadExtType ExtType =
-    ISD::isNON_EXTLoad(N) ? ISD::EXTLOAD : N->getExtensionType();
-  SDOperand Res = DAG.getExtLoad(ExtType, NVT, N->getChain(), N->getBasePtr(),
-                                 N->getSrcValue(), N->getSrcValueOffset(),
-                                 N->getMemoryVT(), N->isVolatile(),
-                                 N->getAlignment());
-
-  // Legalized the chain result - switch anything that used the old chain to
-  // use the new one.
-  ReplaceValueWith(SDOperand(N, 1), Res.getValue(1));
-  return Res;
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_BUILD_PAIR(SDNode *N) {
-  // The pair element type may be legal, or may not promote to the same type as
-  // the result, for example i14 = BUILD_PAIR (i7, i7).  Handle all cases.
-  return DAG.getNode(ISD::ANY_EXTEND,
-                     TLI.getTypeToTransformTo(N->getValueType(0)),
-                     JoinIntegers(N->getOperand(0), N->getOperand(1)));
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_BIT_CONVERT(SDNode *N) {
-  SDOperand InOp = N->getOperand(0);
-  MVT::ValueType InVT = InOp.getValueType();
-  MVT::ValueType NInVT = TLI.getTypeToTransformTo(InVT);
-  MVT::ValueType OutVT = TLI.getTypeToTransformTo(N->getValueType(0));
-
-  switch (getTypeAction(InVT)) {
-  default:
-    assert(false && "Unknown type action!");
-    break;
-  case Legal:
-    break;
-  case Promote:
-    if (MVT::getSizeInBits(OutVT) == MVT::getSizeInBits(NInVT))
-      // The input promotes to the same size.  Convert the promoted value.
-      return DAG.getNode(ISD::BIT_CONVERT, OutVT, GetPromotedOp(InOp));
-    break;
-  case Expand:
-    break;
-  case FloatToInt:
-    // Promote the integer operand by hand.
-    return DAG.getNode(ISD::ANY_EXTEND, OutVT, GetIntegerOp(InOp));
-  case Scalarize:
-    // Convert the element to an integer and promote it by hand.
-    return DAG.getNode(ISD::ANY_EXTEND, OutVT,
-                       BitConvertToInteger(GetScalarizedOp(InOp)));
-  case Split:
-    // For example, i32 = BIT_CONVERT v2i16 on alpha.  Convert the split
-    // pieces of the input into integers and reassemble in the final type.
-    SDOperand Lo, Hi;
-    GetSplitOp(N->getOperand(0), Lo, Hi);
-    Lo = BitConvertToInteger(Lo);
-    Hi = BitConvertToInteger(Hi);
-
-    if (TLI.isBigEndian())
-      std::swap(Lo, Hi);
-
-    InOp = DAG.getNode(ISD::ANY_EXTEND,
-                       MVT::getIntegerType(MVT::getSizeInBits(OutVT)),
-                       JoinIntegers(Lo, Hi));
-    return DAG.getNode(ISD::BIT_CONVERT, OutVT, InOp);
-  }
-
-  // Otherwise, lower the bit-convert to a store/load from the stack, then
-  // promote the load.
-  SDOperand Op = CreateStackStoreLoad(InOp, N->getValueType(0));
-  return PromoteResult_LOAD(cast<LoadSDNode>(Op.Val));
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_SimpleIntBinOp(SDNode *N) {
-  // The input may have strange things in the top bits of the registers, but
-  // these operations don't care.  They may have weird bits going out, but
-  // that too is okay if they are integer operations.
-  SDOperand LHS = GetPromotedOp(N->getOperand(0));
-  SDOperand RHS = GetPromotedOp(N->getOperand(1));
-  return DAG.getNode(N->getOpcode(), LHS.getValueType(), LHS, RHS);
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_SDIV(SDNode *N) {
-  // Sign extend the input.
-  SDOperand LHS = GetPromotedOp(N->getOperand(0));
-  SDOperand RHS = GetPromotedOp(N->getOperand(1));
-  MVT::ValueType VT = N->getValueType(0);
-  LHS = DAG.getNode(ISD::SIGN_EXTEND_INREG, LHS.getValueType(), LHS,
-                    DAG.getValueType(VT));
-  RHS = DAG.getNode(ISD::SIGN_EXTEND_INREG, RHS.getValueType(), RHS,
-                    DAG.getValueType(VT));
-
-  return DAG.getNode(N->getOpcode(), LHS.getValueType(), LHS, RHS);
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_UDIV(SDNode *N) {
-  // Zero extend the input.
-  SDOperand LHS = GetPromotedOp(N->getOperand(0));
-  SDOperand RHS = GetPromotedOp(N->getOperand(1));
-  MVT::ValueType VT = N->getValueType(0);
-  LHS = DAG.getZeroExtendInReg(LHS, VT);
-  RHS = DAG.getZeroExtendInReg(RHS, VT);
-
-  return DAG.getNode(N->getOpcode(), LHS.getValueType(), LHS, RHS);
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_SHL(SDNode *N) {
-  return DAG.getNode(ISD::SHL, TLI.getTypeToTransformTo(N->getValueType(0)),
-                     GetPromotedOp(N->getOperand(0)), N->getOperand(1));
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_SRA(SDNode *N) {
-  // The input value must be properly sign extended.
-  MVT::ValueType VT = N->getValueType(0);
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(VT);
-  SDOperand Res = GetPromotedOp(N->getOperand(0));
-  Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, NVT, Res, DAG.getValueType(VT));
-  return DAG.getNode(ISD::SRA, NVT, Res, N->getOperand(1));
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_SRL(SDNode *N) {
-  // The input value must be properly zero extended.
-  MVT::ValueType VT = N->getValueType(0);
-  MVT::ValueType NVT = TLI.getTypeToTransformTo(VT);
-  SDOperand Res = GetPromotedZExtOp(N->getOperand(0));
-  return DAG.getNode(ISD::SRL, NVT, Res, N->getOperand(1));
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_SELECT(SDNode *N) {
-  SDOperand LHS = GetPromotedOp(N->getOperand(1));
-  SDOperand RHS = GetPromotedOp(N->getOperand(2));
-  return DAG.getNode(ISD::SELECT, LHS.getValueType(), N->getOperand(0),LHS,RHS);
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_SELECT_CC(SDNode *N) {
-  SDOperand LHS = GetPromotedOp(N->getOperand(2));
-  SDOperand RHS = GetPromotedOp(N->getOperand(3));
-  return DAG.getNode(ISD::SELECT_CC, LHS.getValueType(), N->getOperand(0),
-                     N->getOperand(1), LHS, RHS, N->getOperand(4));
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_CTLZ(SDNode *N) {
-  SDOperand Op = GetPromotedOp(N->getOperand(0));
-  MVT::ValueType OVT = N->getValueType(0);
-  MVT::ValueType NVT = Op.getValueType();
-  // Zero extend to the promoted type and do the count there.
-  Op = DAG.getNode(ISD::CTLZ, NVT, DAG.getZeroExtendInReg(Op, OVT));
-  // Subtract off the extra leading bits in the bigger type.
-  return DAG.getNode(ISD::SUB, NVT, Op,
-                     DAG.getConstant(MVT::getSizeInBits(NVT) -
-                                     MVT::getSizeInBits(OVT), NVT));
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_CTPOP(SDNode *N) {
-  SDOperand Op = GetPromotedOp(N->getOperand(0));
-  MVT::ValueType OVT = N->getValueType(0);
-  MVT::ValueType NVT = Op.getValueType();
-  // Zero extend to the promoted type and do the count there.
-  return DAG.getNode(ISD::CTPOP, NVT, DAG.getZeroExtendInReg(Op, OVT));
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_CTTZ(SDNode *N) {
-  SDOperand Op = GetPromotedOp(N->getOperand(0));
-  MVT::ValueType OVT = N->getValueType(0);
-  MVT::ValueType NVT = Op.getValueType();
-  // The count is the same in the promoted type except if the original
-  // value was zero.  This can be handled by setting the bit just off
-  // the top of the original type.
-  Op = DAG.getNode(ISD::OR, NVT, Op,
-                   // FIXME: Do this using an APINT constant.
-                   DAG.getConstant(1UL << MVT::getSizeInBits(OVT), NVT));
-  return DAG.getNode(ISD::CTTZ, NVT, Op);
-}
-
-SDOperand DAGTypeLegalizer::PromoteResult_EXTRACT_VECTOR_ELT(SDNode *N) {
-  MVT::ValueType OldVT = N->getValueType(0);
-  SDOperand OldVec = N->getOperand(0);
-  unsigned OldElts = MVT::getVectorNumElements(OldVec.getValueType());
-
-  if (OldElts == 1) {
-    assert(!isTypeLegal(OldVec.getValueType()) &&
-           "Legal one-element vector of a type needing promotion!");
-    // It is tempting to follow GetScalarizedOp by a call to GetPromotedOp,
-    // but this would be wrong because the scalarized value may not yet have
-    // been processed.
-    return DAG.getNode(ISD::ANY_EXTEND, TLI.getTypeToTransformTo(OldVT),
-                       GetScalarizedOp(OldVec));
-  }
-
-  // Convert to a vector half as long with an element type of twice the width,
-  // for example <4 x i16> -> <2 x i32>.
-  assert(!(OldElts & 1) && "Odd length vectors not supported!");
-  MVT::ValueType NewVT = MVT::getIntegerType(2 * MVT::getSizeInBits(OldVT));
-  assert(!MVT::isExtendedVT(OldVT) && !MVT::isExtendedVT(NewVT));
-
-  SDOperand NewVec = DAG.getNode(ISD::BIT_CONVERT,
-                                 MVT::getVectorType(NewVT, OldElts / 2),
-                                 OldVec);
-
-  // Extract the element at OldIdx / 2 from the new vector.
-  SDOperand OldIdx = N->getOperand(1);
-  SDOperand NewIdx = DAG.getNode(ISD::SRL, OldIdx.getValueType(), OldIdx,
-                                 DAG.getConstant(1, TLI.getShiftAmountTy()));
-  SDOperand Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, NewVT, NewVec, NewIdx);
-
-  // Select the appropriate half of the element: Lo if OldIdx was even,
-  // Hi if it was odd.
-  SDOperand Lo = Elt;
-  SDOperand Hi = DAG.getNode(ISD::SRL, NewVT, Elt,
-                             DAG.getConstant(MVT::getSizeInBits(OldVT),
-                                             TLI.getShiftAmountTy()));
-  if (TLI.isBigEndian())
-    std::swap(Lo, Hi);
-
-  SDOperand Odd = DAG.getNode(ISD::AND, OldIdx.getValueType(), OldIdx,
-                              DAG.getConstant(1, TLI.getShiftAmountTy()));
-  return DAG.getNode(ISD::SELECT, NewVT, Odd, Hi, Lo);
-}
-
-//===----------------------------------------------------------------------===//
-//  Operand Promotion
-//===----------------------------------------------------------------------===//
-
-/// PromoteOperand - This method is called when the specified operand of the
-/// specified node is found to need promotion.  At this point, all of the result
-/// types of the node are known to be legal, but other operands of the node may
-/// need promotion or expansion as well as the specified one.
-bool DAGTypeLegalizer::PromoteOperand(SDNode *N, unsigned OpNo) {
-  DEBUG(cerr << "Promote node operand: "; N->dump(&DAG); cerr << "\n");
-  SDOperand Res;
-  switch (N->getOpcode()) {
-    default:
-#ifndef NDEBUG
-    cerr << "PromoteOperand Op #" << OpNo << ": ";
-    N->dump(&DAG); cerr << "\n";
-#endif
-    assert(0 && "Do not know how to promote this operator's operand!");
-    abort();
-    
-  case ISD::ANY_EXTEND:  Res = PromoteOperand_ANY_EXTEND(N); break;
-  case ISD::ZERO_EXTEND: Res = PromoteOperand_ZERO_EXTEND(N); break;
-  case ISD::SIGN_EXTEND: Res = PromoteOperand_SIGN_EXTEND(N); break;
-  case ISD::TRUNCATE:    Res = PromoteOperand_TRUNCATE(N); break;
-  case ISD::FP_EXTEND:   Res = PromoteOperand_FP_EXTEND(N); break;
-  case ISD::FP_ROUND:    Res = PromoteOperand_FP_ROUND(N); break;
-  case ISD::SINT_TO_FP:
-  case ISD::UINT_TO_FP:  Res = PromoteOperand_INT_TO_FP(N); break;
-  case ISD::BUILD_PAIR:  Res = PromoteOperand_BUILD_PAIR(N); break;
-
-  case ISD::SELECT:      Res = PromoteOperand_SELECT(N, OpNo); break;
-  case ISD::BRCOND:      Res = PromoteOperand_BRCOND(N, OpNo); break;
-  case ISD::BR_CC:       Res = PromoteOperand_BR_CC(N, OpNo); break;
-  case ISD::SETCC:       Res = PromoteOperand_SETCC(N, OpNo); break;
-
-  case ISD::STORE:       Res = PromoteOperand_STORE(cast<StoreSDNode>(N),
-                                                    OpNo); break;
-
-  case ISD::BUILD_VECTOR: Res = PromoteOperand_BUILD_VECTOR(N); break;
-  case ISD::INSERT_VECTOR_ELT:
-    Res = PromoteOperand_INSERT_VECTOR_ELT(N, OpNo);
-    break;
-
-  case ISD::RET:         Res = PromoteOperand_RET(N, OpNo); break;
-
-  case ISD::MEMBARRIER:  Res = PromoteOperand_MEMBARRIER(N); break;
-  }
-
-  // If the result is null, the sub-method took care of registering results etc.
-  if (!Res.Val) return false;
-  // If the result is N, the sub-method updated N in place.
-  if (Res.Val == N) {
-    // Mark N as new and remark N and its operands.  This allows us to correctly
-    // revisit N if it needs another step of promotion and allows us to visit
-    // any new operands to N.
-    ReanalyzeNode(N);
-    return true;
-  }
-
-  assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
-         "Invalid operand expansion");
-  
-  ReplaceValueWith(SDOperand(N, 0), Res);
-  return false;
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_ANY_EXTEND(SDNode *N) {
-  SDOperand Op = GetPromotedOp(N->getOperand(0));
-  return DAG.getNode(ISD::ANY_EXTEND, N->getValueType(0), Op);
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_ZERO_EXTEND(SDNode *N) {
-  SDOperand Op = GetPromotedOp(N->getOperand(0));
-  Op = DAG.getNode(ISD::ANY_EXTEND, N->getValueType(0), Op);
-  return DAG.getZeroExtendInReg(Op, N->getOperand(0).getValueType());
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_SIGN_EXTEND(SDNode *N) {
-  SDOperand Op = GetPromotedOp(N->getOperand(0));
-  Op = DAG.getNode(ISD::ANY_EXTEND, N->getValueType(0), Op);
-  return DAG.getNode(ISD::SIGN_EXTEND_INREG, Op.getValueType(),
-                     Op, DAG.getValueType(N->getOperand(0).getValueType()));
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_TRUNCATE(SDNode *N) {
-  SDOperand Op = GetPromotedOp(N->getOperand(0));
-  return DAG.getNode(ISD::TRUNCATE, N->getValueType(0), Op);
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_FP_EXTEND(SDNode *N) {
-  SDOperand Op = GetPromotedOp(N->getOperand(0));
-  return DAG.getNode(ISD::FP_EXTEND, N->getValueType(0), Op);
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_FP_ROUND(SDNode *N) {
-  SDOperand Op = GetPromotedOp(N->getOperand(0));
-  return DAG.getNode(ISD::FP_ROUND, N->getValueType(0), Op,
-                     DAG.getIntPtrConstant(0));
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_INT_TO_FP(SDNode *N) {
-  SDOperand In = GetPromotedOp(N->getOperand(0));
-  MVT::ValueType OpVT = N->getOperand(0).getValueType();
-  if (N->getOpcode() == ISD::UINT_TO_FP)
-    In = DAG.getZeroExtendInReg(In, OpVT);
-  else
-    In = DAG.getNode(ISD::SIGN_EXTEND_INREG, In.getValueType(),
-                     In, DAG.getValueType(OpVT));
-  
-  return DAG.UpdateNodeOperands(SDOperand(N, 0), In);
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_BUILD_PAIR(SDNode *N) {
-  // Since the result type is legal, the operands must promote to it.
-  MVT::ValueType OVT = N->getOperand(0).getValueType();
-  SDOperand Lo = GetPromotedOp(N->getOperand(0));
-  SDOperand Hi = GetPromotedOp(N->getOperand(1));
-  assert(Lo.getValueType() == N->getValueType(0) && "Operand over promoted?");
-
-  Lo = DAG.getZeroExtendInReg(Lo, OVT);
-  Hi = DAG.getNode(ISD::SHL, N->getValueType(0), Hi,
-                   DAG.getConstant(MVT::getSizeInBits(OVT),
-                                   TLI.getShiftAmountTy()));
-  return DAG.getNode(ISD::OR, N->getValueType(0), Lo, Hi);
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_SELECT(SDNode *N, unsigned OpNo) {
-  assert(OpNo == 0 && "Only know how to promote condition");
-  SDOperand Cond = GetPromotedOp(N->getOperand(0));  // Promote the condition.
-
-  // The top bits of the promoted condition are not necessarily zero, ensure
-  // that the value is properly zero extended.
-  unsigned BitWidth = Cond.getValueSizeInBits();
-  if (!DAG.MaskedValueIsZero(Cond, 
-                             APInt::getHighBitsSet(BitWidth, BitWidth-1)))
-    Cond = DAG.getZeroExtendInReg(Cond, MVT::i1);
-
-  // The chain (Op#0) and basic block destination (Op#2) are always legal types.
-  return DAG.UpdateNodeOperands(SDOperand(N, 0), Cond, N->getOperand(1),
-                                N->getOperand(2));
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_BRCOND(SDNode *N, unsigned OpNo) {
-  assert(OpNo == 1 && "only know how to promote condition");
-  SDOperand Cond = GetPromotedOp(N->getOperand(1));  // Promote the condition.
-  
-  // The top bits of the promoted condition are not necessarily zero, ensure
-  // that the value is properly zero extended.
-  unsigned BitWidth = Cond.getValueSizeInBits();
-  if (!DAG.MaskedValueIsZero(Cond, 
-                             APInt::getHighBitsSet(BitWidth, BitWidth-1)))
-    Cond = DAG.getZeroExtendInReg(Cond, MVT::i1);
-
-  // The chain (Op#0) and basic block destination (Op#2) are always legal types.
-  return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0), Cond,
-                                N->getOperand(2));
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_BR_CC(SDNode *N, unsigned OpNo) {
-  assert(OpNo == 2 && "Don't know how to promote this operand");
-  
-  SDOperand LHS = N->getOperand(2);
-  SDOperand RHS = N->getOperand(3);
-  PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(1))->get());
-  
-  // The chain (Op#0), CC (#1) and basic block destination (Op#4) are always
-  // legal types.
-  return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0),
-                                N->getOperand(1), LHS, RHS, N->getOperand(4));
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_SETCC(SDNode *N, unsigned OpNo) {
-  assert(OpNo == 0 && "Don't know how to promote this operand");
-
-  SDOperand LHS = N->getOperand(0);
-  SDOperand RHS = N->getOperand(1);
-  PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(2))->get());
-
-  // The CC (#2) is always legal.
-  return DAG.UpdateNodeOperands(SDOperand(N, 0), LHS, RHS, N->getOperand(2));
-}
-
-/// PromoteSetCCOperands - Promote the operands of a comparison.  This code is
-/// shared among BR_CC, SELECT_CC, and SETCC handlers.
-void DAGTypeLegalizer::PromoteSetCCOperands(SDOperand &NewLHS,SDOperand &NewRHS,
-                                            ISD::CondCode CCCode) {
-  MVT::ValueType VT = NewLHS.getValueType();
-  
-  // Get the promoted values.
-  NewLHS = GetPromotedOp(NewLHS);
-  NewRHS = GetPromotedOp(NewRHS);
-  
-  // If this is an FP compare, the operands have already been extended.
-  if (!MVT::isInteger(NewLHS.getValueType()))
-    return;
-  
-  // Otherwise, we have to insert explicit sign or zero extends.  Note
-  // that we could insert sign extends for ALL conditions, but zero extend
-  // is cheaper on many machines (an AND instead of two shifts), so prefer
-  // it.
-  switch (CCCode) {
-  default: assert(0 && "Unknown integer comparison!");
-  case ISD::SETEQ:
-  case ISD::SETNE:
-  case ISD::SETUGE:
-  case ISD::SETUGT:
-  case ISD::SETULE:
-  case ISD::SETULT:
-    // ALL of these operations will work if we either sign or zero extend
-    // the operands (including the unsigned comparisons!).  Zero extend is
-    // usually a simpler/cheaper operation, so prefer it.
-    NewLHS = DAG.getZeroExtendInReg(NewLHS, VT);
-    NewRHS = DAG.getZeroExtendInReg(NewRHS, VT);
-    return;
-  case ISD::SETGE:
-  case ISD::SETGT:
-  case ISD::SETLT:
-  case ISD::SETLE:
-    NewLHS = DAG.getNode(ISD::SIGN_EXTEND_INREG, NewLHS.getValueType(), NewLHS,
-                         DAG.getValueType(VT));
-    NewRHS = DAG.getNode(ISD::SIGN_EXTEND_INREG, NewRHS.getValueType(), NewRHS,
-                         DAG.getValueType(VT));
-    return;
-  }
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_STORE(StoreSDNode *N, unsigned OpNo){
-  // FIXME: Add support for indexed stores.
-  SDOperand Ch = N->getChain(), Ptr = N->getBasePtr();
-  int SVOffset = N->getSrcValueOffset();
-  unsigned Alignment = N->getAlignment();
-  bool isVolatile = N->isVolatile();
-  
-  SDOperand Val = GetPromotedOp(N->getValue());  // Get promoted value.
-
-  assert(!N->isTruncatingStore() && "Cannot promote this store operand!");
-  
-  // Truncate the value and store the result.
-  return DAG.getTruncStore(Ch, Val, Ptr, N->getSrcValue(),
-                           SVOffset, N->getMemoryVT(),
-                           isVolatile, Alignment);
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_BUILD_VECTOR(SDNode *N) {
-  // The vector type is legal but the element type is not.  This implies
-  // that the vector is a power-of-two in length and that the element
-  // type does not have a strange size (eg: it is not i1).
-  MVT::ValueType VecVT = N->getValueType(0);
-  unsigned NumElts = MVT::getVectorNumElements(VecVT);
-  assert(!(NumElts & 1) && "Legal vector of one illegal element?");
-
-  // Build a vector of half the length out of elements of twice the bitwidth.
-  // For example <4 x i16> -> <2 x i32>.
-  MVT::ValueType OldVT = N->getOperand(0).getValueType();
-  MVT::ValueType NewVT = MVT::getIntegerType(2 * MVT::getSizeInBits(OldVT));
-  assert(!MVT::isExtendedVT(OldVT) && !MVT::isExtendedVT(NewVT));
-
-  std::vector<SDOperand> NewElts;
-  NewElts.reserve(NumElts/2);
-
-  for (unsigned i = 0; i < NumElts; i += 2) {
-    // Combine two successive elements into one promoted element.
-    SDOperand Lo = N->getOperand(i);
-    SDOperand Hi = N->getOperand(i+1);
-    if (TLI.isBigEndian())
-      std::swap(Lo, Hi);
-    NewElts.push_back(JoinIntegers(Lo, Hi));
-  }
-
-  SDOperand NewVec = DAG.getNode(ISD::BUILD_VECTOR,
-                                 MVT::getVectorType(NewVT, NewElts.size()),
-                                 &NewElts[0], NewElts.size());
-
-  // Convert the new vector to the old vector type.
-  return DAG.getNode(ISD::BIT_CONVERT, VecVT, NewVec);
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_INSERT_VECTOR_ELT(SDNode *N,
-                                                             unsigned OpNo) {
-  if (OpNo == 1) {
-    // Promote the inserted value.  This is valid because the type does not
-    // have to match the vector element type.
-
-    // Check that any extra bits introduced will be truncated away.
-    assert(MVT::getSizeInBits(N->getOperand(1).getValueType()) >=
-           MVT::getSizeInBits(MVT::getVectorElementType(N->getValueType(0))) &&
-           "Type of inserted value narrower than vector element type!");
-    return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0),
-                                  GetPromotedOp(N->getOperand(1)),
-                                  N->getOperand(2));
-  }
-
-  assert(OpNo == 2 && "Different operand and result vector types?");
-
-  // Promote the index.
-  SDOperand Idx = N->getOperand(2);
-  Idx = DAG.getZeroExtendInReg(GetPromotedOp(Idx), Idx.getValueType());
-  return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0),
-                                N->getOperand(1), Idx);
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_RET(SDNode *N, unsigned OpNo) {
-  assert(!(OpNo & 1) && "Return values should be legally typed!");
-  assert((N->getNumOperands() & 1) && "Wrong number of operands!");
-
-  // It's a flag.  Promote all the flags in one hit, as an optimization.
-  SmallVector<SDOperand, 8> NewValues(N->getNumOperands());
-  NewValues[0] = N->getOperand(0); // The chain
-  for (unsigned i = 1, e = N->getNumOperands(); i < e; i += 2) {
-    // The return value.
-    NewValues[i] = N->getOperand(i);
-
-    // The flag.
-    SDOperand Flag = N->getOperand(i + 1);
-    if (getTypeAction(Flag.getValueType()) == Promote)
-      // The promoted value may have rubbish in the new bits, but that
-      // doesn't matter because those bits aren't queried anyway.
-      Flag = GetPromotedOp(Flag);
-    NewValues[i + 1] = Flag;
-  }
-
-  return DAG.UpdateNodeOperands(SDOperand (N, 0),
-                                &NewValues[0], NewValues.size());
-}
-
-SDOperand DAGTypeLegalizer::PromoteOperand_MEMBARRIER(SDNode *N) {
-  SDOperand NewOps[6];
-  NewOps[0] = N->getOperand(0);
-  for (unsigned i = 1; i < array_lengthof(NewOps); ++i) {
-    SDOperand Flag = GetPromotedOp(N->getOperand(i));
-    NewOps[i] = DAG.getZeroExtendInReg(Flag, MVT::i1);
-  }
-  return DAG.UpdateNodeOperands(SDOperand (N, 0), NewOps,
-                                array_lengthof(NewOps));
-}
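
For reference, a minimal standalone C++ sketch (not part of the patch) of the promotion idea the removed file implements: i8 arithmetic carried out in a 32-bit register. Bitwise/add/mul operations don't care about the high bits (PromoteResult_SimpleIntBinOp), but signed division must sign-extend in-register first (PromoteResult_SDIV) and unsigned division must zero-extend (PromoteResult_UDIV):

#include <cstdint>
#include <cstdio>

// SIGN_EXTEND_INREG / zero-extend-in-reg for an i8 living in a 32-bit register.
static int32_t  SignExtendInReg8(uint32_t X) { return (int32_t)(int8_t)(X & 0xFF); }
static uint32_t ZeroExtendInReg8(uint32_t X) { return X & 0xFF; }

int main() {
  // i8 values -2 and 3, each sitting in a 32-bit register with junk high bits.
  uint32_t A = 0xABCDFFFEu;  // low byte 0xFE == -2
  uint32_t B = 0x12340003u;  // low byte 0x03 ==  3

  uint32_t Add  = (A + B) & 0xFF;                               // i8 add: 1
  int32_t  SDiv = SignExtendInReg8(A) / SignExtendInReg8(B);    // -2 / 3 == 0
  uint32_t UDiv = ZeroExtendInReg8(A) / ZeroExtendInReg8(B);    // 254 / 3 == 84

  std::printf("add=%u sdiv=%d udiv=%u\n", Add, SDiv, UDiv);
  return 0;
}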

Removed: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesScalarize.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesScalarize.cpp?rev=53162&view=auto

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesScalarize.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesScalarize.cpp (removed)
@@ -1,233 +0,0 @@
-//===-- LegalizeTypesScalarize.cpp - Scalarization for LegalizeTypes ------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements scalarization support for LegalizeTypes.  Scalarization
-// is the act of changing a computation in an invalid single-element vector type
-// to be a computation in its scalar element type.  For example, implementing
-// <1 x f32> arithmetic in a scalar f32 register.  This is needed as a base case
-// when scalarizing vector arithmetic like <4 x f32>, which eventually
-// decomposes to scalars if the target doesn't support v4f32 or v2f32 types.
-//
-//===----------------------------------------------------------------------===//
-
-#include "LegalizeTypes.h"
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-//  Result Vector Scalarization: <1 x ty> -> ty.
-//===----------------------------------------------------------------------===//
-
-void DAGTypeLegalizer::ScalarizeResult(SDNode *N, unsigned ResNo) {
-  DEBUG(cerr << "Scalarize node result " << ResNo << ": "; N->dump(&DAG); 
-        cerr << "\n");
-  SDOperand R = SDOperand();
-  
-  // FIXME: Custom lowering for scalarization?
-#if 0
-  // See if the target wants to custom expand this node.
-  if (TLI.getOperationAction(N->getOpcode(), N->getValueType(0)) == 
-      TargetLowering::Custom) {
-    // If the target wants to, allow it to lower this itself.
-    if (SDNode *P = TLI.ExpandOperationResult(N, DAG)) {
-      // Everything that once used N now uses P.  We are guaranteed that the
-      // result value types of N and the result value types of P match.
-      ReplaceNodeWith(N, P);
-      return;
-    }
-  }
-#endif
-  
-  switch (N->getOpcode()) {
-  default:
-#ifndef NDEBUG
-    cerr << "ScalarizeResult #" << ResNo << ": ";
-    N->dump(&DAG); cerr << "\n";
-#endif
-    assert(0 && "Do not know how to scalarize the result of this operator!");
-    abort();
-    
-  case ISD::UNDEF:       R = ScalarizeRes_UNDEF(N); break;
-  case ISD::LOAD:        R = ScalarizeRes_LOAD(cast<LoadSDNode>(N)); break;
-  case ISD::ADD:
-  case ISD::FADD:
-  case ISD::SUB:
-  case ISD::FSUB:
-  case ISD::MUL:
-  case ISD::FMUL:
-  case ISD::SDIV:
-  case ISD::UDIV:
-  case ISD::FDIV:
-  case ISD::SREM:
-  case ISD::UREM:
-  case ISD::FREM:
-  case ISD::FPOW:
-  case ISD::AND:
-  case ISD::OR:
-  case ISD::XOR:         R = ScalarizeRes_BinOp(N); break;
-  case ISD::FNEG:
-  case ISD::FABS:
-  case ISD::FSQRT:
-  case ISD::FSIN:
-  case ISD::FCOS:              R = ScalarizeRes_UnaryOp(N); break;
-  case ISD::FPOWI:             R = ScalarizeRes_FPOWI(N); break;
-  case ISD::BUILD_VECTOR:      R = N->getOperand(0); break;
-  case ISD::INSERT_VECTOR_ELT: R = ScalarizeRes_INSERT_VECTOR_ELT(N); break;
-  case ISD::VECTOR_SHUFFLE:    R = ScalarizeRes_VECTOR_SHUFFLE(N); break;
-  case ISD::BIT_CONVERT:       R = ScalarizeRes_BIT_CONVERT(N); break;
-  case ISD::SELECT:            R = ScalarizeRes_SELECT(N); break;
-  }
-  
-  // If R is null, the sub-method took care of registering the result.
-  if (R.Val)
-    SetScalarizedOp(SDOperand(N, ResNo), R);
-}
-
-SDOperand DAGTypeLegalizer::ScalarizeRes_UNDEF(SDNode *N) {
-  return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(N->getValueType(0)));
-}
-
-SDOperand DAGTypeLegalizer::ScalarizeRes_LOAD(LoadSDNode *N) {
-  // FIXME: Add support for indexed loads.
-  SDOperand Result = DAG.getLoad(MVT::getVectorElementType(N->getValueType(0)),
-                                 N->getChain(), N->getBasePtr(), 
-                                 N->getSrcValue(), N->getSrcValueOffset(),
-                                 N->isVolatile(), N->getAlignment());
-  
-  // Legalized the chain result - switch anything that used the old chain to
-  // use the new one.
-  ReplaceValueWith(SDOperand(N, 1), Result.getValue(1));
-  return Result;
-}
-
-SDOperand DAGTypeLegalizer::ScalarizeRes_BinOp(SDNode *N) {
-  SDOperand LHS = GetScalarizedOp(N->getOperand(0));
-  SDOperand RHS = GetScalarizedOp(N->getOperand(1));
-  return DAG.getNode(N->getOpcode(), LHS.getValueType(), LHS, RHS);
-}
-
-SDOperand DAGTypeLegalizer::ScalarizeRes_UnaryOp(SDNode *N) {
-  SDOperand Op = GetScalarizedOp(N->getOperand(0));
-  return DAG.getNode(N->getOpcode(), Op.getValueType(), Op);
-}
-
-SDOperand DAGTypeLegalizer::ScalarizeRes_FPOWI(SDNode *N) {
-  SDOperand Op = GetScalarizedOp(N->getOperand(0));
-  return DAG.getNode(ISD::FPOWI, Op.getValueType(), Op, N->getOperand(1));
-}
-
-SDOperand DAGTypeLegalizer::ScalarizeRes_INSERT_VECTOR_ELT(SDNode *N) {
-  // The value to insert may have a wider type than the vector element type,
-  // so be sure to truncate it to the element type if necessary.
-  SDOperand Op = N->getOperand(1);
-  MVT::ValueType EltVT = MVT::getVectorElementType(N->getValueType(0));
-  if (MVT::getSizeInBits(Op.getValueType()) > MVT::getSizeInBits(EltVT))
-    Op = DAG.getNode(ISD::TRUNCATE, EltVT, Op);
-  assert(Op.getValueType() == EltVT && "Invalid type for inserted value!");
-  return Op;
-}
-
-SDOperand DAGTypeLegalizer::ScalarizeRes_VECTOR_SHUFFLE(SDNode *N) {
-  // Figure out if the scalar is the LHS or RHS and return it.
-  SDOperand EltNum = N->getOperand(2).getOperand(0);
-  unsigned Op = cast<ConstantSDNode>(EltNum)->getValue() != 0;
-  return GetScalarizedOp(N->getOperand(Op));
-}
-
-SDOperand DAGTypeLegalizer::ScalarizeRes_BIT_CONVERT(SDNode *N) {
-  MVT::ValueType NewVT = MVT::getVectorElementType(N->getValueType(0));
-  return DAG.getNode(ISD::BIT_CONVERT, NewVT, N->getOperand(0));
-}
-
-SDOperand DAGTypeLegalizer::ScalarizeRes_SELECT(SDNode *N) {
-  SDOperand LHS = GetScalarizedOp(N->getOperand(1));
-  return DAG.getNode(ISD::SELECT, LHS.getValueType(), N->getOperand(0), LHS,
-                     GetScalarizedOp(N->getOperand(2)));
-}
-
-
-//===----------------------------------------------------------------------===//
-//  Operand Vector Scalarization <1 x ty> -> ty.
-//===----------------------------------------------------------------------===//
-
-bool DAGTypeLegalizer::ScalarizeOperand(SDNode *N, unsigned OpNo) {
-  DEBUG(cerr << "Scalarize node operand " << OpNo << ": "; N->dump(&DAG); 
-        cerr << "\n");
-  SDOperand Res(0, 0);
-  
-  // FIXME: Should we support custom lowering for scalarization?
-#if 0
-  if (TLI.getOperationAction(N->getOpcode(), N->getValueType(0)) == 
-      TargetLowering::Custom)
-    Res = TLI.LowerOperation(SDOperand(N, 0), DAG);
-#endif
-  
-  if (Res.Val == 0) {
-    switch (N->getOpcode()) {
-    default:
-#ifndef NDEBUG
-      cerr << "ScalarizeOperand Op #" << OpNo << ": ";
-      N->dump(&DAG); cerr << "\n";
-#endif
-      assert(0 && "Do not know how to scalarize this operator's operand!");
-      abort();
-      
-    case ISD::BIT_CONVERT:
-      Res = ScalarizeOp_BIT_CONVERT(N); break;
-
-    case ISD::EXTRACT_VECTOR_ELT:
-      Res = ScalarizeOp_EXTRACT_VECTOR_ELT(N); break;
-
-    case ISD::STORE:
-      Res = ScalarizeOp_STORE(cast<StoreSDNode>(N), OpNo); break;
-    }
-  }
-  
-  // If the result is null, the sub-method took care of registering results etc.
-  if (!Res.Val) return false;
-  
-  // If the result is N, the sub-method updated N in place.  Check to see if any
-  // operands are new, and if so, mark them.
-  if (Res.Val == N) {
-    // Mark N as new and remark N and its operands.  This allows us to correctly
-    // revisit N if it needs another step of promotion and allows us to visit
-    // any new operands to N.
-    ReanalyzeNode(N);
-    return true;
-  }
-
-  assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
-         "Invalid operand expansion");
-  
-  ReplaceValueWith(SDOperand(N, 0), Res);
-  return false;
-}
-
-/// ScalarizeOp_BIT_CONVERT - If the value to convert is a vector that needs
-/// to be scalarized, it must be <1 x ty>.  Convert the element instead.
-SDOperand DAGTypeLegalizer::ScalarizeOp_BIT_CONVERT(SDNode *N) {
-  SDOperand Elt = GetScalarizedOp(N->getOperand(0));
-  return DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0), Elt);
-}
-
-/// ScalarizeOp_EXTRACT_VECTOR_ELT - If the input is a vector that needs to be
-/// scalarized, it must be <1 x ty>, so just return the element, ignoring the
-/// index.
-SDOperand DAGTypeLegalizer::ScalarizeOp_EXTRACT_VECTOR_ELT(SDNode *N) {
-  return GetScalarizedOp(N->getOperand(0));
-}
-
-/// ScalarizeOp_STORE - If the value to store is a vector that needs to be
-/// scalarized, it must be <1 x ty>.  Just store the element.
-SDOperand DAGTypeLegalizer::ScalarizeOp_STORE(StoreSDNode *N, unsigned OpNo) {
-  // FIXME: Add support for indexed stores.
-  assert(OpNo == 1 && "Do not know how to scalarize this operand!");
-  return DAG.getStore(N->getChain(), GetScalarizedOp(N->getOperand(1)),
-                      N->getBasePtr(), N->getSrcValue(), N->getSrcValueOffset(),
-                      N->isVolatile(), N->getAlignment());
-}
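
For reference, a minimal standalone C++ sketch (not part of the patch) of the scalarization idea the removed file implements: an operation on a <1 x f32> value is just the same operation on its lone element, as in ScalarizeRes_BinOp above:

#include <cstdio>

struct V1F32 { float Elt; };               // stand-in for a <1 x f32> value

// ScalarizeRes_BinOp in miniature: operate on the scalarized elements.
static V1F32 ScalarizedFAdd(V1F32 A, V1F32 B) {
  return V1F32{A.Elt + B.Elt};
}

int main() {
  V1F32 R = ScalarizedFAdd(V1F32{1.5f}, V1F32{2.25f});
  std::printf("%g\n", R.Elt);              // 3.75
  return 0;
}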

Removed: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesSplit.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesSplit.cpp?rev=53162&view=auto

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesSplit.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeTypesSplit.cpp (removed)
@@ -1,573 +0,0 @@
-//===-- LegalizeTypesSplit.cpp - Vector Splitting for LegalizeTypes -------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements vector splitting support for LegalizeTypes.  Vector
-// splitting is the act of changing a computation in an invalid vector type to
-// be a computation in multiple vectors of a smaller type.  For example,
-// implementing <128 x f32> operations in terms of two <64 x f32> operations.
-//
-//===----------------------------------------------------------------------===//
-
-#include "LegalizeTypes.h"
-using namespace llvm;
-
-/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a vector
-/// type that needs to be split.  This handles non-power of two vectors.
-static void GetSplitDestVTs(MVT::ValueType InVT,
-                            MVT::ValueType &Lo, MVT::ValueType &Hi) {
-  MVT::ValueType NewEltVT = MVT::getVectorElementType(InVT);
-  unsigned NumElements = MVT::getVectorNumElements(InVT);
-  if ((NumElements & (NumElements-1)) == 0) {  // Simple power of two vector.
-    NumElements >>= 1;
-    Lo = Hi =  MVT::getVectorType(NewEltVT, NumElements);
-  } else {                                     // Non-power-of-two vectors.
-    unsigned NewNumElts_Lo = 1 << Log2_32(NumElements);
-    unsigned NewNumElts_Hi = NumElements - NewNumElts_Lo;
-    Lo = MVT::getVectorType(NewEltVT, NewNumElts_Lo);
-    Hi = MVT::getVectorType(NewEltVT, NewNumElts_Hi);
-  }
-}
-
-
-//===----------------------------------------------------------------------===//
-//  Result Vector Splitting
-//===----------------------------------------------------------------------===//
-
-/// SplitResult - This method is called when the specified result of the
-/// specified node is found to need vector splitting.  At this point, the node
-/// may also have invalid operands or may have other results that need
-/// legalization, we just know that (at least) one result needs vector
-/// splitting.
-void DAGTypeLegalizer::SplitResult(SDNode *N, unsigned ResNo) {
-  DEBUG(cerr << "Split node result: "; N->dump(&DAG); cerr << "\n");
-  SDOperand Lo, Hi;
-  
-#if 0
-  // See if the target wants to custom expand this node.
-  if (TLI.getOperationAction(N->getOpcode(), N->getValueType(0)) == 
-      TargetLowering::Custom) {
-    // If the target wants to, allow it to lower this itself.
-    if (SDNode *P = TLI.ExpandOperationResult(N, DAG)) {
-      // Everything that once used N now uses P.  We are guaranteed that the
-      // result value types of N and the result value types of P match.
-      ReplaceNodeWith(N, P);
-      return;
-    }
-  }
-#endif
-  
-  switch (N->getOpcode()) {
-  default:
-#ifndef NDEBUG
-    cerr << "SplitResult #" << ResNo << ": ";
-    N->dump(&DAG); cerr << "\n";
-#endif
-    assert(0 && "Do not know how to split the result of this operator!");
-    abort();
-    
-  case ISD::UNDEF:            SplitRes_UNDEF(N, Lo, Hi); break;
-  case ISD::LOAD:             SplitRes_LOAD(cast<LoadSDNode>(N), Lo, Hi); break;
-  case ISD::BUILD_PAIR:       SplitRes_BUILD_PAIR(N, Lo, Hi); break;
-  case ISD::INSERT_VECTOR_ELT:SplitRes_INSERT_VECTOR_ELT(N, Lo, Hi); break;
-  case ISD::VECTOR_SHUFFLE:   SplitRes_VECTOR_SHUFFLE(N, Lo, Hi); break;
-  case ISD::BUILD_VECTOR:     SplitRes_BUILD_VECTOR(N, Lo, Hi); break;
-  case ISD::CONCAT_VECTORS:   SplitRes_CONCAT_VECTORS(N, Lo, Hi); break;
-  case ISD::BIT_CONVERT:      SplitRes_BIT_CONVERT(N, Lo, Hi); break;
-  case ISD::CTTZ:
-  case ISD::CTLZ:
-  case ISD::CTPOP:
-  case ISD::FNEG:
-  case ISD::FABS:
-  case ISD::FSQRT:
-  case ISD::FSIN:
-  case ISD::FCOS:
-  case ISD::FP_TO_SINT:
-  case ISD::FP_TO_UINT:
-  case ISD::SINT_TO_FP:
-  case ISD::UINT_TO_FP:       SplitRes_UnOp(N, Lo, Hi); break;
-  case ISD::ADD:
-  case ISD::SUB:
-  case ISD::MUL:
-  case ISD::FADD:
-  case ISD::FSUB:
-  case ISD::FMUL:
-  case ISD::SDIV:
-  case ISD::UDIV:
-  case ISD::FDIV:
-  case ISD::FPOW:
-  case ISD::AND:
-  case ISD::OR:
-  case ISD::XOR:
-  case ISD::UREM:
-  case ISD::SREM:
-  case ISD::FREM:             SplitRes_BinOp(N, Lo, Hi); break;
-  case ISD::FPOWI:            SplitRes_FPOWI(N, Lo, Hi); break;
-  case ISD::SELECT:           SplitRes_SELECT(N, Lo, Hi); break;
-  }
-  
-  // If Lo/Hi is null, the sub-method took care of registering results etc.
-  if (Lo.Val)
-    SetSplitOp(SDOperand(N, ResNo), Lo, Hi);
-}
-
-void DAGTypeLegalizer::SplitRes_UNDEF(SDNode *N, SDOperand &Lo, SDOperand &Hi) {
-  MVT::ValueType LoVT, HiVT;
-  GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
-
-  Lo = DAG.getNode(ISD::UNDEF, LoVT);
-  Hi = DAG.getNode(ISD::UNDEF, HiVT);
-}
-
-void DAGTypeLegalizer::SplitRes_LOAD(LoadSDNode *LD, 
-                                     SDOperand &Lo, SDOperand &Hi) {
-  // FIXME: Add support for indexed loads.
-  MVT::ValueType LoVT, HiVT;
-  GetSplitDestVTs(LD->getValueType(0), LoVT, HiVT);
-  
-  SDOperand Ch = LD->getChain();
-  SDOperand Ptr = LD->getBasePtr();
-  const Value *SV = LD->getSrcValue();
-  int SVOffset = LD->getSrcValueOffset();
-  unsigned Alignment = LD->getAlignment();
-  bool isVolatile = LD->isVolatile();
-  
-  Lo = DAG.getLoad(LoVT, Ch, Ptr, SV, SVOffset, isVolatile, Alignment);
-  unsigned IncrementSize = MVT::getSizeInBits(LoVT)/8;
-  Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
-                    DAG.getIntPtrConstant(IncrementSize));
-  SVOffset += IncrementSize;
-  Alignment = MinAlign(Alignment, IncrementSize);
-  Hi = DAG.getLoad(HiVT, Ch, Ptr, SV, SVOffset, isVolatile, Alignment);
-  
-  // Build a factor node to remember that this load is independent of the
-  // other one.
-  SDOperand TF = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1),
-                             Hi.getValue(1));
-  
-  // Legalized the chain result - switch anything that used the old chain to
-  // use the new one.
-  ReplaceValueWith(SDOperand(LD, 1), TF);
-}
-
-void DAGTypeLegalizer::SplitRes_BUILD_PAIR(SDNode *N, SDOperand &Lo,
-                                           SDOperand &Hi) {
-  Lo = N->getOperand(0);
-  Hi = N->getOperand(1);
-}
-
-void DAGTypeLegalizer::SplitRes_INSERT_VECTOR_ELT(SDNode *N, SDOperand &Lo,
-                                                  SDOperand &Hi) {
-  GetSplitOp(N->getOperand(0), Lo, Hi);
-  unsigned Index = cast<ConstantSDNode>(N->getOperand(2))->getValue();
-  SDOperand ScalarOp = N->getOperand(1);
-  unsigned LoNumElts = MVT::getVectorNumElements(Lo.getValueType());
-  if (Index < LoNumElts)
-    Lo = DAG.getNode(ISD::INSERT_VECTOR_ELT, Lo.getValueType(), Lo, ScalarOp,
-                     N->getOperand(2));
-  else
-    Hi = DAG.getNode(ISD::INSERT_VECTOR_ELT, Hi.getValueType(), Hi, ScalarOp,
-                     DAG.getIntPtrConstant(Index - LoNumElts));
-}
-
-void DAGTypeLegalizer::SplitRes_VECTOR_SHUFFLE(SDNode *N, 
-                                               SDOperand &Lo, SDOperand &Hi) {
-  // Build the low part.
-  SDOperand Mask = N->getOperand(2);
-  SmallVector<SDOperand, 16> Ops;
-  MVT::ValueType LoVT, HiVT;
-  GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
-  MVT::ValueType EltVT = MVT::getVectorElementType(LoVT);
-  unsigned LoNumElts = MVT::getVectorNumElements(LoVT);
-  unsigned NumElements = Mask.getNumOperands();
-
-  // Insert all of the elements from the input that are needed.  We use 
-  // buildvector of extractelement here because the input vectors will have
-  // to be legalized, so this makes the code simpler.
-  for (unsigned i = 0; i != LoNumElts; ++i) {
-    unsigned Idx = cast<ConstantSDNode>(Mask.getOperand(i))->getValue();
-    SDOperand InVec = N->getOperand(0);
-    if (Idx >= NumElements) {
-      InVec = N->getOperand(1);
-      Idx -= NumElements;
-    }
-    Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, EltVT, InVec,
-                              DAG.getIntPtrConstant(Idx)));
-  }
-  Lo = DAG.getNode(ISD::BUILD_VECTOR, LoVT, &Ops[0], Ops.size());
-  Ops.clear();
-  
-  for (unsigned i = LoNumElts; i != NumElements; ++i) {
-    unsigned Idx = cast<ConstantSDNode>(Mask.getOperand(i))->getValue();
-    SDOperand InVec = N->getOperand(0);
-    if (Idx >= NumElements) {
-      InVec = N->getOperand(1);
-      Idx -= NumElements;
-    }
-    Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, EltVT, InVec,
-                              DAG.getIntPtrConstant(Idx)));
-  }
-  Hi = DAG.getNode(ISD::BUILD_VECTOR, HiVT, &Ops[0], Ops.size());
-}
-
-void DAGTypeLegalizer::SplitRes_BUILD_VECTOR(SDNode *N, SDOperand &Lo, 
-                                             SDOperand &Hi) {
-  MVT::ValueType LoVT, HiVT;
-  GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
-  unsigned LoNumElts = MVT::getVectorNumElements(LoVT);
-  SmallVector<SDOperand, 8> LoOps(N->op_begin(), N->op_begin()+LoNumElts);
-  Lo = DAG.getNode(ISD::BUILD_VECTOR, LoVT, &LoOps[0], LoOps.size());
-  
-  SmallVector<SDOperand, 8> HiOps(N->op_begin()+LoNumElts, N->op_end());
-  Hi = DAG.getNode(ISD::BUILD_VECTOR, HiVT, &HiOps[0], HiOps.size());
-}
-
-void DAGTypeLegalizer::SplitRes_CONCAT_VECTORS(SDNode *N, 
-                                               SDOperand &Lo, SDOperand &Hi) {
-  // FIXME: Handle non-power-of-two vectors?
-  unsigned NumSubvectors = N->getNumOperands() / 2;
-  if (NumSubvectors == 1) {
-    Lo = N->getOperand(0);
-    Hi = N->getOperand(1);
-    return;
-  }
-
-  MVT::ValueType LoVT, HiVT;
-  GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
-
-  SmallVector<SDOperand, 8> LoOps(N->op_begin(), N->op_begin()+NumSubvectors);
-  Lo = DAG.getNode(ISD::CONCAT_VECTORS, LoVT, &LoOps[0], LoOps.size());
-    
-  SmallVector<SDOperand, 8> HiOps(N->op_begin()+NumSubvectors, N->op_end());
-  Hi = DAG.getNode(ISD::CONCAT_VECTORS, HiVT, &HiOps[0], HiOps.size());
-}
-
-void DAGTypeLegalizer::SplitRes_BIT_CONVERT(SDNode *N, 
-                                            SDOperand &Lo, SDOperand &Hi) {
-  // We know the result is a vector.  The input may be either a vector or a
-  // scalar value.
-  MVT::ValueType LoVT, HiVT;
-  GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
-
-  SDOperand InOp = N->getOperand(0);
-  MVT::ValueType InVT = InOp.getValueType();
-
-  // Handle some special cases efficiently.
-  switch (getTypeAction(InVT)) {
-  default:
-    assert(false && "Unknown type action!");
-  case Legal:
-  case FloatToInt:
-  case Promote:
-  case Scalarize:
-    break;
-  case Expand:
-    // A scalar to vector conversion, where the scalar needs expansion.
-    // If the vector is being split in two then we can just convert the
-    // expanded pieces.
-    if (LoVT == HiVT) {
-      GetExpandedOp(InOp, Lo, Hi);
-      if (TLI.isBigEndian())
-        std::swap(Lo, Hi);
-      Lo = DAG.getNode(ISD::BIT_CONVERT, LoVT, Lo);
-      Hi = DAG.getNode(ISD::BIT_CONVERT, HiVT, Hi);
-      return;
-    }
-    break;
-  case Split:
-    // If the input is a vector that needs to be split, convert each split
-    // piece of the input now.
-    GetSplitOp(InOp, Lo, Hi);
-    Lo = DAG.getNode(ISD::BIT_CONVERT, LoVT, Lo);
-    Hi = DAG.getNode(ISD::BIT_CONVERT, HiVT, Hi);
-    return;
-  }
-
-  // In the general case, convert the input to an integer and split it by hand.
-  MVT::ValueType LoIntVT = MVT::getIntegerType(MVT::getSizeInBits(LoVT));
-  MVT::ValueType HiIntVT = MVT::getIntegerType(MVT::getSizeInBits(HiVT));
-  if (TLI.isBigEndian())
-    std::swap(LoIntVT, HiIntVT);
-
-  SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT, Lo, Hi);
-
-  if (TLI.isBigEndian())
-    std::swap(Lo, Hi);
-  Lo = DAG.getNode(ISD::BIT_CONVERT, LoVT, Lo);
-  Hi = DAG.getNode(ISD::BIT_CONVERT, HiVT, Hi);
-}
-
-void DAGTypeLegalizer::SplitRes_BinOp(SDNode *N, SDOperand &Lo, SDOperand &Hi) {
-  SDOperand LHSLo, LHSHi;
-  GetSplitOp(N->getOperand(0), LHSLo, LHSHi);
-  SDOperand RHSLo, RHSHi;
-  GetSplitOp(N->getOperand(1), RHSLo, RHSHi);
-  
-  Lo = DAG.getNode(N->getOpcode(), LHSLo.getValueType(), LHSLo, RHSLo);
-  Hi = DAG.getNode(N->getOpcode(), LHSHi.getValueType(), LHSHi, RHSHi);
-}
-
-void DAGTypeLegalizer::SplitRes_UnOp(SDNode *N, SDOperand &Lo, SDOperand &Hi) {
-  // Get the dest types.  This doesn't always match input types, e.g. int_to_fp.
-  MVT::ValueType LoVT, HiVT;
-  GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
-
-  GetSplitOp(N->getOperand(0), Lo, Hi);
-  Lo = DAG.getNode(N->getOpcode(), LoVT, Lo);
-  Hi = DAG.getNode(N->getOpcode(), HiVT, Hi);
-}
-
-void DAGTypeLegalizer::SplitRes_FPOWI(SDNode *N, SDOperand &Lo, SDOperand &Hi) {
-  GetSplitOp(N->getOperand(0), Lo, Hi);
-  Lo = DAG.getNode(ISD::FPOWI, Lo.getValueType(), Lo, N->getOperand(1));
-  Hi = DAG.getNode(ISD::FPOWI, Lo.getValueType(), Hi, N->getOperand(1));
-}
-
-
-void DAGTypeLegalizer::SplitRes_SELECT(SDNode *N, SDOperand &Lo, SDOperand &Hi){
-  SDOperand LL, LH, RL, RH;
-  GetSplitOp(N->getOperand(1), LL, LH);
-  GetSplitOp(N->getOperand(2), RL, RH);
-  
-  SDOperand Cond = N->getOperand(0);
-  Lo = DAG.getNode(ISD::SELECT, LL.getValueType(), Cond, LL, RL);
-  Hi = DAG.getNode(ISD::SELECT, LH.getValueType(), Cond, LH, RH);
-}
-
-
-//===----------------------------------------------------------------------===//
-//  Operand Vector Splitting
-//===----------------------------------------------------------------------===//
-
-/// SplitOperand - This method is called when the specified operand of the
-/// specified node is found to need vector splitting.  At this point, all of the
-/// result types of the node are known to be legal, but other operands of the
-/// node may need legalization as well as the specified one.
-bool DAGTypeLegalizer::SplitOperand(SDNode *N, unsigned OpNo) {
-  DEBUG(cerr << "Split node operand: "; N->dump(&DAG); cerr << "\n");
-  SDOperand Res(0, 0);
-  
-#if 0
-  if (TLI.getOperationAction(N->getOpcode(), N->getValueType(0)) == 
-      TargetLowering::Custom)
-    Res = TLI.LowerOperation(SDOperand(N, 0), DAG);
-#endif
-  
-  if (Res.Val == 0) {
-    switch (N->getOpcode()) {
-    default:
-#ifndef NDEBUG
-      cerr << "SplitOperand Op #" << OpNo << ": ";
-      N->dump(&DAG); cerr << "\n";
-#endif
-      assert(0 && "Do not know how to split this operator's operand!");
-      abort();
-    case ISD::STORE: Res = SplitOp_STORE(cast<StoreSDNode>(N), OpNo); break;
-    case ISD::RET:   Res = SplitOp_RET(N, OpNo); break;
-
-    case ISD::BIT_CONVERT: Res = SplitOp_BIT_CONVERT(N); break;
-
-    case ISD::EXTRACT_VECTOR_ELT: Res = SplitOp_EXTRACT_VECTOR_ELT(N); break;
-    case ISD::EXTRACT_SUBVECTOR:  Res = SplitOp_EXTRACT_SUBVECTOR(N); break;
-    case ISD::VECTOR_SHUFFLE:     Res = SplitOp_VECTOR_SHUFFLE(N, OpNo); break;
-    }
-  }
-  
-  // If the result is null, the sub-method took care of registering results etc.
-  if (!Res.Val) return false;
-  
-  // If the result is N, the sub-method updated N in place.  Check to see if any
-  // operands are new, and if so, mark them.
-  if (Res.Val == N) {
-    // Mark N as new and remark N and its operands.  This allows us to correctly
-    // revisit N if it needs another step of promotion and allows us to visit
-    // any new operands to N.
-    ReanalyzeNode(N);
-    return true;
-  }
-
-  assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
-         "Invalid operand expansion");
-  
-  ReplaceValueWith(SDOperand(N, 0), Res);
-  return false;
-}
-
-SDOperand DAGTypeLegalizer::SplitOp_STORE(StoreSDNode *N, unsigned OpNo) {
-  // FIXME: Add support for indexed stores.
-  assert(OpNo == 1 && "Can only split the stored value");
-  
-  SDOperand Ch  = N->getChain();
-  SDOperand Ptr = N->getBasePtr();
-  int SVOffset = N->getSrcValueOffset();
-  unsigned Alignment = N->getAlignment();
-  bool isVol = N->isVolatile();
-  SDOperand Lo, Hi;
-  GetSplitOp(N->getOperand(1), Lo, Hi);
-
-  unsigned IncrementSize = MVT::getSizeInBits(Lo.getValueType())/8;
-
-  Lo = DAG.getStore(Ch, Lo, Ptr, N->getSrcValue(), SVOffset, isVol, Alignment);
-  
-  // Increment the pointer to the other half.
-  Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
-                    DAG.getIntPtrConstant(IncrementSize));
-  
-  Hi = DAG.getStore(Ch, Hi, Ptr, N->getSrcValue(), SVOffset+IncrementSize,
-                    isVol, MinAlign(Alignment, IncrementSize));
-  return DAG.getNode(ISD::TokenFactor, MVT::Other, Lo, Hi);
-}
-
-SDOperand DAGTypeLegalizer::SplitOp_RET(SDNode *N, unsigned OpNo) {
-  assert(N->getNumOperands() == 3 &&"Can only handle ret of one vector so far");
-  // FIXME: Returns of gcc generic vectors larger than a legal vector
-  // type should be returned by reference!
-  SDOperand Lo, Hi;
-  GetSplitOp(N->getOperand(1), Lo, Hi);
-
-  SDOperand Chain = N->getOperand(0);  // The chain.
-  SDOperand Sign = N->getOperand(2);  // Signness
-  
-  return DAG.getNode(ISD::RET, MVT::Other, Chain, Lo, Sign, Hi, Sign);
-}
-
-SDOperand DAGTypeLegalizer::SplitOp_BIT_CONVERT(SDNode *N) {
-  // For example, i64 = BIT_CONVERT v4i16 on alpha.  Typically the vector will
-  // end up being split all the way down to individual components.  Convert the
-  // split pieces into integers and reassemble.
-  SDOperand Lo, Hi;
-  GetSplitOp(N->getOperand(0), Lo, Hi);
-  Lo = BitConvertToInteger(Lo);
-  Hi = BitConvertToInteger(Hi);
-
-  if (TLI.isBigEndian())
-    std::swap(Lo, Hi);
-
-  return DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0),
-                     JoinIntegers(Lo, Hi));
-}
-
-SDOperand DAGTypeLegalizer::SplitOp_EXTRACT_VECTOR_ELT(SDNode *N) {
-  SDOperand Vec = N->getOperand(0);
-  SDOperand Idx = N->getOperand(1);
-  MVT::ValueType VecVT = Vec.getValueType();
-
-  if (isa<ConstantSDNode>(Idx)) {
-    uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getValue();
-    assert(IdxVal < MVT::getVectorNumElements(VecVT) &&
-           "Invalid vector index!");
-
-    SDOperand Lo, Hi;
-    GetSplitOp(Vec, Lo, Hi);
-
-    uint64_t LoElts = MVT::getVectorNumElements(Lo.getValueType());
-
-    if (IdxVal < LoElts)
-      return DAG.UpdateNodeOperands(SDOperand(N, 0), Lo, Idx);
-    else
-      return DAG.UpdateNodeOperands(SDOperand(N, 0), Hi,
-                                    DAG.getConstant(IdxVal - LoElts,
-                                                    Idx.getValueType()));
-  }
-
-  // Store the vector to the stack and load back the required element.
-  SDOperand StackPtr = DAG.CreateStackTemporary(VecVT);
-  SDOperand Store = DAG.getStore(DAG.getEntryNode(), Vec, StackPtr, NULL, 0);
-
-  // Add the offset to the index.
-  MVT::ValueType EltVT = MVT::getVectorElementType(VecVT);
-  unsigned EltSize = MVT::getSizeInBits(EltVT)/8; // FIXME: should be ABI size.
-  Idx = DAG.getNode(ISD::MUL, Idx.getValueType(), Idx,
-                    DAG.getConstant(EltSize, Idx.getValueType()));
-
-  if (MVT::getSizeInBits(Idx.getValueType()) >
-      MVT::getSizeInBits(TLI.getPointerTy()))
-    Idx = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), Idx);
-  else
-    Idx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), Idx);
-
-  StackPtr = DAG.getNode(ISD::ADD, Idx.getValueType(), Idx, StackPtr);
-  return DAG.getLoad(EltVT, Store, StackPtr, NULL, 0);
-}
-
-SDOperand DAGTypeLegalizer::SplitOp_EXTRACT_SUBVECTOR(SDNode *N) {
-  // We know that the extracted result type is legal.  For now, assume the index
-  // is a constant.
-  MVT::ValueType SubVT = N->getValueType(0);
-  SDOperand Idx = N->getOperand(1);
-  SDOperand Lo, Hi;
-  GetSplitOp(N->getOperand(0), Lo, Hi);
-
-  uint64_t LoElts = MVT::getVectorNumElements(Lo.getValueType());
-  uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getValue();
-
-  if (IdxVal < LoElts) {
-    assert(IdxVal + MVT::getVectorNumElements(SubVT) <= LoElts &&
-           "Extracted subvector crosses vector split!");
-    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SubVT, Lo, Idx);
-  } else {
-    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SubVT, Hi,
-                       DAG.getConstant(IdxVal - LoElts, Idx.getValueType()));
-  }
-}
-
-SDOperand DAGTypeLegalizer::SplitOp_VECTOR_SHUFFLE(SDNode *N, unsigned OpNo) {
-  assert(OpNo == 2 && "Shuffle source type differs from result type?");
-  SDOperand Mask = N->getOperand(2);
-  unsigned MaskLength = MVT::getVectorNumElements(Mask.getValueType());
-  unsigned LargestMaskEntryPlusOne = 2 * MaskLength;
-  unsigned MinimumBitWidth = Log2_32_Ceil(LargestMaskEntryPlusOne);
-
-  // Look for a legal vector type to place the mask values in.
-  // Note that there may not be *any* legal vector-of-integer
-  // type for which the element type is legal!
-  for (MVT::SimpleValueType EltVT = MVT::FIRST_INTEGER_VALUETYPE;
-       EltVT <= MVT::LAST_INTEGER_VALUETYPE;
-       // Integer values types are consecutively numbered.  Exploit this.
-       EltVT = MVT::SimpleValueType(EltVT + 1)) {
-
-    // Is the element type big enough to hold the values?
-    if (MVT::getSizeInBits(EltVT) < MinimumBitWidth)
-      // Nope.
-      continue;
-
-    // Is the vector type legal?
-    MVT::ValueType VecVT = MVT::getVectorType(EltVT, MaskLength);
-    if (!isTypeLegal(VecVT))
-      // Nope.
-      continue;
-
-    // If the element type is not legal, find a larger legal type to use for
-    // the BUILD_VECTOR operands.  This is an ugly hack, but seems to work!
-    // FIXME: The real solution is to change VECTOR_SHUFFLE into a variadic
-    // node where the shuffle mask is a list of integer operands, #2 .. #2+n.
-    for (MVT::SimpleValueType OpVT = EltVT; OpVT <= MVT::LAST_INTEGER_VALUETYPE;
-         // Integer values types are consecutively numbered.  Exploit this.
-         OpVT = MVT::SimpleValueType(OpVT + 1)) {
-      if (!isTypeLegal(OpVT))
-        continue;
-
-      // Success!  Rebuild the vector using the legal types.
-      SmallVector<SDOperand, 16> Ops(MaskLength);
-      for (unsigned i = 0; i < MaskLength; ++i) {
-        uint64_t Idx =
-          cast<ConstantSDNode>(Mask.getOperand(i))->getValue();
-        Ops[i] = DAG.getConstant(Idx, OpVT);
-      }
-      return DAG.UpdateNodeOperands(SDOperand(N,0),
-                                    N->getOperand(0), N->getOperand(1),
-                                    DAG.getNode(ISD::BUILD_VECTOR,
-                                                VecVT, &Ops[0], Ops.size()));
-    }
-
-    // Continuing is pointless - failure is certain.
-    break;
-  }
-  assert(false && "Failed to find an appropriate mask type!");
-  return SDOperand(N, 0);
-}
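
One detail of the splitting logic that carries over from this deleted file into LegalizeVectorTypes.cpp below is how GetSplitDestVTs divides the element count: power-of-two vectors are halved, anything else is split into the largest power-of-two prefix plus a remainder (so <128 x f32> becomes two <64 x f32> halves, while a hypothetical <7 x f32> would become <4 x f32> and <3 x f32>). A minimal standalone sketch of that arithmetic, not part of the patch and using plain integers instead of MVTs:

  #include <cassert>
  #include <cstdio>

  // Split an element count the way GetSplitDestVTs does: halve powers of two,
  // otherwise peel off the largest power of two below the count.
  static void splitElementCount(unsigned NumElts, unsigned &LoElts, unsigned &HiElts) {
    assert(NumElts > 1 && "nothing to split");
    if ((NumElts & (NumElts - 1)) == 0) {        // Power of two.
      LoElts = HiElts = NumElts / 2;
    } else {
      unsigned Log2 = 0;
      while ((2u << Log2) <= NumElts) ++Log2;    // Floor of log2(NumElts).
      LoElts = 1u << Log2;
      HiElts = NumElts - LoElts;
    }
  }

  int main() {
    unsigned Lo, Hi;
    splitElementCount(128, Lo, Hi);
    std::printf("128 elements -> %u + %u\n", Lo, Hi);   // 64 + 64
    splitElementCount(7, Lo, Hi);
    std::printf("  7 elements -> %u + %u\n", Lo, Hi);   //  4 + 3
    return 0;
  }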

Added: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp?rev=53163&view=auto

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp (added)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp Sun Jul  6 15:45:41 2008
@@ -0,0 +1,743 @@
+//===------- LegalizeVectorTypes.cpp - Legalization of vector types -------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file performs vector type splitting and scalarization for LegalizeTypes.
+// Scalarization is the act of changing a computation in an illegal one-element
+// vector type to be a computation in its scalar element type.  For example,
+// implementing <1 x f32> arithmetic in a scalar f32 register.  This is needed
+// as a base case when scalarizing vector arithmetic like <4 x f32>, which
+// eventually decomposes to scalars if the target doesn't support v4f32 or v2f32
+// types.
+// Splitting is the act of changing a computation in an invalid vector type to
+// be a computation in multiple vectors of a smaller type.  For example,
+// implementing <128 x f32> operations in terms of two <64 x f32> operations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LegalizeTypes.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+//  Result Vector Scalarization: <1 x ty> -> ty.
+//===----------------------------------------------------------------------===//
+
+void DAGTypeLegalizer::ScalarizeResult(SDNode *N, unsigned ResNo) {
+  DEBUG(cerr << "Scalarize node result " << ResNo << ": "; N->dump(&DAG);
+        cerr << "\n");
+  SDOperand R = SDOperand();
+
+  switch (N->getOpcode()) {
+  default:
+#ifndef NDEBUG
+    cerr << "ScalarizeResult #" << ResNo << ": ";
+    N->dump(&DAG); cerr << "\n";
+#endif
+    assert(0 && "Do not know how to scalarize the result of this operator!");
+    abort();
+
+  case ISD::UNDEF: R = ScalarizeVecRes_UNDEF(N); break;
+  case ISD::LOAD:  R = ScalarizeVecRes_LOAD(cast<LoadSDNode>(N)); break;
+
+  case ISD::ADD:
+  case ISD::FADD:
+  case ISD::SUB:
+  case ISD::FSUB:
+  case ISD::MUL:
+  case ISD::FMUL:
+  case ISD::SDIV:
+  case ISD::UDIV:
+  case ISD::FDIV:
+  case ISD::SREM:
+  case ISD::UREM:
+  case ISD::FREM:
+  case ISD::FPOW:
+  case ISD::AND:
+  case ISD::OR:
+  case ISD::XOR:  R = ScalarizeVecRes_BinOp(N); break;
+
+  case ISD::FNEG:
+  case ISD::FABS:
+  case ISD::FSQRT:
+  case ISD::FSIN:
+  case ISD::FCOS:  R = ScalarizeVecRes_UnaryOp(N); break;
+
+  case ISD::FPOWI:             R = ScalarizeVecRes_FPOWI(N); break;
+  case ISD::BUILD_VECTOR:      R = N->getOperand(0); break;
+  case ISD::INSERT_VECTOR_ELT: R = ScalarizeVecRes_INSERT_VECTOR_ELT(N); break;
+  case ISD::VECTOR_SHUFFLE:    R = ScalarizeVecRes_VECTOR_SHUFFLE(N); break;
+  case ISD::BIT_CONVERT:       R = ScalarizeVecRes_BIT_CONVERT(N); break;
+  case ISD::SELECT:            R = ScalarizeVecRes_SELECT(N); break;
+  }
+
+  // If R is null, the sub-method took care of registering the result.
+  if (R.Val)
+    SetScalarizedVector(SDOperand(N, ResNo), R);
+}
+
+SDOperand DAGTypeLegalizer::ScalarizeVecRes_UNDEF(SDNode *N) {
+  return DAG.getNode(ISD::UNDEF, N->getValueType(0).getVectorElementType());
+}
+
+SDOperand DAGTypeLegalizer::ScalarizeVecRes_LOAD(LoadSDNode *N) {
+  assert(ISD::isUNINDEXEDLoad(N) && "Indexed load during type legalization!");
+  SDOperand Result = DAG.getLoad(N->getValueType(0).getVectorElementType(),
+                                 N->getChain(), N->getBasePtr(),
+                                 N->getSrcValue(), N->getSrcValueOffset(),
+                                 N->isVolatile(), N->getAlignment());
+
+  // Legalized the chain result - switch anything that used the old chain to
+  // use the new one.
+  ReplaceValueWith(SDOperand(N, 1), Result.getValue(1));
+  return Result;
+}
+
+SDOperand DAGTypeLegalizer::ScalarizeVecRes_BinOp(SDNode *N) {
+  SDOperand LHS = GetScalarizedVector(N->getOperand(0));
+  SDOperand RHS = GetScalarizedVector(N->getOperand(1));
+  return DAG.getNode(N->getOpcode(), LHS.getValueType(), LHS, RHS);
+}
+
+SDOperand DAGTypeLegalizer::ScalarizeVecRes_UnaryOp(SDNode *N) {
+  SDOperand Op = GetScalarizedVector(N->getOperand(0));
+  return DAG.getNode(N->getOpcode(), Op.getValueType(), Op);
+}
+
+SDOperand DAGTypeLegalizer::ScalarizeVecRes_FPOWI(SDNode *N) {
+  SDOperand Op = GetScalarizedVector(N->getOperand(0));
+  return DAG.getNode(ISD::FPOWI, Op.getValueType(), Op, N->getOperand(1));
+}
+
+SDOperand DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N) {
+  // The value to insert may have a wider type than the vector element type,
+  // so be sure to truncate it to the element type if necessary.
+  SDOperand Op = N->getOperand(1);
+  MVT EltVT = N->getValueType(0).getVectorElementType();
+  if (Op.getValueType().bitsGT(EltVT))
+    Op = DAG.getNode(ISD::TRUNCATE, EltVT, Op);
+  assert(Op.getValueType() == EltVT && "Invalid type for inserted value!");
+  return Op;
+}
+
+SDOperand DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N) {
+  // Figure out if the scalar is the LHS or RHS and return it.
+  SDOperand EltNum = N->getOperand(2).getOperand(0);
+  unsigned Op = cast<ConstantSDNode>(EltNum)->getValue() != 0;
+  return GetScalarizedVector(N->getOperand(Op));
+}
+
+SDOperand DAGTypeLegalizer::ScalarizeVecRes_BIT_CONVERT(SDNode *N) {
+  MVT NewVT = N->getValueType(0).getVectorElementType();
+  return DAG.getNode(ISD::BIT_CONVERT, NewVT, N->getOperand(0));
+}
+
+SDOperand DAGTypeLegalizer::ScalarizeVecRes_SELECT(SDNode *N) {
+  SDOperand LHS = GetScalarizedVector(N->getOperand(1));
+  return DAG.getNode(ISD::SELECT, LHS.getValueType(), N->getOperand(0), LHS,
+                     GetScalarizedVector(N->getOperand(2)));
+}
+
+
+//===----------------------------------------------------------------------===//
+//  Operand Vector Scalarization <1 x ty> -> ty.
+//===----------------------------------------------------------------------===//
+
+bool DAGTypeLegalizer::ScalarizeOperand(SDNode *N, unsigned OpNo) {
+  DEBUG(cerr << "Scalarize node operand " << OpNo << ": "; N->dump(&DAG);
+        cerr << "\n");
+  SDOperand Res(0, 0);
+
+  if (Res.Val == 0) {
+    switch (N->getOpcode()) {
+    default:
+#ifndef NDEBUG
+      cerr << "ScalarizeOperand Op #" << OpNo << ": ";
+      N->dump(&DAG); cerr << "\n";
+#endif
+      assert(0 && "Do not know how to scalarize this operator's operand!");
+      abort();
+
+    case ISD::BIT_CONVERT:
+      Res = ScalarizeVecOp_BIT_CONVERT(N); break;
+
+    case ISD::EXTRACT_VECTOR_ELT:
+      Res = ScalarizeVecOp_EXTRACT_VECTOR_ELT(N); break;
+
+    case ISD::STORE:
+      Res = ScalarizeVecOp_STORE(cast<StoreSDNode>(N), OpNo); break;
+    }
+  }
+
+  // If the result is null, the sub-method took care of registering results etc.
+  if (!Res.Val) return false;
+
+  // If the result is N, the sub-method updated N in place.  Check to see if any
+  // operands are new, and if so, mark them.
+  if (Res.Val == N) {
+    // Mark N as new and remark N and its operands.  This allows us to correctly
+    // revisit N if it needs another step of promotion and allows us to visit
+    // any new operands to N.
+    ReanalyzeNode(N);
+    return true;
+  }
+
+  assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
+         "Invalid operand expansion");
+
+  ReplaceValueWith(SDOperand(N, 0), Res);
+  return false;
+}
+
+/// ScalarizeVecOp_BIT_CONVERT - If the value to convert is a vector that needs
+/// to be scalarized, it must be <1 x ty>.  Convert the element instead.
+SDOperand DAGTypeLegalizer::ScalarizeVecOp_BIT_CONVERT(SDNode *N) {
+  SDOperand Elt = GetScalarizedVector(N->getOperand(0));
+  return DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0), Elt);
+}
+
+/// ScalarizeVecOp_EXTRACT_VECTOR_ELT - If the input is a vector that needs to
+/// be scalarized, it must be <1 x ty>, so just return the element, ignoring the
+/// index.
+SDOperand DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
+  return GetScalarizedVector(N->getOperand(0));
+}
+
+/// ScalarizeVecOp_STORE - If the value to store is a vector that needs to be
+/// scalarized, it must be <1 x ty>.  Just store the element.
+SDOperand DAGTypeLegalizer::ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo){
+  assert(ISD::isUNINDEXEDStore(N) && "Indexed store during type legalization!");
+  assert(OpNo == 1 && "Do not know how to scalarize this operand!");
+  return DAG.getStore(N->getChain(), GetScalarizedVector(N->getOperand(1)),
+                      N->getBasePtr(), N->getSrcValue(), N->getSrcValueOffset(),
+                      N->isVolatile(), N->getAlignment());
+}
+
+
+//===----------------------------------------------------------------------===//
+//  Result Vector Splitting
+//===----------------------------------------------------------------------===//
+
+/// SplitResult - This method is called when the specified result of the
+/// specified node is found to need vector splitting.  At this point, the node
+/// may also have invalid operands or may have other results that need
+/// legalization; we just know that (at least) one result needs vector
+/// splitting.
+void DAGTypeLegalizer::SplitResult(SDNode *N, unsigned ResNo) {
+  DEBUG(cerr << "Split node result: "; N->dump(&DAG); cerr << "\n");
+  SDOperand Lo, Hi;
+
+  switch (N->getOpcode()) {
+  default:
+#ifndef NDEBUG
+    cerr << "SplitResult #" << ResNo << ": ";
+    N->dump(&DAG); cerr << "\n";
+#endif
+    assert(0 && "Do not know how to split the result of this operator!");
+    abort();
+
+  case ISD::MERGE_VALUES: SplitRes_MERGE_VALUES(N, Lo, Hi); break;
+  case ISD::SELECT:       SplitRes_SELECT(N, Lo, Hi); break;
+  case ISD::SELECT_CC:    SplitRes_SELECT_CC(N, Lo, Hi); break;
+  case ISD::UNDEF:        SplitRes_UNDEF(N, Lo, Hi); break;
+
+  case ISD::LOAD:
+    SplitVecRes_LOAD(cast<LoadSDNode>(N), Lo, Hi);
+    break;
+  case ISD::BUILD_PAIR:       SplitVecRes_BUILD_PAIR(N, Lo, Hi); break;
+  case ISD::INSERT_VECTOR_ELT:SplitVecRes_INSERT_VECTOR_ELT(N, Lo, Hi); break;
+  case ISD::VECTOR_SHUFFLE:   SplitVecRes_VECTOR_SHUFFLE(N, Lo, Hi); break;
+  case ISD::BUILD_VECTOR:     SplitVecRes_BUILD_VECTOR(N, Lo, Hi); break;
+  case ISD::CONCAT_VECTORS:   SplitVecRes_CONCAT_VECTORS(N, Lo, Hi); break;
+  case ISD::BIT_CONVERT:      SplitVecRes_BIT_CONVERT(N, Lo, Hi); break;
+  case ISD::CTTZ:
+  case ISD::CTLZ:
+  case ISD::CTPOP:
+  case ISD::FNEG:
+  case ISD::FABS:
+  case ISD::FSQRT:
+  case ISD::FSIN:
+  case ISD::FCOS:
+  case ISD::FP_TO_SINT:
+  case ISD::FP_TO_UINT:
+  case ISD::SINT_TO_FP:
+  case ISD::UINT_TO_FP:       SplitVecRes_UnOp(N, Lo, Hi); break;
+  case ISD::ADD:
+  case ISD::SUB:
+  case ISD::MUL:
+  case ISD::FADD:
+  case ISD::FSUB:
+  case ISD::FMUL:
+  case ISD::SDIV:
+  case ISD::UDIV:
+  case ISD::FDIV:
+  case ISD::FPOW:
+  case ISD::AND:
+  case ISD::OR:
+  case ISD::XOR:
+  case ISD::UREM:
+  case ISD::SREM:
+  case ISD::FREM:             SplitVecRes_BinOp(N, Lo, Hi); break;
+  case ISD::FPOWI:            SplitVecRes_FPOWI(N, Lo, Hi); break;
+  }
+
+  // If Lo/Hi is null, the sub-method took care of registering results etc.
+  if (Lo.Val)
+    SetSplitVector(SDOperand(N, ResNo), Lo, Hi);
+}
+
+void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDOperand &Lo,
+                                        SDOperand &Hi) {
+  assert(ISD::isUNINDEXEDLoad(LD) && "Indexed load during type legalization!");
+  MVT LoVT, HiVT;
+  GetSplitDestVTs(LD->getValueType(0), LoVT, HiVT);
+
+  SDOperand Ch = LD->getChain();
+  SDOperand Ptr = LD->getBasePtr();
+  const Value *SV = LD->getSrcValue();
+  int SVOffset = LD->getSrcValueOffset();
+  unsigned Alignment = LD->getAlignment();
+  bool isVolatile = LD->isVolatile();
+
+  Lo = DAG.getLoad(LoVT, Ch, Ptr, SV, SVOffset, isVolatile, Alignment);
+  unsigned IncrementSize = LoVT.getSizeInBits()/8;
+  Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
+                    DAG.getIntPtrConstant(IncrementSize));
+  SVOffset += IncrementSize;
+  Alignment = MinAlign(Alignment, IncrementSize);
+  Hi = DAG.getLoad(HiVT, Ch, Ptr, SV, SVOffset, isVolatile, Alignment);
+
+  // Build a factor node to remember that this load is independent of the
+  // other one.
+  SDOperand TF = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1),
+                             Hi.getValue(1));
+
+  // Legalized the chain result - switch anything that used the old chain to
+  // use the new one.
+  ReplaceValueWith(SDOperand(LD, 1), TF);
+}
+
+void DAGTypeLegalizer::SplitVecRes_BUILD_PAIR(SDNode *N, SDOperand &Lo,
+                                              SDOperand &Hi) {
+#ifndef NDEBUG
+  MVT LoVT, HiVT;
+  GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
+  assert(LoVT == HiVT && "Non-power-of-two vectors not supported!");
+#endif
+  Lo = N->getOperand(0);
+  Hi = N->getOperand(1);
+}
+
+void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDOperand &Lo,
+                                                     SDOperand &Hi) {
+  SDOperand Vec = N->getOperand(0);
+  SDOperand Elt = N->getOperand(1);
+  SDOperand Idx = N->getOperand(2);
+  GetSplitVector(Vec, Lo, Hi);
+
+  if (ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
+    unsigned IdxVal = CIdx->getValue();
+    unsigned LoNumElts = Lo.getValueType().getVectorNumElements();
+    if (IdxVal < LoNumElts)
+      Lo = DAG.getNode(ISD::INSERT_VECTOR_ELT, Lo.getValueType(), Lo, Elt, Idx);
+    else
+      Hi = DAG.getNode(ISD::INSERT_VECTOR_ELT, Hi.getValueType(), Hi, Elt,
+                       DAG.getIntPtrConstant(IdxVal - LoNumElts));
+    return;
+  }
+
+  // Spill the vector to the stack.
+  MVT VecVT = Vec.getValueType();
+  SDOperand StackPtr = DAG.CreateStackTemporary(VecVT);
+  SDOperand Store = DAG.getStore(DAG.getEntryNode(), Vec, StackPtr, NULL, 0);
+
+  // Store the new element.
+  SDOperand EltPtr = GetVectorElementPointer(StackPtr,
+                                             VecVT.getVectorElementType(), Idx);
+  Store = DAG.getStore(Store, Elt, EltPtr, NULL, 0);
+
+  // Reload the vector from the stack.
+  SDOperand Load = DAG.getLoad(VecVT, Store, StackPtr, NULL, 0);
+
+  // Split it.
+  SplitVecRes_LOAD(cast<LoadSDNode>(Load.Val), Lo, Hi);
+}
+
+void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(SDNode *N, SDOperand &Lo,
+                                                  SDOperand &Hi) {
+  // Build the low part.
+  SDOperand Mask = N->getOperand(2);
+  SmallVector<SDOperand, 16> Ops;
+  MVT LoVT, HiVT;
+  GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
+  MVT EltVT = LoVT.getVectorElementType();
+  unsigned LoNumElts = LoVT.getVectorNumElements();
+  unsigned NumElements = Mask.getNumOperands();
+
+  // Insert all of the elements from the input that are needed.  We use
+  // buildvector of extractelement here because the input vectors will have
+  // to be legalized, so this makes the code simpler.
+  for (unsigned i = 0; i != LoNumElts; ++i) {
+    unsigned Idx = cast<ConstantSDNode>(Mask.getOperand(i))->getValue();
+    SDOperand InVec = N->getOperand(0);
+    if (Idx >= NumElements) {
+      InVec = N->getOperand(1);
+      Idx -= NumElements;
+    }
+    Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, EltVT, InVec,
+                              DAG.getIntPtrConstant(Idx)));
+  }
+  Lo = DAG.getNode(ISD::BUILD_VECTOR, LoVT, &Ops[0], Ops.size());
+  Ops.clear();
+
+  for (unsigned i = LoNumElts; i != NumElements; ++i) {
+    unsigned Idx = cast<ConstantSDNode>(Mask.getOperand(i))->getValue();
+    SDOperand InVec = N->getOperand(0);
+    if (Idx >= NumElements) {
+      InVec = N->getOperand(1);
+      Idx -= NumElements;
+    }
+    Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, EltVT, InVec,
+                              DAG.getIntPtrConstant(Idx)));
+  }
+  Hi = DAG.getNode(ISD::BUILD_VECTOR, HiVT, &Ops[0], Ops.size());
+}
+
+void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDOperand &Lo,
+                                                SDOperand &Hi) {
+  MVT LoVT, HiVT;
+  GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
+  unsigned LoNumElts = LoVT.getVectorNumElements();
+  SmallVector<SDOperand, 8> LoOps(N->op_begin(), N->op_begin()+LoNumElts);
+  Lo = DAG.getNode(ISD::BUILD_VECTOR, LoVT, &LoOps[0], LoOps.size());
+
+  SmallVector<SDOperand, 8> HiOps(N->op_begin()+LoNumElts, N->op_end());
+  Hi = DAG.getNode(ISD::BUILD_VECTOR, HiVT, &HiOps[0], HiOps.size());
+}
+
+void DAGTypeLegalizer::SplitVecRes_CONCAT_VECTORS(SDNode *N, SDOperand &Lo,
+                                                  SDOperand &Hi) {
+  // FIXME: Handle non-power-of-two vectors?
+  unsigned NumSubvectors = N->getNumOperands() / 2;
+  if (NumSubvectors == 1) {
+    Lo = N->getOperand(0);
+    Hi = N->getOperand(1);
+    return;
+  }
+
+  MVT LoVT, HiVT;
+  GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
+
+  SmallVector<SDOperand, 8> LoOps(N->op_begin(), N->op_begin()+NumSubvectors);
+  Lo = DAG.getNode(ISD::CONCAT_VECTORS, LoVT, &LoOps[0], LoOps.size());
+
+  SmallVector<SDOperand, 8> HiOps(N->op_begin()+NumSubvectors, N->op_end());
+  Hi = DAG.getNode(ISD::CONCAT_VECTORS, HiVT, &HiOps[0], HiOps.size());
+}
+
+void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDOperand &Lo,
+                                               SDOperand &Hi) {
+  // We know the result is a vector.  The input may be either a vector or a
+  // scalar value.
+  MVT LoVT, HiVT;
+  GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
+
+  SDOperand InOp = N->getOperand(0);
+  MVT InVT = InOp.getValueType();
+
+  // Handle some special cases efficiently.
+  switch (getTypeAction(InVT)) {
+  default:
+    assert(false && "Unknown type action!");
+  case Legal:
+  case PromoteInteger:
+  case SoftenFloat:
+  case Scalarize:
+    break;
+  case ExpandInteger:
+  case ExpandFloat:
+    // A scalar to vector conversion, where the scalar needs expansion.
+    // If the vector is being split in two then we can just convert the
+    // expanded pieces.
+    if (LoVT == HiVT) {
+      GetExpandedOp(InOp, Lo, Hi);
+      if (TLI.isBigEndian())
+        std::swap(Lo, Hi);
+      Lo = DAG.getNode(ISD::BIT_CONVERT, LoVT, Lo);
+      Hi = DAG.getNode(ISD::BIT_CONVERT, HiVT, Hi);
+      return;
+    }
+    break;
+  case Split:
+    // If the input is a vector that needs to be split, convert each split
+    // piece of the input now.
+    GetSplitVector(InOp, Lo, Hi);
+    Lo = DAG.getNode(ISD::BIT_CONVERT, LoVT, Lo);
+    Hi = DAG.getNode(ISD::BIT_CONVERT, HiVT, Hi);
+    return;
+  }
+
+  // In the general case, convert the input to an integer and split it by hand.
+  MVT LoIntVT = MVT::getIntegerVT(LoVT.getSizeInBits());
+  MVT HiIntVT = MVT::getIntegerVT(HiVT.getSizeInBits());
+  if (TLI.isBigEndian())
+    std::swap(LoIntVT, HiIntVT);
+
+  SplitInteger(BitConvertToInteger(InOp), LoIntVT, HiIntVT, Lo, Hi);
+
+  if (TLI.isBigEndian())
+    std::swap(Lo, Hi);
+  Lo = DAG.getNode(ISD::BIT_CONVERT, LoVT, Lo);
+  Hi = DAG.getNode(ISD::BIT_CONVERT, HiVT, Hi);
+}
+
+void DAGTypeLegalizer::SplitVecRes_BinOp(SDNode *N, SDOperand &Lo,
+                                         SDOperand &Hi) {
+  SDOperand LHSLo, LHSHi;
+  GetSplitVector(N->getOperand(0), LHSLo, LHSHi);
+  SDOperand RHSLo, RHSHi;
+  GetSplitVector(N->getOperand(1), RHSLo, RHSHi);
+
+  Lo = DAG.getNode(N->getOpcode(), LHSLo.getValueType(), LHSLo, RHSLo);
+  Hi = DAG.getNode(N->getOpcode(), LHSHi.getValueType(), LHSHi, RHSHi);
+}
+
+void DAGTypeLegalizer::SplitVecRes_UnOp(SDNode *N, SDOperand &Lo,
+                                        SDOperand &Hi) {
+  // Get the dest types.  This doesn't always match input types, e.g. int_to_fp.
+  MVT LoVT, HiVT;
+  GetSplitDestVTs(N->getValueType(0), LoVT, HiVT);
+
+  GetSplitVector(N->getOperand(0), Lo, Hi);
+  Lo = DAG.getNode(N->getOpcode(), LoVT, Lo);
+  Hi = DAG.getNode(N->getOpcode(), HiVT, Hi);
+}
+
+void DAGTypeLegalizer::SplitVecRes_FPOWI(SDNode *N, SDOperand &Lo,
+                                         SDOperand &Hi) {
+  GetSplitVector(N->getOperand(0), Lo, Hi);
+  Lo = DAG.getNode(ISD::FPOWI, Lo.getValueType(), Lo, N->getOperand(1));
+  Hi = DAG.getNode(ISD::FPOWI, Lo.getValueType(), Hi, N->getOperand(1));
+}
+
+
+//===----------------------------------------------------------------------===//
+//  Operand Vector Splitting
+//===----------------------------------------------------------------------===//
+
+/// SplitOperand - This method is called when the specified operand of the
+/// specified node is found to need vector splitting.  At this point, all of the
+/// result types of the node are known to be legal, but other operands of the
+/// node may need legalization as well as the specified one.
+bool DAGTypeLegalizer::SplitOperand(SDNode *N, unsigned OpNo) {
+  DEBUG(cerr << "Split node operand: "; N->dump(&DAG); cerr << "\n");
+  SDOperand Res(0, 0);
+
+  if (Res.Val == 0) {
+    switch (N->getOpcode()) {
+    default:
+#ifndef NDEBUG
+      cerr << "SplitOperand Op #" << OpNo << ": ";
+      N->dump(&DAG); cerr << "\n";
+#endif
+      assert(0 && "Do not know how to split this operator's operand!");
+      abort();
+    case ISD::STORE: Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo); break;
+    case ISD::RET:   Res = SplitVecOp_RET(N, OpNo); break;
+
+    case ISD::BIT_CONVERT: Res = SplitVecOp_BIT_CONVERT(N); break;
+
+    case ISD::EXTRACT_VECTOR_ELT: Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break;
+    case ISD::EXTRACT_SUBVECTOR:  Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break;
+    case ISD::VECTOR_SHUFFLE:
+      Res = SplitVecOp_VECTOR_SHUFFLE(N, OpNo);
+      break;
+    }
+  }
+
+  // If the result is null, the sub-method took care of registering results etc.
+  if (!Res.Val) return false;
+
+  // If the result is N, the sub-method updated N in place.  Check to see if any
+  // operands are new, and if so, mark them.
+  if (Res.Val == N) {
+    // Mark N as new and remark N and its operands.  This allows us to correctly
+    // revisit N if it needs another step of promotion and allows us to visit
+    // any new operands to N.
+    ReanalyzeNode(N);
+    return true;
+  }
+
+  assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 &&
+         "Invalid operand expansion");
+
+  ReplaceValueWith(SDOperand(N, 0), Res);
+  return false;
+}
+
+SDOperand DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
+  assert(ISD::isUNINDEXEDStore(N) && "Indexed store during type legalization!");
+  assert(OpNo == 1 && "Can only split the stored value");
+
+  SDOperand Ch  = N->getChain();
+  SDOperand Ptr = N->getBasePtr();
+  int SVOffset = N->getSrcValueOffset();
+  unsigned Alignment = N->getAlignment();
+  bool isVol = N->isVolatile();
+  SDOperand Lo, Hi;
+  GetSplitVector(N->getOperand(1), Lo, Hi);
+
+  unsigned IncrementSize = Lo.getValueType().getSizeInBits()/8;
+
+  Lo = DAG.getStore(Ch, Lo, Ptr, N->getSrcValue(), SVOffset, isVol, Alignment);
+
+  // Increment the pointer to the other half.
+  Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
+                    DAG.getIntPtrConstant(IncrementSize));
+
+  Hi = DAG.getStore(Ch, Hi, Ptr, N->getSrcValue(), SVOffset+IncrementSize,
+                    isVol, MinAlign(Alignment, IncrementSize));
+  return DAG.getNode(ISD::TokenFactor, MVT::Other, Lo, Hi);
+}
+
+SDOperand DAGTypeLegalizer::SplitVecOp_RET(SDNode *N, unsigned OpNo) {
+  assert(N->getNumOperands() == 3 &&"Can only handle ret of one vector so far");
+  // FIXME: Returns of gcc generic vectors larger than a legal vector
+  // type should be returned by reference!
+  SDOperand Lo, Hi;
+  GetSplitVector(N->getOperand(1), Lo, Hi);
+
+  SDOperand Chain = N->getOperand(0);  // The chain.
+  SDOperand Sign = N->getOperand(2);  // Signedness
+
+  return DAG.getNode(ISD::RET, MVT::Other, Chain, Lo, Sign, Hi, Sign);
+}
+
+SDOperand DAGTypeLegalizer::SplitVecOp_BIT_CONVERT(SDNode *N) {
+  // For example, i64 = BIT_CONVERT v4i16 on alpha.  Typically the vector will
+  // end up being split all the way down to individual components.  Convert the
+  // split pieces into integers and reassemble.
+  SDOperand Lo, Hi;
+  GetSplitVector(N->getOperand(0), Lo, Hi);
+  Lo = BitConvertToInteger(Lo);
+  Hi = BitConvertToInteger(Hi);
+
+  if (TLI.isBigEndian())
+    std::swap(Lo, Hi);
+
+  return DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0),
+                     JoinIntegers(Lo, Hi));
+}
+
+SDOperand DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
+  SDOperand Vec = N->getOperand(0);
+  SDOperand Idx = N->getOperand(1);
+  MVT VecVT = Vec.getValueType();
+
+  if (isa<ConstantSDNode>(Idx)) {
+    uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getValue();
+    assert(IdxVal < VecVT.getVectorNumElements() && "Invalid vector index!");
+
+    SDOperand Lo, Hi;
+    GetSplitVector(Vec, Lo, Hi);
+
+    uint64_t LoElts = Lo.getValueType().getVectorNumElements();
+
+    if (IdxVal < LoElts)
+      return DAG.UpdateNodeOperands(SDOperand(N, 0), Lo, Idx);
+    else
+      return DAG.UpdateNodeOperands(SDOperand(N, 0), Hi,
+                                    DAG.getConstant(IdxVal - LoElts,
+                                                    Idx.getValueType()));
+  }
+
+  // Store the vector to the stack.
+  MVT EltVT = VecVT.getVectorElementType();
+  SDOperand StackPtr = DAG.CreateStackTemporary(VecVT);
+  SDOperand Store = DAG.getStore(DAG.getEntryNode(), Vec, StackPtr, NULL, 0);
+
+  // Load back the required element.
+  StackPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
+  return DAG.getLoad(EltVT, Store, StackPtr, NULL, 0);
+}
+
+SDOperand DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) {
+  // We know that the extracted result type is legal.  For now, assume the index
+  // is a constant.
+  MVT SubVT = N->getValueType(0);
+  SDOperand Idx = N->getOperand(1);
+  SDOperand Lo, Hi;
+  GetSplitVector(N->getOperand(0), Lo, Hi);
+
+  uint64_t LoElts = Lo.getValueType().getVectorNumElements();
+  uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getValue();
+
+  if (IdxVal < LoElts) {
+    assert(IdxVal + SubVT.getVectorNumElements() <= LoElts &&
+           "Extracted subvector crosses vector split!");
+    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SubVT, Lo, Idx);
+  } else {
+    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SubVT, Hi,
+                       DAG.getConstant(IdxVal - LoElts, Idx.getValueType()));
+  }
+}
+
+SDOperand DAGTypeLegalizer::SplitVecOp_VECTOR_SHUFFLE(SDNode *N, unsigned OpNo){
+  assert(OpNo == 2 && "Shuffle source type differs from result type?");
+  SDOperand Mask = N->getOperand(2);
+  unsigned MaskLength = Mask.getValueType().getVectorNumElements();
+  unsigned LargestMaskEntryPlusOne = 2 * MaskLength;
+  unsigned MinimumBitWidth = Log2_32_Ceil(LargestMaskEntryPlusOne);
+
+  // Look for a legal vector type to place the mask values in.
+  // Note that there may not be *any* legal vector-of-integer
+  // type for which the element type is legal!
+  for (MVT::SimpleValueType EltVT = MVT::FIRST_INTEGER_VALUETYPE;
+       EltVT <= MVT::LAST_INTEGER_VALUETYPE;
+       // Integer value types are consecutively numbered.  Exploit this.
+       EltVT = MVT::SimpleValueType(EltVT + 1)) {
+
+    // Is the element type big enough to hold the values?
+    if (MVT(EltVT).getSizeInBits() < MinimumBitWidth)
+      // Nope.
+      continue;
+
+    // Is the vector type legal?
+    MVT VecVT = MVT::getVectorVT(EltVT, MaskLength);
+    if (!isTypeLegal(VecVT))
+      // Nope.
+      continue;
+
+    // If the element type is not legal, find a larger legal type to use for
+    // the BUILD_VECTOR operands.  This is an ugly hack, but seems to work!
+    // FIXME: The real solution is to change VECTOR_SHUFFLE into a variadic
+    // node where the shuffle mask is a list of integer operands, #2 .. #2+n.
+    for (MVT::SimpleValueType OpVT = EltVT; OpVT <= MVT::LAST_INTEGER_VALUETYPE;
+         // Integer value types are consecutively numbered.  Exploit this.
+         OpVT = MVT::SimpleValueType(OpVT + 1)) {
+      if (!isTypeLegal(OpVT))
+        continue;
+
+      // Success!  Rebuild the vector using the legal types.
+      SmallVector<SDOperand, 16> Ops(MaskLength);
+      for (unsigned i = 0; i < MaskLength; ++i) {
+        uint64_t Idx =
+          cast<ConstantSDNode>(Mask.getOperand(i))->getValue();
+        Ops[i] = DAG.getConstant(Idx, OpVT);
+      }
+      return DAG.UpdateNodeOperands(SDOperand(N,0),
+                                    N->getOperand(0), N->getOperand(1),
+                                    DAG.getNode(ISD::BUILD_VECTOR,
+                                                VecVT, &Ops[0], Ops.size()));
+    }
+
+    // Continuing is pointless - failure is certain.
+    break;
+  }
+  assert(false && "Failed to find an appropriate mask type!");
+  return SDOperand(N, 0);
+}
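
The mask-type search in SplitVecOp_VECTOR_SHUFFLE above starts from a minimum bit width of Log2_32_Ceil(2 * MaskLength), because a shuffle mask entry indexes into the concatenation of both input vectors and can therefore be as large as 2 * MaskLength - 1. A small standalone sketch of that arithmetic, not part of the patch:

  #include <cstdio>

  // Ceiling of log2(X): the number of bits needed to hold values in [0, X-1].
  static unsigned log2Ceil(unsigned X) {
    unsigned Bits = 0;
    while ((1u << Bits) < X)
      ++Bits;
    return Bits;
  }

  int main() {
    for (unsigned MaskLength = 2; MaskLength <= 32; MaskLength *= 2) {
      unsigned LargestMaskEntryPlusOne = 2 * MaskLength;
      unsigned MinimumBitWidth = log2Ceil(LargestMaskEntryPlusOne);
      std::printf("%2u-entry mask: entries up to %2u need at least %u bits each\n",
                  MaskLength, LargestMaskEntryPlusOne - 1, MinimumBitWidth);
    }
    return 0;
  }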

Modified: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp Sun Jul  6 15:45:41 2008
@@ -14,7 +14,6 @@
 //===----------------------------------------------------------------------===//
 
 #define DEBUG_TYPE "pre-RA-sched"
-#include "llvm/Constants.h"
 #include "llvm/Type.h"
 #include "llvm/CodeGen/ScheduleDAG.h"
 #include "llvm/CodeGen/MachineConstantPool.h"
@@ -79,13 +78,12 @@
 
 SUnit *ScheduleDAG::Clone(SUnit *Old) {
   SUnit *SU = NewSUnit(Old->Node);
+  SU->OrigNode = Old->OrigNode;
   SU->FlaggedNodes = Old->FlaggedNodes;
-  SU->InstanceNo = SUnitMap[Old->Node].size();
   SU->Latency = Old->Latency;
   SU->isTwoAddress = Old->isTwoAddress;
   SU->isCommutable = Old->isCommutable;
   SU->hasPhysRegDefs = Old->hasPhysRegDefs;
-  SUnitMap[Old->Node].push_back(SU);
   return SU;
 }
 
@@ -97,15 +95,22 @@
   // Reserve entries in the vector for each of the SUnits we are creating.  This
   // ensure that reallocation of the vector won't happen, so SUnit*'s won't get
   // invalidated.
-  SUnits.reserve(std::distance(DAG.allnodes_begin(), DAG.allnodes_end()));
+  SUnits.reserve(DAG.allnodes_size());
   
+  // During scheduling, the NodeId field of SDNode is used to map SDNodes
+  // to their associated SUnits by holding SUnits table indices. A value
+  // of -1 means the SDNode does not yet have an associated SUnit.
+  for (SelectionDAG::allnodes_iterator NI = DAG.allnodes_begin(),
+       E = DAG.allnodes_end(); NI != E; ++NI)
+    NI->setNodeId(-1);
+
   for (SelectionDAG::allnodes_iterator NI = DAG.allnodes_begin(),
        E = DAG.allnodes_end(); NI != E; ++NI) {
     if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
       continue;
     
     // If this node has already been processed, stop now.
-    if (SUnitMap[NI].size()) continue;
+    if (NI->getNodeId() != -1) continue;
     
     SUnit *NodeSUnit = NewSUnit(NI);
     
@@ -120,7 +125,8 @@
       do {
         N = N->getOperand(N->getNumOperands()-1).Val;
         NodeSUnit->FlaggedNodes.push_back(N);
-        SUnitMap[N].push_back(NodeSUnit);
+        assert(N->getNodeId() == -1 && "Node already inserted!");
+        N->setNodeId(NodeSUnit->NodeNum);
       } while (N->getNumOperands() &&
                N->getOperand(N->getNumOperands()-1).getValueType()== MVT::Flag);
       std::reverse(NodeSUnit->FlaggedNodes.begin(),
@@ -140,7 +146,8 @@
         if (FlagVal.isOperandOf(UI->getUser())) {
           HasFlagUse = true;
           NodeSUnit->FlaggedNodes.push_back(N);
-          SUnitMap[N].push_back(NodeSUnit);
+          assert(N->getNodeId() == -1 && "Node already inserted!");
+          N->setNodeId(NodeSUnit->NodeNum);
           N = UI->getUser();
           break;
         }
@@ -150,7 +157,8 @@
     // Now all flagged nodes are in FlaggedNodes and N is the bottom-most node.
     // Update the SUnit
     NodeSUnit->Node = N;
-    SUnitMap[N].push_back(NodeSUnit);
+    assert(N->getNodeId() == -1 && "Node already inserted!");
+    N->setNodeId(NodeSUnit->NodeNum);
 
     ComputeLatency(NodeSUnit);
   }
@@ -187,11 +195,11 @@
       for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
         SDNode *OpN = N->getOperand(i).Val;
         if (isPassiveNode(OpN)) continue;   // Not scheduled.
-        SUnit *OpSU = SUnitMap[OpN].front();
+        SUnit *OpSU = &SUnits[OpN->getNodeId()];
         assert(OpSU && "Node has no SUnit!");
         if (OpSU == SU) continue;           // In the same group.
 
-        MVT::ValueType OpVT = N->getOperand(i).getValueType();
+        MVT OpVT = N->getOperand(i).getValueType();
         assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
         bool isChain = OpVT == MVT::Other;
 
@@ -206,8 +214,6 @@
     // Remove MainNode from FlaggedNodes again.
     SU->FlaggedNodes.pop_back();
   }
-  
-  return;
 }
 
 void ScheduleDAG::ComputeLatency(SUnit *SU) {
@@ -218,26 +224,26 @@
   if (InstrItins.isEmpty()) {
     // No latency information.
     SU->Latency = 1;
-  } else {
-    SU->Latency = 0;
-    if (SU->Node->isTargetOpcode()) {
-      unsigned SchedClass =
-        TII->get(SU->Node->getTargetOpcode()).getSchedClass();
+    return;
+  }
+
+  SU->Latency = 0;
+  if (SU->Node->isTargetOpcode()) {
+    unsigned SchedClass = TII->get(SU->Node->getTargetOpcode()).getSchedClass();
+    const InstrStage *S = InstrItins.begin(SchedClass);
+    const InstrStage *E = InstrItins.end(SchedClass);
+    for (; S != E; ++S)
+      SU->Latency += S->Cycles;
+  }
+  for (unsigned i = 0, e = SU->FlaggedNodes.size(); i != e; ++i) {
+    SDNode *FNode = SU->FlaggedNodes[i];
+    if (FNode->isTargetOpcode()) {
+      unsigned SchedClass = TII->get(FNode->getTargetOpcode()).getSchedClass();
       const InstrStage *S = InstrItins.begin(SchedClass);
       const InstrStage *E = InstrItins.end(SchedClass);
       for (; S != E; ++S)
         SU->Latency += S->Cycles;
     }
-    for (unsigned i = 0, e = SU->FlaggedNodes.size(); i != e; ++i) {
-      SDNode *FNode = SU->FlaggedNodes[i];
-      if (FNode->isTargetOpcode()) {
-        unsigned SchedClass =TII->get(FNode->getTargetOpcode()).getSchedClass();
-        const InstrStage *S = InstrItins.begin(SchedClass);
-        const InstrStage *E = InstrItins.end(SchedClass);
-        for (; S != E; ++S)
-          SU->Latency += S->Cycles;
-      }
-    }
   }
 }
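
For reference on the itinerary walk in ComputeLatency above: a unit's latency is just the sum of the cycle counts of the pipeline stages listed for its scheduling class, with a fallback latency of 1 when no itinerary data exists. A minimal sketch of that computation, using a hypothetical Stage type and free function rather than the target's itinerary classes:

// Hypothetical stand-in for one pipeline stage of an itinerary entry.
struct Stage {
  unsigned Cycles;            // cycles this stage occupies a functional unit
};

// Sum the stage cycles for one scheduling class; callers fall back to a
// latency of 1 when no itinerary information is available.
static unsigned latencyForClass(const Stage *S, const Stage *E) {
  unsigned Latency = 0;
  for (; S != E; ++S)
    Latency += S->Cycles;
  return Latency;
}
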
 
@@ -384,11 +390,12 @@
   return N;
 }
 
-static const TargetRegisterClass *getInstrOperandRegClass(
-        const TargetRegisterInfo *TRI, 
-        const TargetInstrInfo *TII,
-        const TargetInstrDesc &II,
-        unsigned Op) {
+/// getInstrOperandRegClass - Return the register class of the operand of an
+/// instruction of the specified TargetInstrDesc.
+static const TargetRegisterClass*
+getInstrOperandRegClass(const TargetRegisterInfo *TRI, 
+                        const TargetInstrInfo *TII, const TargetInstrDesc &II,
+                        unsigned Op) {
   if (Op >= II.getNumOperands()) {
     assert(II.isVariadic() && "Invalid operand # of instruction");
     return NULL;
@@ -398,15 +405,18 @@
   return TRI->getRegClass(II.OpInfo[Op].RegClass);
 }
 
+/// EmitCopyFromReg - Generate machine code for a CopyFromReg node or an
+/// implicit physical register output.
 void ScheduleDAG::EmitCopyFromReg(SDNode *Node, unsigned ResNo,
-                                  unsigned InstanceNo, unsigned SrcReg,
+                                  bool IsClone, unsigned SrcReg,
                                   DenseMap<SDOperand, unsigned> &VRBaseMap) {
   unsigned VRBase = 0;
   if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
     // Just use the input register directly!
-    if (InstanceNo > 0)
+    if (IsClone)
       VRBaseMap.erase(SDOperand(Node, ResNo));
     bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,ResNo),SrcReg));
+    isNew = isNew; // Silence compiler warning.
     assert(isNew && "Node emitted out of order - early");
     return;
   }
@@ -432,7 +442,7 @@
         SDOperand Op = Use->getOperand(i);
         if (Op.Val != Node || Op.ResNo != ResNo)
           continue;
-        MVT::ValueType VT = Node->getValueType(Op.ResNo);
+        MVT VT = Node->getValueType(Op.ResNo);
         if (VT != MVT::Other && VT != MVT::Flag)
           Match = false;
       }
@@ -462,9 +472,10 @@
     TII->copyRegToReg(*BB, BB->end(), VRBase, SrcReg, DstRC, SrcRC);
   }
 
-  if (InstanceNo > 0)
+  if (IsClone)
     VRBaseMap.erase(SDOperand(Node, ResNo));
   bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,ResNo), VRBase));
+  isNew = isNew; // Silence compiler warning.
   assert(isNew && "Node emitted out of order - early");
 }
 
@@ -522,6 +533,7 @@
     }
 
     bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,i), VRBase));
+    isNew = isNew; // Silence compiler warning.
     assert(isNew && "Node emitted out of order - early");
   }
 }
@@ -651,18 +663,17 @@
       assert(getInstrOperandRegClass(TRI, TII, *II, IIOpNum) &&
              "Don't have operand info for this instruction!");
     }
-  }
-  
+  }  
 }
 
 void ScheduleDAG::AddMemOperand(MachineInstr *MI, const MachineMemOperand &MO) {
   MI->addMemOperand(MO);
 }
 
-// Returns the Register Class of a subregister
-static const TargetRegisterClass *getSubRegisterRegClass(
-        const TargetRegisterClass *TRC,
-        unsigned SubIdx) {
+/// getSubRegisterRegClass - Returns the register class of the specified
+/// register class' "SubIdx"'th sub-register class.
+static const TargetRegisterClass*
+getSubRegisterRegClass(const TargetRegisterClass *TRC, unsigned SubIdx) {
   // Pick the register class of the subregister
   TargetRegisterInfo::regclass_iterator I =
     TRC->subregclasses_begin() + SubIdx-1;
@@ -671,10 +682,12 @@
   return *I;
 }
 
-static const TargetRegisterClass *getSuperregRegisterClass(
-        const TargetRegisterClass *TRC,
-        unsigned SubIdx,
-        MVT::ValueType VT) {
+/// getSuperRegisterRegClass - Returns the register class of a superreg A whose
+/// "SubIdx"'th sub-register class is the specified register class and whose
+/// type matches the specified type.
+static const TargetRegisterClass*
+getSuperRegisterRegClass(const TargetRegisterClass *TRC,
+                         unsigned SubIdx, MVT VT) {
   // Pick the register class of the superregister for this type
   for (TargetRegisterInfo::regclass_iterator I = TRC->superregclasses_begin(),
          E = TRC->superregclasses_end(); I != E; ++I)
@@ -719,9 +732,11 @@
 
     if (VRBase) {
       // Grab the destination register
+#ifndef NDEBUG
       const TargetRegisterClass *DRC = MRI.getRegClass(VRBase);
       assert(SRC && DRC && SRC == DRC && 
              "Source subregister and destination must have the same class");
+#endif
     } else {
       // Create the reg
       assert(SRC && "Couldn't find source register class");
@@ -747,7 +762,7 @@
     if (VRBase) {
       TRC = MRI.getRegClass(VRBase);
     } else {
-      TRC = getSuperregRegisterClass(MRI.getRegClass(SubReg), SubIdx, 
+      TRC = getSuperRegisterRegClass(MRI.getRegClass(SubReg), SubIdx, 
                                      Node->getValueType(0));
       assert(TRC && "Couldn't determine register class for insert_subreg");
       VRBase = MRI.createVirtualRegister(TRC); // Create the reg
@@ -772,12 +787,13 @@
     assert(0 && "Node is not insert_subreg, extract_subreg, or subreg_to_reg");
      
   bool isNew = VRBaseMap.insert(std::make_pair(SDOperand(Node,0), VRBase));
+  isNew = isNew; // Silence compiler warning.
   assert(isNew && "Node emitted out of order - early");
 }
 
 /// EmitNode - Generate machine code for a node and needed dependencies.
 ///
-void ScheduleDAG::EmitNode(SDNode *Node, unsigned InstanceNo,
+void ScheduleDAG::EmitNode(SDNode *Node, bool IsClone,
                            DenseMap<SDOperand, unsigned> &VRBaseMap) {
   // If machine instruction
   if (Node->isTargetOpcode()) {
@@ -799,10 +815,10 @@
     unsigned NumResults = CountResults(Node);
     unsigned NodeOperands = CountOperands(Node);
     unsigned MemOperandsEnd = ComputeMemOperandsEnd(Node);
-    unsigned NumMIOperands = NodeOperands + NumResults;
     bool HasPhysRegOuts = (NumResults > II.getNumDefs()) &&
                           II.getImplicitDefs() != 0;
 #ifndef NDEBUG
+    unsigned NumMIOperands = NodeOperands + NumResults;
     assert((II.getNumOperands() == NumMIOperands ||
             HasPhysRegOuts || II.isVariadic()) &&
            "#operands for dag node doesn't match .td file!"); 
@@ -852,120 +868,98 @@
       for (unsigned i = II.getNumDefs(); i < NumResults; ++i) {
         unsigned Reg = II.getImplicitDefs()[i - II.getNumDefs()];
         if (Node->hasAnyUseOfValue(i))
-          EmitCopyFromReg(Node, i, InstanceNo, Reg, VRBaseMap);
+          EmitCopyFromReg(Node, i, IsClone, Reg, VRBaseMap);
       }
     }
-  } else {
-    switch (Node->getOpcode()) {
-    default:
+    return;
+  }
+
+  switch (Node->getOpcode()) {
+  default:
 #ifndef NDEBUG
-      Node->dump(&DAG);
+    Node->dump(&DAG);
 #endif
-      assert(0 && "This target-independent node should have been selected!");
-      break;
-    case ISD::EntryToken:
-      assert(0 && "EntryToken should have been excluded from the schedule!");
-      break;
-    case ISD::TokenFactor: // fall thru
-    case ISD::LABEL:
-    case ISD::DECLARE:
-    case ISD::SRCVALUE:
-      break;
-    case ISD::CopyToReg: {
-      unsigned SrcReg;
-      SDOperand SrcVal = Node->getOperand(2);
-      if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
-        SrcReg = R->getReg();
-      else
-        SrcReg = getVR(SrcVal, VRBaseMap);
+    assert(0 && "This target-independent node should have been selected!");
+    break;
+  case ISD::EntryToken:
+    assert(0 && "EntryToken should have been excluded from the schedule!");
+    break;
+  case ISD::TokenFactor: // fall thru
+    break;
+  case ISD::CopyToReg: {
+    unsigned SrcReg;
+    SDOperand SrcVal = Node->getOperand(2);
+    if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
+      SrcReg = R->getReg();
+    else
+      SrcReg = getVR(SrcVal, VRBaseMap);
       
-      unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
-      if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
-        break;
+    unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
+    if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
+      break;
       
-      const TargetRegisterClass *SrcTRC = 0, *DstTRC = 0;
-      // Get the register classes of the src/dst.
-      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
-        SrcTRC = MRI.getRegClass(SrcReg);
-      else
-        SrcTRC = TRI->getPhysicalRegisterRegClass(SrcReg,SrcVal.getValueType());
+    const TargetRegisterClass *SrcTRC = 0, *DstTRC = 0;
+    // Get the register classes of the src/dst.
+    if (TargetRegisterInfo::isVirtualRegister(SrcReg))
+      SrcTRC = MRI.getRegClass(SrcReg);
+    else
+      SrcTRC = TRI->getPhysicalRegisterRegClass(SrcReg,SrcVal.getValueType());
 
-      if (TargetRegisterInfo::isVirtualRegister(DestReg))
-        DstTRC = MRI.getRegClass(DestReg);
-      else
-        DstTRC = TRI->getPhysicalRegisterRegClass(DestReg,
+    if (TargetRegisterInfo::isVirtualRegister(DestReg))
+      DstTRC = MRI.getRegClass(DestReg);
+    else
+      DstTRC = TRI->getPhysicalRegisterRegClass(DestReg,
                                             Node->getOperand(1).getValueType());
-      TII->copyRegToReg(*BB, BB->end(), DestReg, SrcReg, DstTRC, SrcTRC);
-      break;
-    }
-    case ISD::CopyFromReg: {
-      unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
-      EmitCopyFromReg(Node, 0, InstanceNo, SrcReg, VRBaseMap);
-      break;
-    }
-    case ISD::INLINEASM: {
-      unsigned NumOps = Node->getNumOperands();
-      if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
-        --NumOps;  // Ignore the flag operand.
+    TII->copyRegToReg(*BB, BB->end(), DestReg, SrcReg, DstTRC, SrcTRC);
+    break;
+  }
+  case ISD::CopyFromReg: {
+    unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
+    EmitCopyFromReg(Node, 0, IsClone, SrcReg, VRBaseMap);
+    break;
+  }
+  case ISD::INLINEASM: {
+    unsigned NumOps = Node->getNumOperands();
+    if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
+      --NumOps;  // Ignore the flag operand.
       
-      // Create the inline asm machine instruction.
-      MachineInstr *MI = BuildMI(TII->get(TargetInstrInfo::INLINEASM));
+    // Create the inline asm machine instruction.
+    MachineInstr *MI = BuildMI(TII->get(TargetInstrInfo::INLINEASM));
 
-      // Add the asm string as an external symbol operand.
-      const char *AsmStr =
-        cast<ExternalSymbolSDNode>(Node->getOperand(1))->getSymbol();
-      MI->addOperand(MachineOperand::CreateES(AsmStr));
+    // Add the asm string as an external symbol operand.
+    const char *AsmStr =
+      cast<ExternalSymbolSDNode>(Node->getOperand(1))->getSymbol();
+    MI->addOperand(MachineOperand::CreateES(AsmStr));
       
-      // Add all of the operand registers to the instruction.
-      for (unsigned i = 2; i != NumOps;) {
-        unsigned Flags = cast<ConstantSDNode>(Node->getOperand(i))->getValue();
-        unsigned NumVals = Flags >> 3;
+    // Add all of the operand registers to the instruction.
+    for (unsigned i = 2; i != NumOps;) {
+      unsigned Flags = cast<ConstantSDNode>(Node->getOperand(i))->getValue();
+      unsigned NumVals = Flags >> 3;
         
-        MI->addOperand(MachineOperand::CreateImm(Flags));
-        ++i;  // Skip the ID value.
+      MI->addOperand(MachineOperand::CreateImm(Flags));
+      ++i;  // Skip the ID value.
         
-        switch (Flags & 7) {
-        default: assert(0 && "Bad flags!");
-        case 1:  // Use of register.
-          for (; NumVals; --NumVals, ++i) {
-            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
-            MI->addOperand(MachineOperand::CreateReg(Reg, false));
-          }
-          break;
-        case 2:   // Def of register.
-          for (; NumVals; --NumVals, ++i) {
-            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
-            MI->addOperand(MachineOperand::CreateReg(Reg, true));
-          }
-          break;
-        case 3: { // Immediate.
-          for (; NumVals; --NumVals, ++i) {
-            if (ConstantSDNode *CS =
-                   dyn_cast<ConstantSDNode>(Node->getOperand(i))) {
-              MI->addOperand(MachineOperand::CreateImm(CS->getValue()));
-            } else if (GlobalAddressSDNode *GA = 
-                  dyn_cast<GlobalAddressSDNode>(Node->getOperand(i))) {
-              MI->addOperand(MachineOperand::CreateGA(GA->getGlobal(),
-                                                      GA->getOffset()));
-            } else {
-              BasicBlockSDNode *BB =cast<BasicBlockSDNode>(Node->getOperand(i));
-              MI->addOperand(MachineOperand::CreateMBB(BB->getBasicBlock()));
-            }
-          }
-          break;
-        }
-        case 4:  // Addressing mode.
-          // The addressing mode has been selected, just add all of the
-          // operands to the machine instruction.
-          for (; NumVals; --NumVals, ++i)
-            AddOperand(MI, Node->getOperand(i), 0, 0, VRBaseMap);
-          break;
+      switch (Flags & 7) {
+      default: assert(0 && "Bad flags!");
+      case 2:   // Def of register.
+        for (; NumVals; --NumVals, ++i) {
+          unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
+          MI->addOperand(MachineOperand::CreateReg(Reg, true));
         }
+        break;
+      case 1:  // Use of register.
+      case 3:  // Immediate.
+      case 4:  // Addressing mode.
+        // The addressing mode has been selected, just add all of the
+        // operands to the machine instruction.
+        for (; NumVals; --NumVals, ++i)
+          AddOperand(MI, Node->getOperand(i), 0, 0, VRBaseMap);
+        break;
       }
-      BB->push_back(MI);
-      break;
-    }
     }
+    BB->push_back(MI);
+    break;
+  }
   }
 }
 
@@ -999,6 +993,7 @@
       assert(I->Reg && "Unknown physical register!");
       unsigned VRBase = MRI.createVirtualRegister(SU->CopyDstRC);
       bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase));
+      isNew = isNew; // Silence compiler warning.
       assert(isNew && "Node emitted out of order - early");
       TII->copyRegToReg(*BB, BB->end(), VRBase, I->Reg,
                         SU->CopyDstRC, SU->CopySrcRC);
@@ -1111,11 +1106,11 @@
       continue;
     }
     for (unsigned j = 0, ee = SU->FlaggedNodes.size(); j != ee; ++j)
-      EmitNode(SU->FlaggedNodes[j], SU->InstanceNo, VRBaseMap);
+      EmitNode(SU->FlaggedNodes[j], SU->OrigNode != SU, VRBaseMap);
     if (!SU->Node)
       EmitCrossRCCopy(SU, CopyVRBaseMap);
     else
-      EmitNode(SU->Node, SU->InstanceNo, VRBaseMap);
+      EmitNode(SU->Node, SU->OrigNode != SU, VRBaseMap);
   }
 
   if (isEntryBB && SchedLiveInCopies)
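
A pattern worth noting in the scheduling code above is how an SDNode is resolved to its SUnit: the unit's index is stored on the node itself (setNodeId/getNodeId, with -1 meaning "no unit yet") and used to index the SUnits vector directly. Below is a minimal sketch of that technique with toy Node/Unit/Scheduler types; they are illustrative stand-ins only, not the SelectionDAG classes.

#include <cassert>
#include <vector>

struct Node {
  int Id;                          // -1 until a scheduling unit is assigned
  Node() : Id(-1) {}
};

struct Unit {
  Node *N;
  unsigned Num;                    // position of this unit in Units
  Unit(Node *n, unsigned num) : N(n), Num(num) {}
};

struct Scheduler {
  std::vector<Unit> Units;

  // Callers must not hold the returned reference across further newUnit calls,
  // since push_back may reallocate the vector.
  Unit &newUnit(Node *N) {
    assert(N->Id == -1 && "Node already inserted!");
    Units.push_back(Unit(N, unsigned(Units.size())));
    N->Id = int(Units.size()) - 1;     // later lookups are just Units[N->Id]
    return Units.back();
  }

  Unit *unitFor(Node *N) {
    return N->Id == -1 ? 0 : &Units[N->Id];
  }
};

Compared with a map keyed by node pointer, the lookup is a single array index, there is no hashing or rebalancing, and the -1 sentinel doubles as the "has this node been processed?" test.
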

Modified: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp Sun Jul  6 15:45:41 2008
@@ -28,9 +28,9 @@
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/Compiler.h"
+#include "llvm/ADT/PriorityQueue.h"
 #include "llvm/ADT/Statistic.h"
 #include <climits>
-#include <queue>
 using namespace llvm;
 
 STATISTIC(NumNoops , "Number of noops inserted");
@@ -94,7 +94,7 @@
   // Build scheduling units.
   BuildSchedUnits();
 
-  AvailableQueue->initNodes(SUnitMap, SUnits);
+  AvailableQueue->initNodes(SUnits);
   
   ListScheduleTopDown();
   
@@ -177,6 +177,7 @@
   // While Available queue is not empty, grab the node with the highest
   // priority. If it is not ready put it back.  Schedule the node.
   std::vector<SUnit*> NotReady;
+  Sequence.reserve(SUnits.size());
   while (!AvailableQueue->empty() || !PendingQueue.empty()) {
     // Check to see if any of the pending instructions are ready to issue.  If
     // so, add them to the available queue.
@@ -314,13 +315,12 @@
     /// mobility.
     std::vector<unsigned> NumNodesSolelyBlocking;
 
-    std::priority_queue<SUnit*, std::vector<SUnit*>, latency_sort> Queue;
+    PriorityQueue<SUnit*, std::vector<SUnit*>, latency_sort> Queue;
 public:
     LatencyPriorityQueue() : Queue(latency_sort(this)) {
     }
     
-    void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
-                   std::vector<SUnit> &sunits) {
+    void initNodes(std::vector<SUnit> &sunits) {
       SUnits = &sunits;
       // Calculate node priorities.
       CalculatePriorities();
@@ -373,25 +373,9 @@
       return V;
     }
 
-    /// remove - This is a really inefficient way to remove a node from a
-    /// priority queue.  We should roll our own heap to make this better or
-    /// something.
     void remove(SUnit *SU) {
-      std::vector<SUnit*> Temp;
-      
       assert(!Queue.empty() && "Not in queue!");
-      while (Queue.top() != SU) {
-        Temp.push_back(Queue.top());
-        Queue.pop();
-        assert(!Queue.empty() && "Not in queue!");
-      }
-
-      // Remove the node from the PQ.
-      Queue.pop();
-      
-      // Add all the other nodes back.
-      for (unsigned i = 0, e = Temp.size(); i != e; ++i)
-        Queue.push(Temp[i]);
+      Queue.erase_one(SU);
     }
 
     // ScheduledNode - As nodes are scheduled, we look to see if there are any
@@ -564,7 +548,7 @@
 /// recognizer and deletes it when done.
 ScheduleDAG* llvm::createTDListDAGScheduler(SelectionDAGISel *IS,
                                             SelectionDAG *DAG,
-                                            MachineBasicBlock *BB) {
+                                            MachineBasicBlock *BB, bool Fast) {
   return new ScheduleDAGList(*DAG, BB, DAG->getTarget(),
                              new LatencyPriorityQueue(),
                              IS->CreateTargetHazardRecognizer());
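
The remove() method above deletes an arbitrary element from a heap-backed priority queue with one erase_one() call instead of draining the queue and pushing the survivors back. The sketch below shows the general idea behind such a helper on top of the standard heap algorithms; it is an illustrative stand-alone class under those assumptions, not the implementation in llvm/ADT/PriorityQueue.h.

#include <algorithm>
#include <cassert>
#include <functional>
#include <vector>

template <class T, class Compare = std::less<T> >
class ErasablePQ {
  std::vector<T> Heap;          // maintained as a heap w.r.t. Cmp
  Compare Cmp;

public:
  void push(const T &V) {
    Heap.push_back(V);
    std::push_heap(Heap.begin(), Heap.end(), Cmp);
  }

  const T &top() const { return Heap.front(); }

  void pop() {
    std::pop_heap(Heap.begin(), Heap.end(), Cmp);
    Heap.pop_back();
  }

  bool empty() const { return Heap.empty(); }

  // Erase one occurrence of V: overwrite it with the last element, shrink the
  // vector, and restore the heap property with one make_heap pass.
  void erase_one(const T &V) {
    typename std::vector<T>::iterator I =
      std::find(Heap.begin(), Heap.end(), V);
    assert(I != Heap.end() && "Element not in the queue!");
    if (I + 1 != Heap.end())
      *I = Heap.back();
    Heap.pop_back();
    std::make_heap(Heap.begin(), Heap.end(), Cmp);
  }
};

Removing one element this way costs a linear scan plus an O(n) make_heap, whereas the old pop-and-repush loop was O(n log n) and churned the container.
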

Modified: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp Sun Jul  6 15:45:41 2008
@@ -24,11 +24,13 @@
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/Compiler.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/PriorityQueue.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/STLExtras.h"
 #include <climits>
-#include <queue>
 #include "llvm/Support/CommandLine.h"
 using namespace llvm;
 
@@ -56,6 +58,10 @@
   /// isBottomUp - This is true if the scheduling problem is bottom-up, false if
   /// it is top-down.
   bool isBottomUp;
+
+  /// Fast - True if we are performing fast scheduling.
+  ///
+  bool Fast;
   
   /// AvailableQueue - The priority queue to use for the available SUnits.
   SchedulingPriorityQueue *AvailableQueue;
@@ -69,9 +75,9 @@
 
 public:
   ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
-                  const TargetMachine &tm, bool isbottomup,
-                  SchedulingPriorityQueue *availqueue)
-    : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup),
+                    const TargetMachine &tm, bool isbottomup, bool f,
+                    SchedulingPriorityQueue *availqueue)
+    : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup), Fast(f),
       AvailableQueue(availqueue) {
     }
 
@@ -142,36 +148,6 @@
   /// even after dynamic insertions of new edges.
   /// This allows a very fast implementation of IsReachable.
 
-
-  /** 
-  The idea of the algorithm is taken from 
-  "Online algorithms for managing the topological order of
-  a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
-  This is the MNR algorithm, which was first introduced by 
-  A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in  
-  "Maintaining a topological order under edge insertions".
-
-  Short description of the algorithm: 
-  
-  Topological ordering, ord, of a DAG maps each node to a topological
-  index so that for all edges X->Y it is the case that ord(X) < ord(Y).
-  
-  This means that if there is a path from the node X to the node Z, 
-  then ord(X) < ord(Z).
-  
-  This property can be used to check for reachability of nodes:
-  if Z is reachable from X, then an insertion of the edge Z->X would 
-  create a cycle.
-   
-  The algorithm first computes a topological ordering for the DAG by initializing
-  the Index2Node and Node2Index arrays and then tries to keep the ordering
-  up-to-date after edge insertions by reordering the DAG.
-  
-  On insertion of the edge X->Y, the algorithm first marks by calling DFS the
-  nodes reachable from Y, and then shifts them using Shift to lie immediately
-  after X in Index2Node.
-  */
-
   /// InitDAGTopologicalSorting - create the initial topological 
   /// ordering from the DAG to be scheduled.
   void InitDAGTopologicalSorting();
@@ -210,11 +186,13 @@
 
   DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
           SUnits[su].dumpAll(&DAG));
-  CalculateDepths();
-  CalculateHeights();
+  if (!Fast) {
+    CalculateDepths();
+    CalculateHeights();
+  }
   InitDAGTopologicalSorting();
 
-  AvailableQueue->initNodes(SUnitMap, SUnits);
+  AvailableQueue->initNodes(SUnits);
   
   // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
   if (isBottomUp)
@@ -223,8 +201,9 @@
     ListScheduleTopDown();
   
   AvailableQueue->releaseState();
-  
-  CommuteNodesToReducePressure();
+
+  if (!Fast)
+    CommuteNodesToReducePressure();
   
   DOUT << "*** Final schedule ***\n";
   DEBUG(dumpSchedule());
@@ -253,7 +232,7 @@
           continue;
 
         SDNode *OpN = SU->Node->getOperand(j).Val;
-        SUnit *OpSU = isPassiveNode(OpN) ? NULL : SUnitMap[OpN][SU->InstanceNo];
+        SUnit *OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
         if (OpSU && OperandSeen.count(OpSU) == 1) {
           // Ok, so SU is not the last use of OpSU, but SU is two-address so
           // it will clobber OpSU. Try to commute SU if no other source operands
@@ -262,7 +241,7 @@
           for (unsigned k = 0; k < NumOps; ++k) {
             if (k != j) {
               OpN = SU->Node->getOperand(k).Val;
-              OpSU = isPassiveNode(OpN) ? NULL : SUnitMap[OpN][SU->InstanceNo];
+              OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
               if (OpSU && OperandSeen.count(OpSU) == 1) {
                 DoCommute = false;
                 break;
@@ -281,7 +260,7 @@
     for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
          I != E; ++I) {
       if (!I->isCtrl)
-        OperandSeen.insert(I->Dep);
+        OperandSeen.insert(I->Dep->OrigNode);
     }
   }
 }
@@ -363,14 +342,14 @@
 /// CapturePred - This does the opposite of ReleasePred. Since SU is being
 /// unscheduled, increase the succ left count of its predecessors. Remove
 /// them from AvailableQueue if necessary.
-void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
-  PredSU->CycleBound = 0;
+void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {  
+  unsigned CycleBound = 0;
   for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
        I != E; ++I) {
     if (I->Dep == SU)
       continue;
-    PredSU->CycleBound = std::max(PredSU->CycleBound,
-                                  I->Dep->Cycle + PredSU->Latency);
+    CycleBound = std::max(CycleBound,
+                          I->Dep->Cycle + PredSU->Latency);
   }
 
   if (PredSU->isAvailable) {
@@ -379,6 +358,7 @@
       AvailableQueue->remove(PredSU);
   }
 
+  PredSU->CycleBound = CycleBound;
   ++PredSU->NumSuccsLeft;
 }
 
@@ -446,6 +426,33 @@
 
 /// InitDAGTopologicalSorting - create the initial topological 
 /// ordering from the DAG to be scheduled.
+
+/// The idea of the algorithm is taken from 
+/// "Online algorithms for managing the topological order of
+/// a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
+/// This is the MNR algorithm, which was first introduced by 
+/// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in  
+/// "Maintaining a topological order under edge insertions".
+///
+/// Short description of the algorithm: 
+///
+/// Topological ordering, ord, of a DAG maps each node to a topological
+/// index so that for all edges X->Y it is the case that ord(X) < ord(Y).
+///
+/// This means that if there is a path from the node X to the node Z, 
+/// then ord(X) < ord(Z).
+///
+/// This property can be used to check for reachability of nodes:
+/// if Z is reachable from X, then an insertion of the edge Z->X would 
+/// create a cycle.
+///
+/// The algorithm first computes a topological ordering for the DAG by
+/// initializing the Index2Node and Node2Index arrays and then tries to keep
+/// the ordering up-to-date after edge insertions by reordering the DAG.
+///
+/// On insertion of the edge X->Y, the algorithm first marks the nodes
+/// reachable from Y by calling DFS, and then shifts them using Shift to lie
+/// immediately after X in Index2Node.
 void ScheduleDAGRRList::InitDAGTopologicalSorting() {
   unsigned DAGSize = SUnits.size();
   std::vector<unsigned> InDegree(DAGSize);
@@ -644,7 +651,7 @@
   SUnit *NewSU;
   bool TryUnfold = false;
   for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
-    MVT::ValueType VT = N->getValueType(i);
+    MVT VT = N->getValueType(i);
     if (VT == MVT::Flag)
       return NULL;
     else if (VT == MVT::Other)
@@ -652,13 +659,13 @@
   }
   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
     const SDOperand &Op = N->getOperand(i);
-    MVT::ValueType VT = Op.Val->getValueType(Op.ResNo);
+    MVT VT = Op.Val->getValueType(Op.ResNo);
     if (VT == MVT::Flag)
       return NULL;
   }
 
   if (TryUnfold) {
-    SmallVector<SDNode*, 4> NewNodes;
+    SmallVector<SDNode*, 2> NewNodes;
     if (!TII->unfoldMemoryOperand(DAG, N, NewNodes))
       return NULL;
 
@@ -675,7 +682,9 @@
                                   SDOperand(LoadNode, 1));
 
     SUnit *NewSU = CreateNewSUnit(N);
-    SUnitMap[N].push_back(NewSU);
+    assert(N->getNodeId() == -1 && "Node already inserted!");
+    N->setNodeId(NewSU->NodeNum);
+      
     const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
     for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
       if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
@@ -695,14 +704,12 @@
     // but it has different alignment or volatileness.
     bool isNewLoad = true;
     SUnit *LoadSU;
-    DenseMap<SDNode*, std::vector<SUnit*> >::iterator SMI =
-      SUnitMap.find(LoadNode);
-    if (SMI != SUnitMap.end()) {
-      LoadSU = SMI->second.front();
+    if (LoadNode->getNodeId() != -1) {
+      LoadSU = &SUnits[LoadNode->getNodeId()];
       isNewLoad = false;
     } else {
       LoadSU = CreateNewSUnit(LoadNode);
-      SUnitMap[LoadNode].push_back(LoadSU);
+      LoadNode->setNodeId(LoadSU->NodeNum);
 
       LoadSU->Depth = SU->Depth;
       LoadSU->Height = SU->Height;
@@ -870,8 +877,8 @@
 /// getPhysicalRegisterVT - Returns the ValueType of the physical register
 /// definition of the specified node.
 /// FIXME: Move to SelectionDAG?
-static MVT::ValueType getPhysicalRegisterVT(SDNode *N, unsigned Reg,
-                                            const TargetInstrInfo *TII) {
+static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
+                                 const TargetInstrInfo *TII) {
   const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
   assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
   unsigned NumRes = TID.getNumDefs();
@@ -941,7 +948,7 @@
   unsigned CurCycle = 0;
   // Add root to Available queue.
   if (!SUnits.empty()) {
-    SUnit *RootSU = SUnitMap[DAG.getRoot().Val].front();
+    SUnit *RootSU = &SUnits[DAG.getRoot().Val->getNodeId()];
     assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
     RootSU->isAvailable = true;
     AvailableQueue->push(RootSU);
@@ -950,9 +957,11 @@
   // While Available queue is not empty, grab the node with the highest
   // priority. If it is not ready put it back.  Schedule the node.
   SmallVector<SUnit*, 4> NotReady;
+  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
+  Sequence.reserve(SUnits.size());
   while (!AvailableQueue->empty()) {
     bool Delayed = false;
-    DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
+    LRegsMap.clear();
     SUnit *CurSU = AvailableQueue->pop();
     while (CurSU) {
       if (CurSU->CycleBound <= CurCycle) {
@@ -1020,7 +1029,7 @@
         SUnit *NewDef = CopyAndMoveSuccessors(LRDef);
         if (!NewDef) {
           // Issue expensive cross register class copies.
-          MVT::ValueType VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII);
+          MVT VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII);
           const TargetRegisterClass *RC =
             TRI->getPhysicalRegisterRegClass(Reg, VT);
           const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
@@ -1172,6 +1181,7 @@
   // While Available queue is not empty, grab the node with the highest
   // priority. If it is not ready put it back.  Schedule the node.
   std::vector<SUnit*> NotReady;
+  Sequence.reserve(SUnits.size());
   while (!AvailableQueue->empty()) {
     SUnit *CurSU = AvailableQueue->pop();
     while (CurSU && CurSU->CycleBound > CurCycle) {
@@ -1249,6 +1259,15 @@
     bool operator()(const SUnit* left, const SUnit* right) const;
   };
 
+  struct bu_ls_rr_fast_sort : public std::binary_function<SUnit*, SUnit*, bool>{
+    RegReductionPriorityQueue<bu_ls_rr_fast_sort> *SPQ;
+    bu_ls_rr_fast_sort(RegReductionPriorityQueue<bu_ls_rr_fast_sort> *spq)
+      : SPQ(spq) {}
+    bu_ls_rr_fast_sort(const bu_ls_rr_fast_sort &RHS) : SPQ(RHS.SPQ) {}
+    
+    bool operator()(const SUnit* left, const SUnit* right) const;
+  };
+
   struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
     RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
     td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
@@ -1264,18 +1283,88 @@
     N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
 }
 
+/// CalcNodeBUSethiUllmanNumber - Compute Sethi Ullman number for bottom up
+/// scheduling. Smaller number is the higher priority.
+static unsigned
+CalcNodeBUSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
+  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
+  if (SethiUllmanNumber != 0)
+    return SethiUllmanNumber;
+
+  unsigned Extra = 0;
+  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+       I != E; ++I) {
+    if (I->isCtrl) continue;  // ignore chain preds
+    SUnit *PredSU = I->Dep;
+    unsigned PredSethiUllman = CalcNodeBUSethiUllmanNumber(PredSU, SUNumbers);
+    if (PredSethiUllman > SethiUllmanNumber) {
+      SethiUllmanNumber = PredSethiUllman;
+      Extra = 0;
+    } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
+      ++Extra;
+  }
+
+  SethiUllmanNumber += Extra;
+
+  if (SethiUllmanNumber == 0)
+    SethiUllmanNumber = 1;
+  
+  return SethiUllmanNumber;
+}
+
+/// CalcNodeTDSethiUllmanNumber - Compute Sethi Ullman number for top down
+/// scheduling. Smaller number is the higher priority.
+static unsigned
+CalcNodeTDSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
+  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
+  if (SethiUllmanNumber != 0)
+    return SethiUllmanNumber;
+
+  unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
+  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
+    SethiUllmanNumber = 0xffff;
+  else if (SU->NumSuccsLeft == 0)
+    // If SU does not have a use, i.e. it doesn't produce a value that would
+    // be consumed (e.g. store), then it terminates a chain of computation.
+    // Give it a small SethiUllman number so it will be scheduled right before
+    // its predecessors so that it doesn't lengthen their live ranges.
+    SethiUllmanNumber = 0;
+  else if (SU->NumPredsLeft == 0 &&
+           (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
+    SethiUllmanNumber = 0xffff;
+  else {
+    int Extra = 0;
+    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+         I != E; ++I) {
+      if (I->isCtrl) continue;  // ignore chain preds
+      SUnit *PredSU = I->Dep;
+      unsigned PredSethiUllman = CalcNodeTDSethiUllmanNumber(PredSU, SUNumbers);
+      if (PredSethiUllman > SethiUllmanNumber) {
+        SethiUllmanNumber = PredSethiUllman;
+        Extra = 0;
+      } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
+        ++Extra;
+    }
+
+    SethiUllmanNumber += Extra;
+  }
+  
+  return SethiUllmanNumber;
+}
+
+
 namespace {
   template<class SF>
   class VISIBILITY_HIDDEN RegReductionPriorityQueue
    : public SchedulingPriorityQueue {
-    std::priority_queue<SUnit*, std::vector<SUnit*>, SF> Queue;
+    PriorityQueue<SUnit*, std::vector<SUnit*>, SF> Queue;
+    unsigned currentQueueId;
 
   public:
     RegReductionPriorityQueue() :
-    Queue(SF(this)) {}
+    Queue(SF(this)), currentQueueId(0) {}
     
-    virtual void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
-                           std::vector<SUnit> &sunits) {}
+    virtual void initNodes(std::vector<SUnit> &sunits) {}
 
     virtual void addNode(const SUnit *SU) {}
 
@@ -1292,48 +1381,34 @@
     bool empty() const { return Queue.empty(); }
     
     void push(SUnit *U) {
+      assert(!U->NodeQueueId && "Node in the queue already");
+      U->NodeQueueId = ++currentQueueId;
       Queue.push(U);
     }
+
     void push_all(const std::vector<SUnit *> &Nodes) {
       for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
-        Queue.push(Nodes[i]);
+        push(Nodes[i]);
     }
     
     SUnit *pop() {
       if (empty()) return NULL;
       SUnit *V = Queue.top();
       Queue.pop();
+      V->NodeQueueId = 0;
       return V;
     }
 
-    /// remove - This is a really inefficient way to remove a node from a
-    /// priority queue.  We should roll our own heap to make this better or
-    /// something.
     void remove(SUnit *SU) {
-      std::vector<SUnit*> Temp;
-      
-      assert(!Queue.empty() && "Not in queue!");
-      while (Queue.top() != SU) {
-        Temp.push_back(Queue.top());
-        Queue.pop();
-        assert(!Queue.empty() && "Not in queue!");
-      }
-
-      // Remove the node from the PQ.
-      Queue.pop();
-      
-      // Add all the other nodes back.
-      for (unsigned i = 0, e = Temp.size(); i != e; ++i)
-        Queue.push(Temp[i]);
+      assert(!Queue.empty() && "Queue is empty!");
+      assert(SU->NodeQueueId != 0 && "Not in queue!");
+      Queue.erase_one(SU);
+      SU->NodeQueueId = 0;
     }
   };
 
-  template<class SF>
   class VISIBILITY_HIDDEN BURegReductionPriorityQueue
-   : public RegReductionPriorityQueue<SF> {
-    // SUnitMap SDNode to SUnit mapping (n -> n).
-    DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;
-
+   : public RegReductionPriorityQueue<bu_ls_rr_sort> {
     // SUnits - The SUnits for the current graph.
     const std::vector<SUnit> *SUnits;
     
@@ -1343,14 +1418,13 @@
     const TargetInstrInfo *TII;
     const TargetRegisterInfo *TRI;
     ScheduleDAGRRList *scheduleDAG;
+
   public:
     explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii,
                                          const TargetRegisterInfo *tri)
       : TII(tii), TRI(tri), scheduleDAG(NULL) {}
 
-    void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
-                   std::vector<SUnit> &sunits) {
-      SUnitMap = &sumap;
+    void initNodes(std::vector<SUnit> &sunits) {
       SUnits = &sunits;
       // Add pseudo dependency edges for two-address nodes.
       AddPseudoTwoAddrDeps();
@@ -1359,13 +1433,15 @@
     }
 
     void addNode(const SUnit *SU) {
-      SethiUllmanNumbers.resize(SUnits->size(), 0);
-      CalcNodeSethiUllmanNumber(SU);
+      unsigned SUSize = SethiUllmanNumbers.size();
+      if (SUnits->size() > SUSize)
+        SethiUllmanNumbers.resize(SUSize*2, 0);
+      CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
     }
 
     void updateNode(const SUnit *SU) {
       SethiUllmanNumbers[SU->NodeNum] = 0;
-      CalcNodeSethiUllmanNumber(SU);
+      CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
     }
 
     void releaseState() {
@@ -1412,16 +1488,53 @@
     bool canClobber(const SUnit *SU, const SUnit *Op);
     void AddPseudoTwoAddrDeps();
     void CalculateSethiUllmanNumbers();
-    unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
   };
 
 
-  template<class SF>
-  class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
-   : public RegReductionPriorityQueue<SF> {
-    // SUnitMap SDNode to SUnit mapping (n -> n).
-    DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;
+  class VISIBILITY_HIDDEN BURegReductionFastPriorityQueue
+   : public RegReductionPriorityQueue<bu_ls_rr_fast_sort> {
+    // SUnits - The SUnits for the current graph.
+    const std::vector<SUnit> *SUnits;
+    
+    // SethiUllmanNumbers - The SethiUllman number for each node.
+    std::vector<unsigned> SethiUllmanNumbers;
+  public:
+    explicit BURegReductionFastPriorityQueue() {}
+
+    void initNodes(std::vector<SUnit> &sunits) {
+      SUnits = &sunits;
+      // Calculate node priorities.
+      CalculateSethiUllmanNumbers();
+    }
 
+    void addNode(const SUnit *SU) {
+      unsigned SUSize = SethiUllmanNumbers.size();
+      if (SUnits->size() > SUSize)
+        SethiUllmanNumbers.resize(SUSize*2, 0);
+      CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
+    }
+
+    void updateNode(const SUnit *SU) {
+      SethiUllmanNumbers[SU->NodeNum] = 0;
+      CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
+    }
+
+    void releaseState() {
+      SUnits = 0;
+      SethiUllmanNumbers.clear();
+    }
+
+    unsigned getNodePriority(const SUnit *SU) const {
+      return SethiUllmanNumbers[SU->NodeNum];
+    }
+
+  private:
+    void CalculateSethiUllmanNumbers();
+  };
+
+
+  class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
+   : public RegReductionPriorityQueue<td_ls_rr_sort> {
     // SUnits - The SUnits for the current graph.
     const std::vector<SUnit> *SUnits;
     
@@ -1431,22 +1544,22 @@
   public:
     TDRegReductionPriorityQueue() {}
 
-    void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
-                   std::vector<SUnit> &sunits) {
-      SUnitMap = &sumap;
+    void initNodes(std::vector<SUnit> &sunits) {
       SUnits = &sunits;
       // Calculate node priorities.
       CalculateSethiUllmanNumbers();
     }
 
     void addNode(const SUnit *SU) {
-      SethiUllmanNumbers.resize(SUnits->size(), 0);
-      CalcNodeSethiUllmanNumber(SU);
+      unsigned SUSize = SethiUllmanNumbers.size();
+      if (SUnits->size() > SUSize)
+        SethiUllmanNumbers.resize(SUSize*2, 0);
+      CalcNodeTDSethiUllmanNumber(SU, SethiUllmanNumbers);
     }
 
     void updateNode(const SUnit *SU) {
       SethiUllmanNumbers[SU->NodeNum] = 0;
-      CalcNodeSethiUllmanNumber(SU);
+      CalcNodeTDSethiUllmanNumber(SU, SethiUllmanNumbers);
     }
 
     void releaseState() {
@@ -1461,7 +1574,6 @@
 
   private:
     void CalculateSethiUllmanNumbers();
-    unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
   };
 }
 
@@ -1504,19 +1616,6 @@
 
 // Bottom up
 bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
-  // There used to be a special tie breaker here that looked for
-  // two-address instructions and preferred the instruction with a
-  // def&use operand.  The special case triggered diagnostics when
-  // _GLIBCXX_DEBUG was enabled because it broke the strict weak
-  // ordering that priority_queue requires. It didn't help much anyway
-  // because AddPseudoTwoAddrDeps already covers many of the cases
-  // where it would have applied.  In addition, it's counter-intuitive
-  // that a tie breaker would be the first thing attempted.  There's a
-  // "real" tie breaker below that is the operation of last resort.
-  // The fact that the "special tie breaker" would trigger when there
-  // wasn't otherwise a tie is what broke the strict weak ordering
-  // constraint.
-
   unsigned LPriority = SPQ->getNodePriority(left);
   unsigned RPriority = SPQ->getNodePriority(right);
   if (LPriority != RPriority)
@@ -1562,12 +1661,24 @@
   if (left->CycleBound != right->CycleBound)
     return left->CycleBound > right->CycleBound;
 
-  // FIXME: No strict ordering.
-  return false;
+  assert(left->NodeQueueId && right->NodeQueueId && 
+         "NodeQueueId cannot be zero");
+  return (left->NodeQueueId > right->NodeQueueId);
+}
+
+bool
+bu_ls_rr_fast_sort::operator()(const SUnit *left, const SUnit *right) const {
+  unsigned LPriority = SPQ->getNodePriority(left);
+  unsigned RPriority = SPQ->getNodePriority(right);
+  if (LPriority != RPriority)
+    return LPriority > RPriority;
+  assert(left->NodeQueueId && right->NodeQueueId && 
+         "NodeQueueId cannot be zero");
+  return (left->NodeQueueId > right->NodeQueueId);
 }
 
-template<class SF> bool
-BURegReductionPriorityQueue<SF>::canClobber(const SUnit *SU, const SUnit *Op) {
+bool
+BURegReductionPriorityQueue::canClobber(const SUnit *SU, const SUnit *Op) {
   if (SU->isTwoAddress) {
     unsigned Opc = SU->Node->getTargetOpcode();
     const TargetInstrDesc &TID = TII->get(Opc);
@@ -1576,8 +1687,8 @@
     for (unsigned i = 0; i != NumOps; ++i) {
       if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
         SDNode *DU = SU->Node->getOperand(i).Val;
-        if ((*SUnitMap).find(DU) != (*SUnitMap).end() &&
-            Op == (*SUnitMap)[DU][SU->InstanceNo])
+        if (DU->getNodeId() != -1 &&
+            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
           return true;
       }
     }
@@ -1600,21 +1711,20 @@
 }
 
 /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
-/// physical register def.
+/// physical register defs.
 static bool canClobberPhysRegDefs(SUnit *SuccSU, SUnit *SU,
                                   const TargetInstrInfo *TII,
                                   const TargetRegisterInfo *TRI) {
   SDNode *N = SuccSU->Node;
   unsigned NumDefs = TII->get(N->getTargetOpcode()).getNumDefs();
   const unsigned *ImpDefs = TII->get(N->getTargetOpcode()).getImplicitDefs();
-  if (!ImpDefs)
-    return false;
+  assert(ImpDefs && "Caller should check hasPhysRegDefs");
   const unsigned *SUImpDefs =
     TII->get(SU->Node->getTargetOpcode()).getImplicitDefs();
   if (!SUImpDefs)
     return false;
   for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
-    MVT::ValueType VT = N->getValueType(i);
+    MVT VT = N->getValueType(i);
     if (VT == MVT::Flag || VT == MVT::Other)
       continue;
     unsigned Reg = ImpDefs[i - NumDefs];
@@ -1634,8 +1744,7 @@
 /// one that has a CopyToReg use (more likely to be a loop induction update).
 /// If both are two-address, but one is commutable while the other is not
 /// commutable, favor the one that's not commutable.
-template<class SF>
-void BURegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
+void BURegReductionPriorityQueue::AddPseudoTwoAddrDeps() {
   for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
     SUnit *SU = (SUnit *)&((*SUnits)[i]);
     if (!SU->isTwoAddress)
@@ -1652,12 +1761,12 @@
     for (unsigned j = 0; j != NumOps; ++j) {
       if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) != -1) {
         SDNode *DU = SU->Node->getOperand(j).Val;
-        if ((*SUnitMap).find(DU) == (*SUnitMap).end())
+        if (DU->getNodeId() == -1)
           continue;
-        SUnit *DUSU = (*SUnitMap)[DU][SU->InstanceNo];
+        const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
         if (!DUSU) continue;
-        for (SUnit::succ_iterator I = DUSU->Succs.begin(),E = DUSU->Succs.end();
-             I != E; ++I) {
+        for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
+             E = DUSU->Succs.end(); I != E; ++I) {
           if (I->isCtrl) continue;
           SUnit *SuccSU = I->Dep;
           if (SuccSU == SU)
@@ -1694,44 +1803,19 @@
   }
 }
 
-/// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number. 
-/// Smaller number is the higher priority.
-template<class SF>
-unsigned BURegReductionPriorityQueue<SF>::
-CalcNodeSethiUllmanNumber(const SUnit *SU) {
-  unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
-  if (SethiUllmanNumber != 0)
-    return SethiUllmanNumber;
-
-  unsigned Extra = 0;
-  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
-       I != E; ++I) {
-    if (I->isCtrl) continue;  // ignore chain preds
-    SUnit *PredSU = I->Dep;
-    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
-    if (PredSethiUllman > SethiUllmanNumber) {
-      SethiUllmanNumber = PredSethiUllman;
-      Extra = 0;
-    } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
-      ++Extra;
-  }
-
-  SethiUllmanNumber += Extra;
-
-  if (SethiUllmanNumber == 0)
-    SethiUllmanNumber = 1;
-  
-  return SethiUllmanNumber;
-}
-
 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
 /// scheduling units.
-template<class SF>
-void BURegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
+void BURegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
+  SethiUllmanNumbers.assign(SUnits->size(), 0);
+  
+  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
+    CalcNodeBUSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
+}
+void BURegReductionFastPriorityQueue::CalculateSethiUllmanNumbers() {
   SethiUllmanNumbers.assign(SUnits->size(), 0);
   
   for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
-    CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
+    CalcNodeBUSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
 }
 
 /// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
@@ -1792,59 +1876,18 @@
   if (left->CycleBound != right->CycleBound)
     return left->CycleBound > right->CycleBound;
 
-  // FIXME: No strict ordering.
-  return false;
-}
-
-/// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number. 
-/// Smaller number is the higher priority.
-template<class SF>
-unsigned TDRegReductionPriorityQueue<SF>::
-CalcNodeSethiUllmanNumber(const SUnit *SU) {
-  unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
-  if (SethiUllmanNumber != 0)
-    return SethiUllmanNumber;
-
-  unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
-  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
-    SethiUllmanNumber = 0xffff;
-  else if (SU->NumSuccsLeft == 0)
-    // If SU does not have a use, i.e. it doesn't produce a value that would
-    // be consumed (e.g. store), then it terminates a chain of computation.
-    // Give it a small SethiUllman number so it will be scheduled right before
-    // its predecessors that it doesn't lengthen their live ranges.
-    SethiUllmanNumber = 0;
-  else if (SU->NumPredsLeft == 0 &&
-           (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
-    SethiUllmanNumber = 0xffff;
-  else {
-    int Extra = 0;
-    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
-         I != E; ++I) {
-      if (I->isCtrl) continue;  // ignore chain preds
-      SUnit *PredSU = I->Dep;
-      unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
-      if (PredSethiUllman > SethiUllmanNumber) {
-        SethiUllmanNumber = PredSethiUllman;
-        Extra = 0;
-      } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
-        ++Extra;
-    }
-
-    SethiUllmanNumber += Extra;
-  }
-  
-  return SethiUllmanNumber;
+  assert(left->NodeQueueId && right->NodeQueueId && 
+         "NodeQueueId cannot be zero");
+  return (left->NodeQueueId > right->NodeQueueId);
 }
 
 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
 /// scheduling units.
-template<class SF>
-void TDRegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
+void TDRegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
   SethiUllmanNumbers.assign(SUnits->size(), 0);
   
   for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
-    CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
+    CalcNodeTDSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
 }
 
 //===----------------------------------------------------------------------===//
@@ -1853,23 +1896,27 @@
 
 llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
                                                     SelectionDAG *DAG,
-                                                    MachineBasicBlock *BB) {
+                                                    MachineBasicBlock *BB,
+                                                    bool Fast) {
+  if (Fast)
+    return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true, true,
+                                 new BURegReductionFastPriorityQueue());
+
   const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo();
   const TargetRegisterInfo *TRI = DAG->getTarget().getRegisterInfo();
   
-  BURegReductionPriorityQueue<bu_ls_rr_sort> *priorityQueue = 
-    new BURegReductionPriorityQueue<bu_ls_rr_sort>(TII, TRI);
+  BURegReductionPriorityQueue *PQ = new BURegReductionPriorityQueue(TII, TRI);
 
-  ScheduleDAGRRList * scheduleDAG = 
-    new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true, priorityQueue);
-  priorityQueue->setScheduleDAG(scheduleDAG);
-  return scheduleDAG;  
+  ScheduleDAGRRList *SD =
+    new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(),true,false, PQ);
+  PQ->setScheduleDAG(SD);
+  return SD;  
 }
 
 llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
                                                     SelectionDAG *DAG,
-                                                    MachineBasicBlock *BB) {
-  return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false,
-                              new TDRegReductionPriorityQueue<td_ls_rr_sort>());
+                                                    MachineBasicBlock *BB,
+                                                    bool Fast) {
+  return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false, Fast,
+                               new TDRegReductionPriorityQueue());
 }
-
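
The comparators above settle ties with a final comparison of NodeQueueId values instead of returning false, so two distinct queued units never compare as equivalent and the comparator remains a strict weak ordering, which std::priority_queue (and _GLIBCXX_DEBUG's checks) require. A minimal sketch of that tie-breaking scheme, using a hypothetical Item type and comparator name:

#include <cassert>

struct Item {
  unsigned Priority;     // smaller value == more urgent, as in the sorts above
  unsigned QueueId;      // 0 when not queued; set from ++Counter on push
};

// "Less than" for a max-heap: returning true means Left pops *after* Right.
struct PriorityThenAge {
  bool operator()(const Item *Left, const Item *Right) const {
    if (Left->Priority != Right->Priority)
      return Left->Priority > Right->Priority;   // lower Priority pops first
    assert(Left->QueueId && Right->QueueId && "Item not in the queue!");
    return Left->QueueId > Right->QueueId;       // then older entries first
  }
};

Assigning a fresh, nonzero QueueId from a monotonically increasing counter on every push and clearing it on pop keeps the tie-break deterministic (older entries win) and gives remove() a cheap "is it actually queued?" assertion.
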

Modified: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Sun Jul  6 15:45:41 2008
@@ -12,6 +12,7 @@
 //===----------------------------------------------------------------------===//
 #include "llvm/CodeGen/SelectionDAG.h"
 #include "llvm/Constants.h"
+#include "llvm/Analysis/ValueTracking.h"
 #include "llvm/GlobalAlias.h"
 #include "llvm/GlobalVariable.h"
 #include "llvm/Intrinsics.h"
@@ -40,13 +41,13 @@
 
 /// makeVTList - Return an instance of the SDVTList struct initialized with the
 /// specified members.
-static SDVTList makeVTList(const MVT::ValueType *VTs, unsigned NumVTs) {
+static SDVTList makeVTList(const MVT *VTs, unsigned NumVTs) {
   SDVTList Res = {VTs, NumVTs};
   return Res;
 }
 
-static const fltSemantics *MVTToAPFloatSemantics(MVT::ValueType VT) {
-  switch (VT) {
+static const fltSemantics *MVTToAPFloatSemantics(MVT VT) {
+  switch (VT.getSimpleVT()) {
   default: assert(0 && "Unknown FP format");
   case MVT::f32:     return &APFloat::IEEEsingle;
   case MVT::f64:     return &APFloat::IEEEdouble;
@@ -70,9 +71,9 @@
   return Value.bitwiseIsEqual(V);
 }
 
-bool ConstantFPSDNode::isValueValidForType(MVT::ValueType VT, 
+bool ConstantFPSDNode::isValueValidForType(MVT VT,
                                            const APFloat& Val) {
-  assert(MVT::isFloatingPoint(VT) && "Can only convert between FP types");
+  assert(VT.isFloatingPoint() && "Can only convert between FP types");
   
   // PPC long double cannot be converted to any other type.
   if (VT == MVT::ppcf128 ||
@@ -191,19 +192,15 @@
 
 
 /// isDebugLabel - Return true if the specified node represents a debug
-/// label (i.e. ISD::LABEL or TargetInstrInfo::LABEL node and third operand
-/// is 0).
+/// label (i.e. ISD::DBG_LABEL or TargetInstrInfo::DBG_LABEL node).
 bool ISD::isDebugLabel(const SDNode *N) {
   SDOperand Zero;
-  if (N->getOpcode() == ISD::LABEL)
-    Zero = N->getOperand(2);
-  else if (N->isTargetOpcode() &&
-           N->getTargetOpcode() == TargetInstrInfo::LABEL)
-    // Chain moved to last operand.
-    Zero = N->getOperand(1);
-  else
-    return false;
-  return isa<ConstantSDNode>(Zero) && cast<ConstantSDNode>(Zero)->isNullValue();
+  if (N->getOpcode() == ISD::DBG_LABEL)
+    return true;
+  if (N->isTargetOpcode() &&
+      N->getTargetOpcode() == TargetInstrInfo::DBG_LABEL)
+    return true;
+  return false;
 }
 
 /// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
@@ -293,6 +290,7 @@
     switch (Result) {
     default: break;
     case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
+    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
     case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
     case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
     case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
@@ -318,7 +316,7 @@
 
 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
 /// solely with their pointer.
-void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
+static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
   ID.AddPointer(VTList.VTs);  
 }
 
@@ -380,6 +378,17 @@
   case ISD::Register:
     ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
     break;
+  case ISD::DBG_STOPPOINT: {
+    const DbgStopPointSDNode *DSP = cast<DbgStopPointSDNode>(N);
+    ID.AddInteger(DSP->getLine());
+    ID.AddInteger(DSP->getColumn());
+    ID.AddPointer(DSP->getCompileUnit());
+    break;
+  }
+  case ISD::DBG_LABEL:
+  case ISD::EH_LABEL:
+    ID.AddInteger(cast<LabelSDNode>(N)->getLabelID());
+    break;
   case ISD::SRCVALUE:
     ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
     break;
@@ -415,7 +424,7 @@
     LoadSDNode *LD = cast<LoadSDNode>(N);
     ID.AddInteger(LD->getAddressingMode());
     ID.AddInteger(LD->getExtensionType());
-    ID.AddInteger((unsigned int)(LD->getMemoryVT()));
+    ID.AddInteger(LD->getMemoryVT().getRawBits());
     ID.AddInteger(LD->getAlignment());
     ID.AddInteger(LD->isVolatile());
     break;
@@ -424,12 +433,29 @@
     StoreSDNode *ST = cast<StoreSDNode>(N);
     ID.AddInteger(ST->getAddressingMode());
     ID.AddInteger(ST->isTruncatingStore());
-    ID.AddInteger((unsigned int)(ST->getMemoryVT()));
+    ID.AddInteger(ST->getMemoryVT().getRawBits());
     ID.AddInteger(ST->getAlignment());
     ID.AddInteger(ST->isVolatile());
     break;
   }
+  case ISD::ATOMIC_CMP_SWAP:
+  case ISD::ATOMIC_LOAD_ADD:
+  case ISD::ATOMIC_SWAP:
+  case ISD::ATOMIC_LOAD_SUB:
+  case ISD::ATOMIC_LOAD_AND:
+  case ISD::ATOMIC_LOAD_OR:
+  case ISD::ATOMIC_LOAD_XOR:
+  case ISD::ATOMIC_LOAD_NAND:
+  case ISD::ATOMIC_LOAD_MIN:
+  case ISD::ATOMIC_LOAD_MAX:
+  case ISD::ATOMIC_LOAD_UMIN:
+  case ISD::ATOMIC_LOAD_UMAX: {
+    AtomicSDNode *AT = cast<AtomicSDNode>(N);
+    ID.AddInteger(AT->getAlignment());
+    ID.AddInteger(AT->isVolatile());
+    break;
   }
+  } // end switch (N->getOpcode())
 }
 
 //===----------------------------------------------------------------------===//
@@ -494,20 +520,23 @@
     DeadNodes.pop_back();
     
     if (UpdateListener)
-      UpdateListener->NodeDeleted(N);
+      UpdateListener->NodeDeleted(N, 0);
     
     // Take the node out of the appropriate CSE map.
     RemoveNodeFromCSEMaps(N);
 
     // Next, brutally remove the operand list.  This is safe to do, as there are
     // no cycles in the graph.
+    unsigned op_num = 0;
     for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
       SDNode *Operand = I->getVal();
-      Operand->removeUser(std::distance(N->op_begin(), I), N);
+      Operand->removeUser(op_num, N);
       
       // Now that we removed this operand, see if there are no uses of it left.
       if (Operand->use_empty())
         DeadNodes.push_back(Operand);
+      
+      op_num++;
     }
     if (N->OperandsNeedDelete) {
       delete[] N->OperandList;
@@ -556,9 +585,6 @@
   bool Erased = false;
   switch (N->getOpcode()) {
   case ISD::HANDLENODE: return;  // noop.
-  case ISD::STRING:
-    Erased = StringNodes.erase(cast<StringSDNode>(N)->getValue());
-    break;
   case ISD::CONDCODE:
     assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
            "Cond code doesn't exist!");
@@ -573,12 +599,12 @@
       TargetExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
     break;
   case ISD::VALUETYPE: {
-    MVT::ValueType VT = cast<VTSDNode>(N)->getVT();
-    if (MVT::isExtendedVT(VT)) {
+    MVT VT = cast<VTSDNode>(N)->getVT();
+    if (VT.isExtended()) {
       Erased = ExtendedValueTypeNodes.erase(VT);
     } else {
-      Erased = ValueTypeNodes[VT] != 0;
-      ValueTypeNodes[VT] = 0;
+      Erased = ValueTypeNodes[VT.getSimpleVT()] != 0;
+      ValueTypeNodes[VT.getSimpleVT()] = 0;
     }
     break;
   }
@@ -683,13 +709,13 @@
   if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
     ID.AddInteger(LD->getAddressingMode());
     ID.AddInteger(LD->getExtensionType());
-    ID.AddInteger((unsigned int)(LD->getMemoryVT()));
+    ID.AddInteger(LD->getMemoryVT().getRawBits());
     ID.AddInteger(LD->getAlignment());
     ID.AddInteger(LD->isVolatile());
   } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     ID.AddInteger(ST->getAddressingMode());
     ID.AddInteger(ST->isTruncatingStore());
-    ID.AddInteger((unsigned int)(ST->getMemoryVT()));
+    ID.AddInteger(ST->getMemoryVT().getRawBits());
     ID.AddInteger(ST->getAlignment());
     ID.AddInteger(ST->isVolatile());
   }
@@ -711,37 +737,24 @@
   }
 }
 
-SDOperand SelectionDAG::getZeroExtendInReg(SDOperand Op, MVT::ValueType VT) {
+SDOperand SelectionDAG::getZeroExtendInReg(SDOperand Op, MVT VT) {
   if (Op.getValueType() == VT) return Op;
   APInt Imm = APInt::getLowBitsSet(Op.getValueSizeInBits(),
-                                   MVT::getSizeInBits(VT));
+                                   VT.getSizeInBits());
   return getNode(ISD::AND, Op.getValueType(), Op,
                  getConstant(Imm, Op.getValueType()));
 }
 
-SDOperand SelectionDAG::getString(const std::string &Val) {
-  StringSDNode *&N = StringNodes[Val];
-  if (!N) {
-    N = new StringSDNode(Val);
-    AllNodes.push_back(N);
-  }
-  return SDOperand(N, 0);
-}
-
-SDOperand SelectionDAG::getConstant(uint64_t Val, MVT::ValueType VT, bool isT) {
-  MVT::ValueType EltVT =
-    MVT::isVector(VT) ? MVT::getVectorElementType(VT) : VT;
-
-  return getConstant(APInt(MVT::getSizeInBits(EltVT), Val), VT, isT);
+SDOperand SelectionDAG::getConstant(uint64_t Val, MVT VT, bool isT) {
+  MVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
+  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT);
 }
 
-SDOperand SelectionDAG::getConstant(const APInt &Val, MVT::ValueType VT, bool isT) {
-  assert(MVT::isInteger(VT) && "Cannot create FP integer constant!");
+SDOperand SelectionDAG::getConstant(const APInt &Val, MVT VT, bool isT) {
+  assert(VT.isInteger() && "Cannot create FP integer constant!");
 
-  MVT::ValueType EltVT =
-    MVT::isVector(VT) ? MVT::getVectorElementType(VT) : VT;
-  
-  assert(Val.getBitWidth() == MVT::getSizeInBits(EltVT) &&
+  MVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
+  assert(Val.getBitWidth() == EltVT.getSizeInBits() &&
          "APInt size does not match type size!");
 
   unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
@@ -751,7 +764,7 @@
   void *IP = 0;
   SDNode *N = NULL;
   if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
-    if (!MVT::isVector(VT))
+    if (!VT.isVector())
       return SDOperand(N, 0);
   if (!N) {
     N = new ConstantSDNode(isT, Val, EltVT);
@@ -760,9 +773,9 @@
   }
 
   SDOperand Result(N, 0);
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SmallVector<SDOperand, 8> Ops;
-    Ops.assign(MVT::getVectorNumElements(VT), Result);
+    Ops.assign(VT.getVectorNumElements(), Result);
     Result = getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size());
   }
   return Result;
@@ -773,12 +786,11 @@
 }
 
 
-SDOperand SelectionDAG::getConstantFP(const APFloat& V, MVT::ValueType VT,
-                                      bool isTarget) {
-  assert(MVT::isFloatingPoint(VT) && "Cannot create integer FP constant!");
+SDOperand SelectionDAG::getConstantFP(const APFloat& V, MVT VT, bool isTarget) {
+  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
                                 
-  MVT::ValueType EltVT =
-    MVT::isVector(VT) ? MVT::getVectorElementType(VT) : VT;
+  MVT EltVT =
+    VT.isVector() ? VT.getVectorElementType() : VT;
 
   // Do the map lookup using the actual bit pattern for the floating point
   // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
@@ -790,7 +802,7 @@
   void *IP = 0;
   SDNode *N = NULL;
   if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
-    if (!MVT::isVector(VT))
+    if (!VT.isVector())
       return SDOperand(N, 0);
   if (!N) {
     N = new ConstantFPSDNode(isTarget, V, EltVT);
@@ -799,18 +811,17 @@
   }
 
   SDOperand Result(N, 0);
-  if (MVT::isVector(VT)) {
+  if (VT.isVector()) {
     SmallVector<SDOperand, 8> Ops;
-    Ops.assign(MVT::getVectorNumElements(VT), Result);
+    Ops.assign(VT.getVectorNumElements(), Result);
     Result = getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size());
   }
   return Result;
 }
 
-SDOperand SelectionDAG::getConstantFP(double Val, MVT::ValueType VT,
-                                      bool isTarget) {
-  MVT::ValueType EltVT =
-    MVT::isVector(VT) ? MVT::getVectorElementType(VT) : VT;
+SDOperand SelectionDAG::getConstantFP(double Val, MVT VT, bool isTarget) {
+  MVT EltVT =
+    VT.isVector() ? VT.getVectorElementType() : VT;
   if (EltVT==MVT::f32)
     return getConstantFP(APFloat((float)Val), VT, isTarget);
   else
@@ -818,7 +829,7 @@
 }
 
 SDOperand SelectionDAG::getGlobalAddress(const GlobalValue *GV,
-                                         MVT::ValueType VT, int Offset,
+                                         MVT VT, int Offset,
                                          bool isTargetGA) {
   unsigned Opc;
 
@@ -847,8 +858,7 @@
   return SDOperand(N, 0);
 }
 
-SDOperand SelectionDAG::getFrameIndex(int FI, MVT::ValueType VT,
-                                      bool isTarget) {
+SDOperand SelectionDAG::getFrameIndex(int FI, MVT VT, bool isTarget) {
   unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
   FoldingSetNodeID ID;
   AddNodeIDNode(ID, Opc, getVTList(VT), (SDOperand*)0, 0);
@@ -862,7 +872,7 @@
   return SDOperand(N, 0);
 }
 
-SDOperand SelectionDAG::getJumpTable(int JTI, MVT::ValueType VT, bool isTarget){
+SDOperand SelectionDAG::getJumpTable(int JTI, MVT VT, bool isTarget){
   unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
   FoldingSetNodeID ID;
   AddNodeIDNode(ID, Opc, getVTList(VT), (SDOperand*)0, 0);
@@ -876,7 +886,7 @@
   return SDOperand(N, 0);
 }
 
-SDOperand SelectionDAG::getConstantPool(Constant *C, MVT::ValueType VT,
+SDOperand SelectionDAG::getConstantPool(Constant *C, MVT VT,
                                         unsigned Alignment, int Offset,
                                         bool isTarget) {
   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
@@ -895,8 +905,7 @@
 }
 
 
-SDOperand SelectionDAG::getConstantPool(MachineConstantPoolValue *C,
-                                        MVT::ValueType VT,
+SDOperand SelectionDAG::getConstantPool(MachineConstantPoolValue *C, MVT VT,
                                         unsigned Alignment, int Offset,
                                         bool isTarget) {
   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
@@ -941,12 +950,12 @@
   return SDOperand(N, 0);
 }
 
-SDOperand SelectionDAG::getValueType(MVT::ValueType VT) {
-  if (!MVT::isExtendedVT(VT) && (unsigned)VT >= ValueTypeNodes.size())
-    ValueTypeNodes.resize(VT+1);
+SDOperand SelectionDAG::getValueType(MVT VT) {
+  if (VT.isSimple() && (unsigned)VT.getSimpleVT() >= ValueTypeNodes.size())
+    ValueTypeNodes.resize(VT.getSimpleVT()+1);
 
-  SDNode *&N = MVT::isExtendedVT(VT) ?
-    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT];
+  SDNode *&N = VT.isExtended() ?
+    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT()];
 
   if (N) return SDOperand(N, 0);
   N = new VTSDNode(VT);
@@ -954,7 +963,7 @@
   return SDOperand(N, 0);
 }
 
-SDOperand SelectionDAG::getExternalSymbol(const char *Sym, MVT::ValueType VT) {
+SDOperand SelectionDAG::getExternalSymbol(const char *Sym, MVT VT) {
   SDNode *&N = ExternalSymbols[Sym];
   if (N) return SDOperand(N, 0);
   N = new ExternalSymbolSDNode(false, Sym, VT);
@@ -962,8 +971,7 @@
   return SDOperand(N, 0);
 }
 
-SDOperand SelectionDAG::getTargetExternalSymbol(const char *Sym,
-                                                MVT::ValueType VT) {
+SDOperand SelectionDAG::getTargetExternalSymbol(const char *Sym, MVT VT) {
   SDNode *&N = TargetExternalSymbols[Sym];
   if (N) return SDOperand(N, 0);
   N = new ExternalSymbolSDNode(true, Sym, VT);
@@ -974,7 +982,7 @@
 SDOperand SelectionDAG::getCondCode(ISD::CondCode Cond) {
   if ((unsigned)Cond >= CondCodeNodes.size())
     CondCodeNodes.resize(Cond+1);
-  
+
   if (CondCodeNodes[Cond] == 0) {
     CondCodeNodes[Cond] = new CondCodeSDNode(Cond);
     AllNodes.push_back(CondCodeNodes[Cond]);
@@ -982,7 +990,7 @@
   return SDOperand(CondCodeNodes[Cond], 0);
 }
 
-SDOperand SelectionDAG::getRegister(unsigned RegNo, MVT::ValueType VT) {
+SDOperand SelectionDAG::getRegister(unsigned RegNo, MVT VT) {
   FoldingSetNodeID ID;
   AddNodeIDNode(ID, ISD::Register, getVTList(VT), (SDOperand*)0, 0);
   ID.AddInteger(RegNo);
@@ -995,6 +1003,40 @@
   return SDOperand(N, 0);
 }
 
+SDOperand SelectionDAG::getDbgStopPoint(SDOperand Root,
+                                        unsigned Line, unsigned Col,
+                                        const CompileUnitDesc *CU) {
+  FoldingSetNodeID ID;
+  SDOperand Ops[] = { Root };
+  AddNodeIDNode(ID, ISD::DBG_STOPPOINT, getVTList(MVT::Other), &Ops[0], 1);
+  ID.AddInteger(Line);
+  ID.AddInteger(Col);
+  ID.AddPointer(CU);
+  void *IP = 0;
+  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
+    return SDOperand(E, 0);
+  SDNode *N = new DbgStopPointSDNode(Root, Line, Col, CU);
+  CSEMap.InsertNode(N, IP);
+  AllNodes.push_back(N);
+  return SDOperand(N, 0);
+}
+
+SDOperand SelectionDAG::getLabel(unsigned Opcode,
+                                 SDOperand Root,
+                                 unsigned LabelID) {
+  FoldingSetNodeID ID;
+  SDOperand Ops[] = { Root };
+  AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), &Ops[0], 1);
+  ID.AddInteger(LabelID);
+  void *IP = 0;
+  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
+    return SDOperand(E, 0);
+  SDNode *N = new LabelSDNode(Opcode, Root, LabelID);
+  CSEMap.InsertNode(N, IP);
+  AllNodes.push_back(N);
+  return SDOperand(N, 0);
+}
+
 SDOperand SelectionDAG::getSrcValue(const Value *V) {
   assert((!V || isa<PointerType>(V->getType())) &&
          "SrcValue is not a pointer?");
@@ -1038,17 +1080,18 @@
 
 /// CreateStackTemporary - Create a stack temporary, suitable for holding the
 /// specified value type.
-SDOperand SelectionDAG::CreateStackTemporary(MVT::ValueType VT) {
+SDOperand SelectionDAG::CreateStackTemporary(MVT VT, unsigned minAlign) {
   MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
-  unsigned ByteSize = MVT::getSizeInBits(VT)/8;
-  const Type *Ty = MVT::getTypeForValueType(VT);
-  unsigned StackAlign = (unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty);
+  unsigned ByteSize = VT.getSizeInBits()/8;
+  const Type *Ty = VT.getTypeForMVT();
+  unsigned StackAlign =
+  std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), minAlign);
+  
   int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign);
   return getFrameIndex(FrameIdx, TLI.getPointerTy());
 }
 
-
-SDOperand SelectionDAG::FoldSetCC(MVT::ValueType VT, SDOperand N1,
+SDOperand SelectionDAG::FoldSetCC(MVT VT, SDOperand N1,
                                   SDOperand N2, ISD::CondCode Cond) {
   // These setcc operations always fold.
   switch (Cond) {
@@ -1068,7 +1111,7 @@
   case ISD::SETUO:
   case ISD::SETUEQ:
   case ISD::SETUNE:
-    assert(!MVT::isInteger(N1.getValueType()) && "Illegal setcc for integer!");
+    assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
     break;
   }
   
@@ -1176,7 +1219,7 @@
                                      APInt &KnownZero, APInt &KnownOne,
                                      unsigned Depth) const {
   unsigned BitWidth = Mask.getBitWidth();
-  assert(BitWidth == MVT::getSizeInBits(Op.getValueType()) &&
+  assert(BitWidth == Op.getValueType().getSizeInBits() &&
          "Mask size mismatches value type size!");
 
   KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
@@ -1229,6 +1272,52 @@
     KnownZero = KnownZeroOut;
     return;
   }
+  case ISD::MUL: {
+    APInt Mask2 = APInt::getAllOnesValue(BitWidth);
+    ComputeMaskedBits(Op.getOperand(1), Mask2, KnownZero, KnownOne, Depth+1);
+    ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero2, KnownOne2, Depth+1);
+    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+
+    // If low bits are zero in either operand, output low known-0 bits.
+    // Also compute a conservative estimate for high known-0 bits.
+    // More trickiness is possible, but this is sufficient for the
+    // interesting case of alignment computation.
+    KnownOne.clear();
+    unsigned TrailZ = KnownZero.countTrailingOnes() +
+                      KnownZero2.countTrailingOnes();
+    unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
+                               KnownZero2.countLeadingOnes(),
+                               BitWidth) - BitWidth;
+
+    TrailZ = std::min(TrailZ, BitWidth);
+    LeadZ = std::min(LeadZ, BitWidth);
+    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
+                APInt::getHighBitsSet(BitWidth, LeadZ);
+    KnownZero &= Mask;
+    return;
+  }
+  case ISD::UDIV: {
+    // For the purposes of computing leading zeros we can conservatively
+    // treat a udiv as a logical right shift by the power of 2 known to
+    // be less than the denominator.
+    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
+    ComputeMaskedBits(Op.getOperand(0),
+                      AllOnes, KnownZero2, KnownOne2, Depth+1);
+    unsigned LeadZ = KnownZero2.countLeadingOnes();
+
+    KnownOne2.clear();
+    KnownZero2.clear();
+    ComputeMaskedBits(Op.getOperand(1),
+                      AllOnes, KnownZero2, KnownOne2, Depth+1);
+    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
+    if (RHSUnknownLeadingOnes != BitWidth)
+      LeadZ = std::min(BitWidth,
+                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
+
+    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ) & Mask;
+    return;
+  }
   case ISD::SELECT:
     ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero, KnownOne, Depth+1);
     ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero2, KnownOne2, Depth+1);
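A worked illustration of the ISD::MUL reasoning above (a standalone sketch, not part of this patch; the helper name is invented for exposition): if the operands are known to end in m and n zero bits respectively, the product ends in at least m + n zero bits, which is exactly the alignment-style fact the combiner wants to record.

#include <algorithm>
// Sketch only: lower bound on the trailing zero bits of a product.
// If X = a * 2^m and Y = b * 2^n, then X * Y = a * b * 2^(m+n).
static unsigned knownTrailingZerosOfMul(unsigned TrailZLHS, unsigned TrailZRHS,
                                        unsigned BitWidth) {
  return std::min(TrailZLHS + TrailZRHS, BitWidth);
}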
@@ -1325,8 +1414,8 @@
     }
     return;
   case ISD::SIGN_EXTEND_INREG: {
-    MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
-    unsigned EBits = MVT::getSizeInBits(EVT);
+    MVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+    unsigned EBits = EVT.getSizeInBits();
     
     // Sign extension.  Compute the demanded bits in the result that are not 
     // present in the input.
@@ -1364,21 +1453,21 @@
   case ISD::CTPOP: {
     unsigned LowBits = Log2_32(BitWidth)+1;
     KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
-    KnownOne  = APInt(BitWidth, 0);
+    KnownOne.clear();
     return;
   }
   case ISD::LOAD: {
     if (ISD::isZEXTLoad(Op.Val)) {
       LoadSDNode *LD = cast<LoadSDNode>(Op);
-      MVT::ValueType VT = LD->getMemoryVT();
-      unsigned MemBits = MVT::getSizeInBits(VT);
+      MVT VT = LD->getMemoryVT();
+      unsigned MemBits = VT.getSizeInBits();
       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits) & Mask;
     }
     return;
   }
   case ISD::ZERO_EXTEND: {
-    MVT::ValueType InVT = Op.getOperand(0).getValueType();
-    unsigned InBits = MVT::getSizeInBits(InVT);
+    MVT InVT = Op.getOperand(0).getValueType();
+    unsigned InBits = InVT.getSizeInBits();
     APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
     APInt InMask    = Mask;
     InMask.trunc(InBits);
@@ -1391,8 +1480,8 @@
     return;
   }
   case ISD::SIGN_EXTEND: {
-    MVT::ValueType InVT = Op.getOperand(0).getValueType();
-    unsigned InBits = MVT::getSizeInBits(InVT);
+    MVT InVT = Op.getOperand(0).getValueType();
+    unsigned InBits = InVT.getSizeInBits();
     APInt InSignBit = APInt::getSignBit(InBits);
     APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits) & Mask;
     APInt InMask = Mask;
@@ -1432,8 +1521,8 @@
     return;
   }
   case ISD::ANY_EXTEND: {
-    MVT::ValueType InVT = Op.getOperand(0).getValueType();
-    unsigned InBits = MVT::getSizeInBits(InVT);
+    MVT InVT = Op.getOperand(0).getValueType();
+    unsigned InBits = InVT.getSizeInBits();
     APInt InMask = Mask;
     InMask.trunc(InBits);
     KnownZero.trunc(InBits);
@@ -1444,8 +1533,8 @@
     return;
   }
   case ISD::TRUNCATE: {
-    MVT::ValueType InVT = Op.getOperand(0).getValueType();
-    unsigned InBits = MVT::getSizeInBits(InVT);
+    MVT InVT = Op.getOperand(0).getValueType();
+    unsigned InBits = InVT.getSizeInBits();
     APInt InMask = Mask;
     InMask.zext(InBits);
     KnownZero.zext(InBits);
@@ -1457,8 +1546,8 @@
     break;
   }
   case ISD::AssertZext: {
-    MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
-    APInt InMask = APInt::getLowBitsSet(BitWidth, MVT::getSizeInBits(VT));
+    MVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
     ComputeMaskedBits(Op.getOperand(0), Mask & InMask, KnownZero, 
                       KnownOne, Depth+1);
     KnownZero |= (~InMask) & Mask;
@@ -1469,48 +1558,95 @@
     KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
     return;
   
+  case ISD::SUB: {
+    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
+      // We know that the top bits of C-X are clear if X contains less bits
+      // than C (i.e. no wrap-around can happen).  For example, 20-X is
+      // positive if we can prove that X is >= 0 and < 16.
+      if (CLHS->getAPIntValue().isNonNegative()) {
+        unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
+        // NLZ can't be BitWidth with no sign bit
+        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
+        ComputeMaskedBits(Op.getOperand(1), MaskV, KnownZero2, KnownOne2,
+                          Depth+1);
+
+        // If all of the MaskV bits are known to be zero, then we know the
+        // output top bits are zero, because we now know that the output is
+        // from [0-C].
+        if ((KnownZero2 & MaskV) == MaskV) {
+          unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
+          // Top bits known zero.
+          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
+        }
+      }
+    }
+  }
+  // fall through
   case ISD::ADD: {
-    // If either the LHS or the RHS are Zero, the result is zero.
-    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
-    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
-    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 
-    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 
-    
     // Output known-0 bits are known if clear or set in both the low clear bits
     // common to both LHS & RHS.  For example, 8+(X<<3) is known to have the
     // low 3 bits clear.
-    unsigned KnownZeroOut = std::min(KnownZero.countTrailingOnes(), 
-                                     KnownZero2.countTrailingOnes());
-    
-    KnownZero = APInt::getLowBitsSet(BitWidth, KnownZeroOut);
-    KnownOne = APInt(BitWidth, 0);
+    APInt Mask2 = APInt::getLowBitsSet(BitWidth, Mask.countTrailingOnes());
+    ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero2, KnownOne2, Depth+1);
+    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 
+    unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
+
+    ComputeMaskedBits(Op.getOperand(1), Mask2, KnownZero2, KnownOne2, Depth+1);
+    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 
+    KnownZeroOut = std::min(KnownZeroOut,
+                            KnownZero2.countTrailingOnes());
+
+    KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
     return;
   }
-  case ISD::SUB: {
-    ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0));
-    if (!CLHS) return;
+  case ISD::SREM:
+    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
+      const APInt &RA = Rem->getAPIntValue();
+      if (RA.isPowerOf2() || (-RA).isPowerOf2()) {
+        APInt LowBits = RA.isStrictlyPositive() ? (RA - 1) : ~RA;
+        APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
+        ComputeMaskedBits(Op.getOperand(0), Mask2,KnownZero2,KnownOne2,Depth+1);
+
+        // The sign of a remainder is equal to the sign of the first
+        // operand (zero being positive).
+        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
+          KnownZero2 |= ~LowBits;
+        else if (KnownOne2[BitWidth-1])
+          KnownOne2 |= ~LowBits;
 
-    // We know that the top bits of C-X are clear if X contains less bits
-    // than C (i.e. no wrap-around can happen).  For example, 20-X is
-    // positive if we can prove that X is >= 0 and < 16.
-    if (CLHS->getAPIntValue().isNonNegative()) {
-      unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
-      // NLZ can't be BitWidth with no sign bit
-      APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
-      ComputeMaskedBits(Op.getOperand(1), MaskV, KnownZero, KnownOne, Depth+1);
-
-      // If all of the MaskV bits are known to be zero, then we know the output
-      // top bits are zero, because we now know that the output is from [0-C].
-      if ((KnownZero & MaskV) == MaskV) {
-        unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
-        // Top bits known zero.
-        KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
-        KnownOne = APInt(BitWidth, 0);   // No one bits known.
-      } else {
-        KnownZero = KnownOne = APInt(BitWidth, 0);  // Otherwise, nothing known.
+        KnownZero |= KnownZero2 & Mask;
+        KnownOne |= KnownOne2 & Mask;
+
+        assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
       }
     }
     return;
+  case ISD::UREM: {
+    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
+      const APInt &RA = Rem->getAPIntValue();
+      if (RA.isPowerOf2()) {
+        APInt LowBits = (RA - 1);
+        APInt Mask2 = LowBits & Mask;
+        KnownZero |= ~LowBits & Mask;
+        ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero, KnownOne,Depth+1);
+        assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
+        break;
+      }
+    }
+
+    // Since the result is less than or equal to either operand, any leading
+    // zero bits in either operand must also exist in the result.
+    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
+    ComputeMaskedBits(Op.getOperand(0), AllOnes, KnownZero, KnownOne,
+                      Depth+1);
+    ComputeMaskedBits(Op.getOperand(1), AllOnes, KnownZero2, KnownOne2,
+                      Depth+1);
+
+    uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
+                                KnownZero2.countLeadingOnes());
+    KnownOne.clear();
+    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
+    return;
   }
   default:
     // Allow the target to implement this method for its nodes.
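The power-of-two fast path in the new ISD::UREM case is the familiar mask identity; a minimal sketch of it (not from the patch, helper name invented):

#include <cstdint>
// Sketch only: x % 2^k == x & (2^k - 1), so every bit above the low k bits of
// the result is known zero -- which is what the UREM case records.
static uint64_t uremByPowerOfTwo(uint64_t X, uint64_t Pow2Divisor) {
  return X & (Pow2Divisor - 1);
}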
@@ -1530,10 +1666,11 @@
 /// information.  For example, immediately after an "SRA X, 2", we know that
 /// the top 3 bits are all equal to each other, so we return 3.
 unsigned SelectionDAG::ComputeNumSignBits(SDOperand Op, unsigned Depth) const{
-  MVT::ValueType VT = Op.getValueType();
-  assert(MVT::isInteger(VT) && "Invalid VT!");
-  unsigned VTBits = MVT::getSizeInBits(VT);
+  MVT VT = Op.getValueType();
+  assert(VT.isInteger() && "Invalid VT!");
+  unsigned VTBits = VT.getSizeInBits();
   unsigned Tmp, Tmp2;
+  unsigned FirstAnswer = 1;
   
   if (Depth == 6)
     return 1;  // Limit search depth.
@@ -1541,10 +1678,10 @@
   switch (Op.getOpcode()) {
   default: break;
   case ISD::AssertSext:
-    Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
+    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
     return VTBits-Tmp+1;
   case ISD::AssertZext:
-    Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
+    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
     return VTBits-Tmp;
     
   case ISD::Constant: {
@@ -1558,12 +1695,12 @@
   }
     
   case ISD::SIGN_EXTEND:
-    Tmp = VTBits-MVT::getSizeInBits(Op.getOperand(0).getValueType());
+    Tmp = VTBits-Op.getOperand(0).getValueType().getSizeInBits();
     return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
     
   case ISD::SIGN_EXTEND_INREG:
     // Max of the input and what this extends.
-    Tmp = MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
+    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
     Tmp = VTBits-Tmp+1;
     
     Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
@@ -1589,16 +1726,21 @@
   case ISD::AND:
   case ISD::OR:
   case ISD::XOR:    // NOT is handled here.
-    // Logical binary ops preserve the number of sign bits.
+    // Logical binary ops preserve the number of sign bits at the worst.
     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
-    if (Tmp == 1) return 1;  // Early out.
-    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
-    return std::min(Tmp, Tmp2);
+    if (Tmp != 1) {
+      Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
+      FirstAnswer = std::min(Tmp, Tmp2);
+      // We computed what we know about the sign bits as our first
+      // answer. Now proceed to the generic code that uses
+      // ComputeMaskedBits, and pick whichever answer is better.
+    }
+    break;
 
   case ISD::SELECT:
-    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
+    Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
     if (Tmp == 1) return 1;  // Early out.
-    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
+    Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
     return std::min(Tmp, Tmp2);
     
   case ISD::SETCC:
@@ -1693,10 +1835,10 @@
     switch (ExtType) {
     default: break;
     case ISD::SEXTLOAD:    // '17' bits known
-      Tmp = MVT::getSizeInBits(LD->getMemoryVT());
+      Tmp = LD->getMemoryVT().getSizeInBits();
       return VTBits-Tmp+1;
     case ISD::ZEXTLOAD:    // '16' bits known
-      Tmp = MVT::getSizeInBits(LD->getMemoryVT());
+      Tmp = LD->getMemoryVT().getSizeInBits();
       return VTBits-Tmp;
     }
   }
@@ -1707,7 +1849,7 @@
       Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
       Op.getOpcode() == ISD::INTRINSIC_VOID) {
     unsigned NumBits = TLI.ComputeNumSignBitsForTargetNode(Op, Depth);
-    if (NumBits > 1) return NumBits;
+    if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
   }
   
   // Finally, if we can prove that the top bits of the result are 0's or 1's,
@@ -1722,7 +1864,7 @@
     Mask = KnownOne;
   } else {
     // Nothing known.
-    return 1;
+    return FirstAnswer;
   }
   
   // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
@@ -1731,7 +1873,7 @@
   Mask <<= Mask.getBitWidth()-VTBits;
   // Return # leading zeros.  We use 'min' here in case Val was zero before
   // shifting.  We don't want to return '64' as for an i32 "0".
-  return std::min(VTBits, Mask.countLeadingZeros());
+  return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
 }
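The ComputeNumSignBits rework above keeps two independent lower bounds and returns the better one; a trivial sketch of that final combination (illustration only, not the patch's code):

#include <algorithm>
// Sketch only: FirstAnswer comes from reasoning about the operands, the other
// bound from ComputeMaskedBits on the result; either is a valid lower bound on
// the number of identical top bits, so take the larger.
static unsigned bestSignBitBound(unsigned FromOperands, unsigned FromKnownBits) {
  return std::max(FromOperands, FromKnownBits);
}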
 
 
@@ -1745,9 +1887,38 @@
 }
 
 
+/// getShuffleScalarElt - Returns the scalar element that will make up the ith
+/// element of the result of the vector shuffle.
+SDOperand SelectionDAG::getShuffleScalarElt(const SDNode *N, unsigned i) {
+  MVT VT = N->getValueType(0);
+  SDOperand PermMask = N->getOperand(2);
+  SDOperand Idx = PermMask.getOperand(i);
+  if (Idx.getOpcode() == ISD::UNDEF)
+    return getNode(ISD::UNDEF, VT.getVectorElementType());
+  unsigned Index = cast<ConstantSDNode>(Idx)->getValue();
+  unsigned NumElems = PermMask.getNumOperands();
+  SDOperand V = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1);
+  Index %= NumElems;
+
+  if (V.getOpcode() == ISD::BIT_CONVERT) {
+    V = V.getOperand(0);
+    if (V.getValueType().getVectorNumElements() != NumElems)
+      return SDOperand();
+  }
+  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
+    return (Index == 0) ? V.getOperand(0)
+                      : getNode(ISD::UNDEF, VT.getVectorElementType());
+  if (V.getOpcode() == ISD::BUILD_VECTOR)
+    return V.getOperand(Index);
+  if (V.getOpcode() == ISD::VECTOR_SHUFFLE)
+    return getShuffleScalarElt(V.Val, Index);
+  return SDOperand();
+}
+
+
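getShuffleScalarElt walks a VECTOR_SHUFFLE mask entry back to a scalar; the index arithmetic follows the usual two-input convention, sketched here as an assumed standalone helper (not part of the patch):

// Sketch only: a mask entry >= NumElems selects from the second input vector;
// e.g. with 4-element inputs, mask entry 6 means element 6 % 4 = 2 of input 1.
static unsigned resolveShuffleMaskEntry(unsigned MaskElt, unsigned NumElems,
                                        bool &UseSecondInput) {
  UseSecondInput = MaskElt >= NumElems;
  return MaskElt % NumElems;
}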
 /// getNode - Gets or creates the specified node.
 ///
-SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT) {
+SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT) {
   FoldingSetNodeID ID;
   AddNodeIDNode(ID, Opcode, getVTList(VT), (SDOperand*)0, 0);
   void *IP = 0;
@@ -1760,12 +1931,11 @@
   return SDOperand(N, 0);
 }
 
-SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
-                                SDOperand Operand) {
+SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, SDOperand Operand) {
   // Constant fold unary operations with an integer constant operand.
   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.Val)) {
     const APInt &Val = C->getAPIntValue();
-    unsigned BitWidth = MVT::getSizeInBits(VT);
+    unsigned BitWidth = VT.getSizeInBits();
     switch (Opcode) {
     default: break;
     case ISD::SIGN_EXTEND:
@@ -1846,60 +2016,57 @@
   unsigned OpOpcode = Operand.Val->getOpcode();
   switch (Opcode) {
   case ISD::TokenFactor:
-  case ISD::MERGE_VALUES:
-    return Operand;         // Factor or merge of one node?  No need.
+    return Operand;         // Factor of one node?  No need.
   case ISD::FP_ROUND: assert(0 && "Invalid method to make FP_ROUND node");
   case ISD::FP_EXTEND:
-    assert(MVT::isFloatingPoint(VT) &&
-           MVT::isFloatingPoint(Operand.getValueType()) && "Invalid FP cast!");
+    assert(VT.isFloatingPoint() &&
+           Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
     if (Operand.getValueType() == VT) return Operand;  // noop conversion.
     if (Operand.getOpcode() == ISD::UNDEF)
       return getNode(ISD::UNDEF, VT);
     break;
   case ISD::SIGN_EXTEND:
-    assert(MVT::isInteger(VT) && MVT::isInteger(Operand.getValueType()) &&
+    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
            "Invalid SIGN_EXTEND!");
     if (Operand.getValueType() == VT) return Operand;   // noop extension
-    assert(MVT::getSizeInBits(Operand.getValueType()) < MVT::getSizeInBits(VT)
+    assert(Operand.getValueType().bitsLT(VT)
            && "Invalid sext node, dst < src!");
     if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
       return getNode(OpOpcode, VT, Operand.Val->getOperand(0));
     break;
   case ISD::ZERO_EXTEND:
-    assert(MVT::isInteger(VT) && MVT::isInteger(Operand.getValueType()) &&
+    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
            "Invalid ZERO_EXTEND!");
     if (Operand.getValueType() == VT) return Operand;   // noop extension
-    assert(MVT::getSizeInBits(Operand.getValueType()) < MVT::getSizeInBits(VT)
+    assert(Operand.getValueType().bitsLT(VT)
            && "Invalid zext node, dst < src!");
     if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
       return getNode(ISD::ZERO_EXTEND, VT, Operand.Val->getOperand(0));
     break;
   case ISD::ANY_EXTEND:
-    assert(MVT::isInteger(VT) && MVT::isInteger(Operand.getValueType()) &&
+    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
            "Invalid ANY_EXTEND!");
     if (Operand.getValueType() == VT) return Operand;   // noop extension
-    assert(MVT::getSizeInBits(Operand.getValueType()) < MVT::getSizeInBits(VT)
+    assert(Operand.getValueType().bitsLT(VT)
            && "Invalid anyext node, dst < src!");
     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND)
       // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
       return getNode(OpOpcode, VT, Operand.Val->getOperand(0));
     break;
   case ISD::TRUNCATE:
-    assert(MVT::isInteger(VT) && MVT::isInteger(Operand.getValueType()) &&
+    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
            "Invalid TRUNCATE!");
     if (Operand.getValueType() == VT) return Operand;   // noop truncate
-    assert(MVT::getSizeInBits(Operand.getValueType()) > MVT::getSizeInBits(VT)
+    assert(Operand.getValueType().bitsGT(VT)
            && "Invalid truncate node, src < dst!");
     if (OpOpcode == ISD::TRUNCATE)
       return getNode(ISD::TRUNCATE, VT, Operand.Val->getOperand(0));
     else if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
              OpOpcode == ISD::ANY_EXTEND) {
       // If the source is smaller than the dest, we still need an extend.
-      if (MVT::getSizeInBits(Operand.Val->getOperand(0).getValueType())
-          < MVT::getSizeInBits(VT))
+      if (Operand.Val->getOperand(0).getValueType().bitsLT(VT))
         return getNode(OpOpcode, VT, Operand.Val->getOperand(0));
-      else if (MVT::getSizeInBits(Operand.Val->getOperand(0).getValueType())
-               > MVT::getSizeInBits(VT))
+      else if (Operand.Val->getOperand(0).getValueType().bitsGT(VT))
         return getNode(ISD::TRUNCATE, VT, Operand.Val->getOperand(0));
       else
         return Operand.Val->getOperand(0);
@@ -1907,7 +2074,7 @@
     break;
   case ISD::BIT_CONVERT:
     // Basic sanity checking.
-    assert(MVT::getSizeInBits(VT) == MVT::getSizeInBits(Operand.getValueType())
+    assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
            && "Cannot BIT_CONVERT between types of different sizes!");
     if (VT == Operand.getValueType()) return Operand;  // noop conversion.
     if (OpOpcode == ISD::BIT_CONVERT)  // bitconv(bitconv(x)) -> bitconv(x)
@@ -1916,8 +2083,8 @@
       return getNode(ISD::UNDEF, VT);
     break;
   case ISD::SCALAR_TO_VECTOR:
-    assert(MVT::isVector(VT) && !MVT::isVector(Operand.getValueType()) &&
-           MVT::getVectorElementType(VT) == Operand.getValueType() &&
+    assert(VT.isVector() && !Operand.getValueType().isVector() &&
+           VT.getVectorElementType() == Operand.getValueType() &&
            "Illegal SCALAR_TO_VECTOR node!");
     if (OpOpcode == ISD::UNDEF)
       return getNode(ISD::UNDEF, VT);
@@ -1961,7 +2128,7 @@
 
 
 
-SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
+SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT,
                                 SDOperand N1, SDOperand N2) {
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val);
   ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.Val);
@@ -1975,7 +2142,7 @@
     if (N2.getOpcode() == ISD::EntryToken) return N1;
     break;
   case ISD::AND:
-    assert(MVT::isInteger(VT) && N1.getValueType() == N2.getValueType() &&
+    assert(VT.isInteger() && N1.getValueType() == N2.getValueType() &&
            N1.getValueType() == VT && "Binary operator types must match!");
     // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so it's
     // worth handling here.
@@ -1986,10 +2153,12 @@
     break;
   case ISD::OR:
   case ISD::XOR:
-    assert(MVT::isInteger(VT) && N1.getValueType() == N2.getValueType() &&
+  case ISD::ADD:
+  case ISD::SUB:
+    assert(VT.isInteger() && N1.getValueType() == N2.getValueType() &&
            N1.getValueType() == VT && "Binary operator types must match!");
-    // (X ^| 0) -> X.  This commonly occurs when legalizing i64 values, so it's
-    // worth handling here.
+    // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so
+    // it's worth handling here.
     if (N2C && N2C->isNullValue())
       return N1;
     break;
@@ -1997,10 +2166,8 @@
   case ISD::UREM:
   case ISD::MULHU:
   case ISD::MULHS:
-    assert(MVT::isInteger(VT) && "This operator does not apply to FP types!");
+    assert(VT.isInteger() && "This operator does not apply to FP types!");
     // fall through
-  case ISD::ADD:
-  case ISD::SUB:
   case ISD::MUL:
   case ISD::SDIV:
   case ISD::SREM:
@@ -2014,8 +2181,8 @@
     break;
   case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
     assert(N1.getValueType() == VT &&
-           MVT::isFloatingPoint(N1.getValueType()) && 
-           MVT::isFloatingPoint(N2.getValueType()) &&
+           N1.getValueType().isFloatingPoint() &&
+           N2.getValueType().isFloatingPoint() &&
            "Invalid FCOPYSIGN!");
     break;
   case ISD::SHL:
@@ -2025,49 +2192,52 @@
   case ISD::ROTR:
     assert(VT == N1.getValueType() &&
            "Shift operators return type must be the same as their first arg");
-    assert(MVT::isInteger(VT) && MVT::isInteger(N2.getValueType()) &&
-           VT != MVT::i1 && "Shifts only work on integers");
+    assert(VT.isInteger() && N2.getValueType().isInteger() &&
+           "Shifts only work on integers");
+
+    // Always fold shifts of i1 values so the code generator doesn't need to
+    // handle them.  Since we know the size of the shift has to be less than the
+    // size of the value, the shift/rotate count is guaranteed to be zero.
+    if (VT == MVT::i1)
+      return N1;
     break;
   case ISD::FP_ROUND_INREG: {
-    MVT::ValueType EVT = cast<VTSDNode>(N2)->getVT();
+    MVT EVT = cast<VTSDNode>(N2)->getVT();
     assert(VT == N1.getValueType() && "Not an inreg round!");
-    assert(MVT::isFloatingPoint(VT) && MVT::isFloatingPoint(EVT) &&
+    assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
            "Cannot FP_ROUND_INREG integer types");
-    assert(MVT::getSizeInBits(EVT) <= MVT::getSizeInBits(VT) &&
-           "Not rounding down!");
+    assert(EVT.bitsLE(VT) && "Not rounding down!");
     if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
     break;
   }
   case ISD::FP_ROUND:
-    assert(MVT::isFloatingPoint(VT) &&
-           MVT::isFloatingPoint(N1.getValueType()) &&
-           MVT::getSizeInBits(VT) <= MVT::getSizeInBits(N1.getValueType()) &&
+    assert(VT.isFloatingPoint() &&
+           N1.getValueType().isFloatingPoint() &&
+           VT.bitsLE(N1.getValueType()) &&
            isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
     if (N1.getValueType() == VT) return N1;  // noop conversion.
     break;
   case ISD::AssertSext:
   case ISD::AssertZext: {
-    MVT::ValueType EVT = cast<VTSDNode>(N2)->getVT();
+    MVT EVT = cast<VTSDNode>(N2)->getVT();
     assert(VT == N1.getValueType() && "Not an inreg extend!");
-    assert(MVT::isInteger(VT) && MVT::isInteger(EVT) &&
+    assert(VT.isInteger() && EVT.isInteger() &&
            "Cannot *_EXTEND_INREG FP types");
-    assert(MVT::getSizeInBits(EVT) <= MVT::getSizeInBits(VT) &&
-           "Not extending!");
+    assert(EVT.bitsLE(VT) && "Not extending!");
     if (VT == EVT) return N1; // noop assertion.
     break;
   }
   case ISD::SIGN_EXTEND_INREG: {
-    MVT::ValueType EVT = cast<VTSDNode>(N2)->getVT();
+    MVT EVT = cast<VTSDNode>(N2)->getVT();
     assert(VT == N1.getValueType() && "Not an inreg extend!");
-    assert(MVT::isInteger(VT) && MVT::isInteger(EVT) &&
+    assert(VT.isInteger() && EVT.isInteger() &&
            "Cannot *_EXTEND_INREG FP types");
-    assert(MVT::getSizeInBits(EVT) <= MVT::getSizeInBits(VT) &&
-           "Not extending!");
+    assert(EVT.bitsLE(VT) && "Not extending!");
     if (EVT == VT) return N1;  // Not actually extending
 
     if (N1C) {
       APInt Val = N1C->getAPIntValue();
-      unsigned FromBits = MVT::getSizeInBits(cast<VTSDNode>(N2)->getVT());
+      unsigned FromBits = cast<VTSDNode>(N2)->getVT().getSizeInBits();
       Val <<= Val.getBitWidth()-FromBits;
       Val = Val.ashr(Val.getBitWidth()-FromBits);
       return getConstant(Val, VT);
@@ -2086,7 +2256,7 @@
     if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
         N1.getNumOperands() > 0) {
       unsigned Factor =
-        MVT::getVectorNumElements(N1.getOperand(0).getValueType());
+        N1.getOperand(0).getValueType().getVectorNumElements();
       return getNode(ISD::EXTRACT_VECTOR_ELT, VT,
                      N1.getOperand(N2C->getValue() / Factor),
                      getConstant(N2C->getValue() % Factor, N2.getValueType()));
@@ -2109,10 +2279,9 @@
     break;
   case ISD::EXTRACT_ELEMENT:
     assert(N2C && (unsigned)N2C->getValue() < 2 && "Bad EXTRACT_ELEMENT!");
-    assert(!MVT::isVector(N1.getValueType()) &&
-           MVT::isInteger(N1.getValueType()) &&
-           !MVT::isVector(VT) && MVT::isInteger(VT) &&
-           "EXTRACT_ELEMENT only applies to integers!");
+    assert(!N1.getValueType().isVector() && !VT.isVector() &&
+           (N1.getValueType().isInteger() == VT.isInteger()) &&
+           "Wrong types for EXTRACT_ELEMENT!");
 
     // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
     // 64-bit integers into 32-bit parts.  Instead of building the extract of
@@ -2122,7 +2291,7 @@
 
     // EXTRACT_ELEMENT of a constant int is also very common.
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
-      unsigned ElementSize = MVT::getSizeInBits(VT);
+      unsigned ElementSize = VT.getSizeInBits();
       unsigned Shift = ElementSize * N2C->getValue();
       APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
       return getConstant(ShiftedVal.trunc(ElementSize), VT);
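The EXTRACT_ELEMENT constant fold above is just a shift plus truncate; for example, element 1 of the i64 constant 0x1122334455667788 with 32-bit elements is 0x11223344. A standalone sketch (illustration only, not the patch's code):

#include <cstdint>
// Sketch only: extract the Idx-th ElemBits-wide piece of a wide constant by
// shifting it down and truncating, as the fold above does with APInt.
static uint64_t extractElementOfConstant(uint64_t C, unsigned ElemBits,
                                         unsigned Idx) {
  uint64_t Shifted = C >> (ElemBits * Idx);
  return ElemBits < 64 ? (Shifted & ((1ULL << ElemBits) - 1)) : Shifted;
}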
@@ -2136,7 +2305,7 @@
 
   if (N1C) {
     if (N2C) {
-      APInt C1 = N1C->getAPIntValue(), C2 = N2C->getAPIntValue();
+      const APInt &C1 = N1C->getAPIntValue(), &C2 = N2C->getAPIntValue();
       switch (Opcode) {
       case ISD::ADD: return getConstant(C1 + C2, VT);
       case ISD::SUB: return getConstant(C1 - C2, VT);
@@ -2236,7 +2405,7 @@
       case ISD::SREM:
       case ISD::SRL:
       case ISD::SHL:
-        if (!MVT::isVector(VT)) 
+        if (!VT.isVector())
           return getConstant(0, VT);    // fold op(undef, arg2) -> 0
         // For vectors, we can't easily build an all zero vector, just return
         // the LHS.
@@ -2272,14 +2441,14 @@
     case ISD::AND:
     case ISD::SRL:
     case ISD::SHL:
-      if (!MVT::isVector(VT)) 
+      if (!VT.isVector())
         return getConstant(0, VT);  // fold op(arg1, undef) -> 0
       // For vectors, we can't easily build an all zero vector, just return
       // the LHS.
       return N1;
     case ISD::OR:
-      if (!MVT::isVector(VT)) 
-        return getConstant(MVT::getIntVTBitMask(VT), VT);
+      if (!VT.isVector())
+        return getConstant(VT.getIntegerVTBitMask(), VT);
       // For vectors, we can't easily build an all one vector, just return
       // the LHS.
       return N1;
@@ -2308,7 +2477,7 @@
   return SDOperand(N, 0);
 }
 
-SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
+SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT,
                                 SDOperand N1, SDOperand N2, SDOperand N3) {
   // Perform various simplifications.
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val);
@@ -2340,9 +2509,9 @@
     break;
   case ISD::VECTOR_SHUFFLE:
     assert(VT == N1.getValueType() && VT == N2.getValueType() &&
-           MVT::isVector(VT) && MVT::isVector(N3.getValueType()) &&
+           VT.isVector() && N3.getValueType().isVector() &&
            N3.getOpcode() == ISD::BUILD_VECTOR &&
-           MVT::getVectorNumElements(VT) == N3.getNumOperands() &&
+           VT.getVectorNumElements() == N3.getNumOperands() &&
            "Illegal VECTOR_SHUFFLE node!");
     break;
   case ISD::BIT_CONVERT:
@@ -2371,14 +2540,14 @@
   return SDOperand(N, 0);
 }
 
-SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
+SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT,
                                 SDOperand N1, SDOperand N2, SDOperand N3,
                                 SDOperand N4) {
   SDOperand Ops[] = { N1, N2, N3, N4 };
   return getNode(Opcode, VT, Ops, 4);
 }
 
-SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
+SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT,
                                 SDOperand N1, SDOperand N2, SDOperand N3,
                                 SDOperand N4, SDOperand N5) {
   SDOperand Ops[] = { N1, N2, N3, N4, N5 };
@@ -2387,43 +2556,53 @@
 
 /// getMemsetValue - Vectorized representation of the memset value
 /// operand.
-static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
-                                SelectionDAG &DAG) {
-  MVT::ValueType CurVT = VT;
+static SDOperand getMemsetValue(SDOperand Value, MVT VT, SelectionDAG &DAG) {
+  unsigned NumBits = VT.isVector() ?
+    VT.getVectorElementType().getSizeInBits() : VT.getSizeInBits();
   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
-    uint64_t Val   = C->getValue() & 255;
+    APInt Val = APInt(NumBits, C->getValue() & 255);
     unsigned Shift = 8;
-    while (CurVT != MVT::i8) {
+    for (unsigned i = NumBits; i > 8; i >>= 1) {
       Val = (Val << Shift) | Val;
       Shift <<= 1;
-      CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
-    }
-    return DAG.getConstant(Val, VT);
-  } else {
-    Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value);
-    unsigned Shift = 8;
-    while (CurVT != MVT::i8) {
-      Value =
-        DAG.getNode(ISD::OR, VT,
-                    DAG.getNode(ISD::SHL, VT, Value,
-                                DAG.getConstant(Shift, MVT::i8)), Value);
-      Shift <<= 1;
-      CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
     }
+    if (VT.isInteger())
+      return DAG.getConstant(Val, VT);
+    return DAG.getConstantFP(APFloat(Val), VT);
+  }
 
-    return Value;
+  Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value);
+  unsigned Shift = 8;
+  for (unsigned i = NumBits; i > 8; i >>= 1) {
+    Value = DAG.getNode(ISD::OR, VT,
+                        DAG.getNode(ISD::SHL, VT, Value,
+                                    DAG.getConstant(Shift, MVT::i8)), Value);
+    Shift <<= 1;
   }
+
+  return Value;
 }
 
 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
 /// used when a memcpy is turned into a memset when the source is a constant
 /// string ptr.
-static SDOperand getMemsetStringVal(MVT::ValueType VT,
-                                    SelectionDAG &DAG,
+static SDOperand getMemsetStringVal(MVT VT, SelectionDAG &DAG,
                                     const TargetLowering &TLI,
                                     std::string &Str, unsigned Offset) {
+  // Handle vector with all elements zero.
+  if (Str.empty()) {
+    if (VT.isInteger())
+      return DAG.getConstant(0, VT);
+    unsigned NumElts = VT.getVectorNumElements();
+    MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
+    return DAG.getNode(ISD::BIT_CONVERT, VT,
+                       DAG.getConstant(0, MVT::getVectorVT(EltVT, NumElts)));
+  }
+
+  assert(!VT.isVector() && "Can't handle vector type here!");
+  unsigned NumBits = VT.getSizeInBits();
+  unsigned MSB = NumBits / 8;
   uint64_t Val = 0;
-  unsigned MSB = MVT::getSizeInBits(VT) / 8;
   if (TLI.isLittleEndian())
     Offset = Offset + MSB - 1;
   for (unsigned i = 0; i != MSB; ++i) {
@@ -2434,56 +2613,112 @@
 }
 
 /// getMemBasePlusOffset - Returns the base pointer plus the given constant
 /// offset, combined as an ADD node.
+///
 static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset,
                                       SelectionDAG &DAG) {
-  MVT::ValueType VT = Base.getValueType();
+  MVT VT = Base.getValueType();
   return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT));
 }
 
+/// isMemSrcFromString - Returns true if memcpy source is a string constant.
+///
+static bool isMemSrcFromString(SDOperand Src, std::string &Str) {
+  unsigned SrcDelta = 0;
+  GlobalAddressSDNode *G = NULL;
+  if (Src.getOpcode() == ISD::GlobalAddress)
+    G = cast<GlobalAddressSDNode>(Src);
+  else if (Src.getOpcode() == ISD::ADD &&
+           Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
+           Src.getOperand(1).getOpcode() == ISD::Constant) {
+    G = cast<GlobalAddressSDNode>(Src.getOperand(0));
+    SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getValue();
+  }
+  if (!G)
+    return false;
+
+  GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
+  if (GV && GetConstantStringInfo(GV, Str, SrcDelta, false))
+    return true;
+
+  return false;
+}
+
 /// MeetsMaxMemopRequirement - Determines if the number of memory ops required
 /// to replace the memset / memcpy is below the threshold. It also returns the
 /// types of the sequence of memory ops to perform memset / memcpy.
-static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
-                                     unsigned Limit, uint64_t Size,
-                                     unsigned Align,
-                                     const TargetLowering &TLI) {
-  MVT::ValueType VT;
+static
+bool MeetsMaxMemopRequirement(std::vector<MVT> &MemOps,
+                              SDOperand Dst, SDOperand Src,
+                              unsigned Limit, uint64_t Size, unsigned &Align,
+                              std::string &Str, bool &isSrcStr,
+                              SelectionDAG &DAG,
+                              const TargetLowering &TLI) {
+  isSrcStr = isMemSrcFromString(Src, Str);
+  bool isSrcConst = isa<ConstantSDNode>(Src);
+  bool AllowUnalign = TLI.allowsUnalignedMemoryAccesses();
+  MVT VT= TLI.getOptimalMemOpType(Size, Align, isSrcConst, isSrcStr);
+  if (VT != MVT::iAny) {
+    unsigned NewAlign = (unsigned)
+      TLI.getTargetData()->getABITypeAlignment(VT.getTypeForMVT());
+    // If source is a string constant, this will require an unaligned load.
+    if (NewAlign > Align && (isSrcConst || AllowUnalign)) {
+      if (Dst.getOpcode() != ISD::FrameIndex) {
+        // Can't change destination alignment. It requires an unaligned store.
+        if (AllowUnalign)
+          VT = MVT::iAny;
+      } else {
+        int FI = cast<FrameIndexSDNode>(Dst)->getIndex();
+        MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+        if (MFI->isFixedObjectIndex(FI)) {
+          // Can't change destination alignment. It requires an unaligned store.
+          if (AllowUnalign)
+            VT = MVT::iAny;
+        } else {
+          // Give the stack frame object a larger alignment if needed.
+          if (MFI->getObjectAlignment(FI) < NewAlign)
+            MFI->setObjectAlignment(FI, NewAlign);
+          Align = NewAlign;
+        }
+      }
+    }
+  }
 
-  if (TLI.allowsUnalignedMemoryAccesses()) {
-    VT = MVT::i64;
-  } else {
-    switch (Align & 7) {
-    case 0:
+  if (VT == MVT::iAny) {
+    if (AllowUnalign) {
       VT = MVT::i64;
-      break;
-    case 4:
-      VT = MVT::i32;
-      break;
-    case 2:
-      VT = MVT::i16;
-      break;
-    default:
-      VT = MVT::i8;
-      break;
+    } else {
+      switch (Align & 7) {
+      case 0:  VT = MVT::i64; break;
+      case 4:  VT = MVT::i32; break;
+      case 2:  VT = MVT::i16; break;
+      default: VT = MVT::i8;  break;
+      }
     }
-  }
 
-  MVT::ValueType LVT = MVT::i64;
-  while (!TLI.isTypeLegal(LVT))
-    LVT = (MVT::ValueType)((unsigned)LVT - 1);
-  assert(MVT::isInteger(LVT));
+    MVT LVT = MVT::i64;
+    while (!TLI.isTypeLegal(LVT))
+      LVT = (MVT::SimpleValueType)(LVT.getSimpleVT() - 1);
+    assert(LVT.isInteger());
 
-  if (VT > LVT)
-    VT = LVT;
+    if (VT.bitsGT(LVT))
+      VT = LVT;
+  }
 
   unsigned NumMemOps = 0;
   while (Size != 0) {
-    unsigned VTSize = MVT::getSizeInBits(VT) / 8;
+    unsigned VTSize = VT.getSizeInBits() / 8;
     while (VTSize > Size) {
-      VT = (MVT::ValueType)((unsigned)VT - 1);
-      VTSize >>= 1;
+      // For now, only use non-vector loads / stores for the left-over pieces.
+      if (VT.isVector()) {
+        VT = MVT::i64;
+        while (!TLI.isTypeLegal(VT))
+          VT = (MVT::SimpleValueType)(VT.getSimpleVT() - 1);
+        VTSize = VT.getSizeInBits() / 8;
+      } else {
+        VT = (MVT::SimpleValueType)(VT.getSimpleVT() - 1);
+        VTSize >>= 1;
+      }
     }
-    assert(MVT::isInteger(VT));
 
     if (++NumMemOps > Limit)
       return false;
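When the target reports no preferred type (MVT::iAny) and unaligned access is not allowed, the switch above picks the widest integer type whose natural alignment the pointer satisfies. A standalone sketch of that mapping (illustration only, not the patch's code):

// Sketch only: mirror of the (Align & 7) switch -- an 8-byte-aligned pointer
// can use i64 ops, 4-byte i32, 2-byte i16; anything else falls back to i8.
static const char *widestTypeForAlign(unsigned Align) {
  switch (Align & 7) {
  case 0:  return "i64";
  case 4:  return "i32";
  case 2:  return "i16";
  default: return "i8";
  }
}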
@@ -2497,67 +2732,51 @@
 static SDOperand getMemcpyLoadsAndStores(SelectionDAG &DAG,
                                          SDOperand Chain, SDOperand Dst,
                                          SDOperand Src, uint64_t Size,
-                                         unsigned Align,
-                                         bool AlwaysInline,
-                                         const Value *DstSV, uint64_t DstOff,
-                                         const Value *SrcSV, uint64_t SrcOff) {
+                                         unsigned Align, bool AlwaysInline,
+                                         const Value *DstSV, uint64_t DstSVOff,
+                                         const Value *SrcSV, uint64_t SrcSVOff){
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
-  // Expand memcpy to a series of store ops if the size operand falls below
-  // a certain threshold.
-  std::vector<MVT::ValueType> MemOps;
+  // Expand memcpy to a series of load and store ops if the size operand falls
+  // below a certain threshold.
+  std::vector<MVT> MemOps;
   uint64_t Limit = -1;
   if (!AlwaysInline)
     Limit = TLI.getMaxStoresPerMemcpy();
-  if (!MeetsMaxMemopRequirement(MemOps, Limit, Size, Align, TLI))
+  unsigned DstAlign = Align;  // Destination alignment can change.
+  std::string Str;
+  bool CopyFromStr;
+  if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, Limit, Size, DstAlign,
+                                Str, CopyFromStr, DAG, TLI))
     return SDOperand();
 
-  SmallVector<SDOperand, 8> OutChains;
 
+  bool isZeroStr = CopyFromStr && Str.empty();
+  SmallVector<SDOperand, 8> OutChains;
   unsigned NumMemOps = MemOps.size();
-  unsigned SrcDelta = 0;
-  GlobalAddressSDNode *G = NULL;
-  std::string Str;
-  bool CopyFromStr = false;
-
-  if (Src.getOpcode() == ISD::GlobalAddress)
-    G = cast<GlobalAddressSDNode>(Src);
-  else if (Src.getOpcode() == ISD::ADD &&
-           Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
-           Src.getOperand(1).getOpcode() == ISD::Constant) {
-    G = cast<GlobalAddressSDNode>(Src.getOperand(0));
-    SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getValue();
-  }
-  if (G) {
-    GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
-    if (GV && GV->isConstant()) {
-      Str = GV->getStringValue(false);
-      if (!Str.empty()) {
-        CopyFromStr = true;
-        SrcOff += SrcDelta;
-      }
-    }
-  }
-
+  uint64_t SrcOff = 0, DstOff = 0;
   for (unsigned i = 0; i < NumMemOps; i++) {
-    MVT::ValueType VT = MemOps[i];
-    unsigned VTSize = MVT::getSizeInBits(VT) / 8;
+    MVT VT = MemOps[i];
+    unsigned VTSize = VT.getSizeInBits() / 8;
     SDOperand Value, Store;
 
-    if (CopyFromStr) {
+    if (CopyFromStr && (isZeroStr || !VT.isVector())) {
+      // It's unlikely a store of a vector immediate can be done in a single
+      // instruction. It would require a load from a constantpool first.
+      // We also handle storing a vector with all zeros.
+      // FIXME: Handle other cases where store of vector immediate is done in
+      // a single instruction.
       Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff);
-      Store =
-        DAG.getStore(Chain, Value,
-                     getMemBasePlusOffset(Dst, DstOff, DAG),
-                     DstSV, DstOff);
+      Store = DAG.getStore(Chain, Value,
+                           getMemBasePlusOffset(Dst, DstOff, DAG),
+                           DstSV, DstSVOff + DstOff);
     } else {
       Value = DAG.getLoad(VT, Chain,
                           getMemBasePlusOffset(Src, SrcOff, DAG),
-                          SrcSV, SrcOff, false, Align);
-      Store =
-        DAG.getStore(Chain, Value,
-                     getMemBasePlusOffset(Dst, DstOff, DAG),
-                     DstSV, DstOff, false, Align);
+                          SrcSV, SrcSVOff + SrcOff, false, Align);
+      Store = DAG.getStore(Chain, Value,
+                           getMemBasePlusOffset(Dst, DstOff, DAG),
+                           DstSV, DstSVOff + DstOff, false, DstAlign);
     }
     OutChains.push_back(Store);
     SrcOff += VTSize;
@@ -2568,30 +2787,91 @@
                      &OutChains[0], OutChains.size());
 }
 
+static SDOperand getMemmoveLoadsAndStores(SelectionDAG &DAG,
+                                          SDOperand Chain, SDOperand Dst,
+                                          SDOperand Src, uint64_t Size,
+                                          unsigned Align, bool AlwaysInline,
+                                          const Value *DstSV, uint64_t DstSVOff,
+                                          const Value *SrcSV, uint64_t SrcSVOff){
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
+  // Expand memmove to a series of load and store ops if the size operand falls
+  // below a certain threshold.
+  std::vector<MVT> MemOps;
+  uint64_t Limit = -1;
+  if (!AlwaysInline)
+    Limit = TLI.getMaxStoresPerMemmove();
+  unsigned DstAlign = Align;  // Destination alignment can change.
+  std::string Str;
+  bool CopyFromStr;
+  if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, Limit, Size, DstAlign,
+                                Str, CopyFromStr, DAG, TLI))
+    return SDOperand();
+
+  uint64_t SrcOff = 0, DstOff = 0;
+
+  SmallVector<SDOperand, 8> LoadValues;
+  SmallVector<SDOperand, 8> LoadChains;
+  SmallVector<SDOperand, 8> OutChains;
+  unsigned NumMemOps = MemOps.size();
+  for (unsigned i = 0; i < NumMemOps; i++) {
+    MVT VT = MemOps[i];
+    unsigned VTSize = VT.getSizeInBits() / 8;
+    SDOperand Value, Store;
+
+    Value = DAG.getLoad(VT, Chain,
+                        getMemBasePlusOffset(Src, SrcOff, DAG),
+                        SrcSV, SrcSVOff + SrcOff, false, Align);
+    LoadValues.push_back(Value);
+    LoadChains.push_back(Value.getValue(1));
+    SrcOff += VTSize;
+  }
+  Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
+                      &LoadChains[0], LoadChains.size());
+  OutChains.clear();
+  for (unsigned i = 0; i < NumMemOps; i++) {
+    MVT VT = MemOps[i];
+    unsigned VTSize = VT.getSizeInBits() / 8;
+    SDOperand Value, Store;
+
+    Store = DAG.getStore(Chain, LoadValues[i],
+                         getMemBasePlusOffset(Dst, DstOff, DAG),
+                         DstSV, DstSVOff + DstOff, false, DstAlign);
+    OutChains.push_back(Store);
+    DstOff += VTSize;
+  }
+
+  return DAG.getNode(ISD::TokenFactor, MVT::Other,
+                     &OutChains[0], OutChains.size());
+}
+
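The two-phase structure above (emit every load, then every store) is what keeps the inline expansion correct for overlapping buffers, at the cost of keeping all loaded values live at once. A minimal stand-alone sketch of the same idea in plain C++ (an illustrative aside with a hypothetical 16-byte copy, not part of the patch):

  #include <cstdint>
  #include <cstring>

  void inline_memmove16(char *Dst, const char *Src) {
    // Phase 1: perform every load first, mirroring how LoadValues is filled
    // before any store node is created.
    uint64_t Lo, Hi;
    std::memcpy(&Lo, Src, 8);      // load piece 0
    std::memcpy(&Hi, Src + 8, 8);  // load piece 1
    // Phase 2: only now write the destination; this stays correct even when
    // Dst and Src overlap, because nothing is read from memory after this
    // point.
    std::memcpy(Dst, &Lo, 8);      // store piece 0
    std::memcpy(Dst + 8, &Hi, 8);  // store piece 1
  }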
 static SDOperand getMemsetStores(SelectionDAG &DAG,
                                  SDOperand Chain, SDOperand Dst,
                                  SDOperand Src, uint64_t Size,
                                  unsigned Align,
-                                 const Value *DstSV, uint64_t DstOff) {
+                                 const Value *DstSV, uint64_t DstSVOff) {
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
   // Expand memset to a series of load/store ops if the size operand
   // falls below a certain threshold.
-  std::vector<MVT::ValueType> MemOps;
-  if (!MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(),
-                                Size, Align, TLI))
+  std::vector<MVT> MemOps;
+  std::string Str;
+  bool CopyFromStr;
+  if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, TLI.getMaxStoresPerMemset(),
+                                Size, Align, Str, CopyFromStr, DAG, TLI))
     return SDOperand();
 
   SmallVector<SDOperand, 8> OutChains;
+  uint64_t DstOff = 0;
 
   unsigned NumMemOps = MemOps.size();
   for (unsigned i = 0; i < NumMemOps; i++) {
-    MVT::ValueType VT = MemOps[i];
-    unsigned VTSize = MVT::getSizeInBits(VT) / 8;
+    MVT VT = MemOps[i];
+    unsigned VTSize = VT.getSizeInBits() / 8;
     SDOperand Value = getMemsetValue(Src, VT, DAG);
     SDOperand Store = DAG.getStore(Chain, Value,
                                    getMemBasePlusOffset(Dst, DstOff, DAG),
-                                   DstSV, DstOff);
+                                   DstSV, DstSVOff + DstOff);
     OutChains.push_back(Store);
     DstOff += VTSize;
   }
@@ -2603,8 +2883,8 @@
 SDOperand SelectionDAG::getMemcpy(SDOperand Chain, SDOperand Dst,
                                   SDOperand Src, SDOperand Size,
                                   unsigned Align, bool AlwaysInline,
-                                  const Value *DstSV, uint64_t DstOff,
-                                  const Value *SrcSV, uint64_t SrcOff) {
+                                  const Value *DstSV, uint64_t DstSVOff,
+                                  const Value *SrcSV, uint64_t SrcSVOff) {
 
   // Check to see if we should lower the memcpy to loads and stores first.
   // For cases within the target-specified limits, this is the best choice.
@@ -2616,7 +2896,7 @@
 
     SDOperand Result =
       getMemcpyLoadsAndStores(*this, Chain, Dst, Src, ConstantSize->getValue(),
-                              Align, false, DstSV, DstOff, SrcSV, SrcOff);
+                              Align, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
     if (Result.Val)
       return Result;
   }
@@ -2626,7 +2906,7 @@
   SDOperand Result =
     TLI.EmitTargetCodeForMemcpy(*this, Chain, Dst, Src, Size, Align,
                                 AlwaysInline,
-                                DstSV, DstOff, SrcSV, SrcOff);
+                                DstSV, DstSVOff, SrcSV, SrcSVOff);
   if (Result.Val)
     return Result;
 
@@ -2636,7 +2916,7 @@
     assert(ConstantSize && "AlwaysInline requires a constant size!");
     return getMemcpyLoadsAndStores(*this, Chain, Dst, Src,
                                    ConstantSize->getValue(), Align, true,
-                                   DstSV, DstOff, SrcSV, SrcOff);
+                                   DstSV, DstSVOff, SrcSV, SrcSVOff);
   }
 
   // Emit a library call.
@@ -2657,18 +2937,29 @@
 SDOperand SelectionDAG::getMemmove(SDOperand Chain, SDOperand Dst,
                                    SDOperand Src, SDOperand Size,
                                    unsigned Align,
-                                   const Value *DstSV, uint64_t DstOff,
-                                   const Value *SrcSV, uint64_t SrcOff) {
+                                   const Value *DstSV, uint64_t DstSVOff,
+                                   const Value *SrcSV, uint64_t SrcSVOff) {
 
-  // TODO: Optimize small memmove cases with simple loads and stores,
-  // ensuring that all loads precede all stores. This can cause severe
-  // register pressure, so targets should be careful with the size limit.
+  // Check to see if we should lower the memmove to loads and stores first.
+  // For cases within the target-specified limits, this is the best choice.
+  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
+  if (ConstantSize) {
+    // Memmove with size zero? Just return the original chain.
+    if (ConstantSize->isNullValue())
+      return Chain;
+
+    SDOperand Result =
+      getMemmoveLoadsAndStores(*this, Chain, Dst, Src, ConstantSize->getValue(),
+                               Align, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
+    if (Result.Val)
+      return Result;
+  }
 
   // Then check to see if we should lower the memmove with target-specific
   // code. If the target chooses to do this, this is the next best.
   SDOperand Result =
     TLI.EmitTargetCodeForMemmove(*this, Chain, Dst, Src, Size, Align,
-                                 DstSV, DstOff, SrcSV, SrcOff);
+                                 DstSV, DstSVOff, SrcSV, SrcSVOff);
   if (Result.Val)
     return Result;
 
@@ -2690,7 +2981,7 @@
 SDOperand SelectionDAG::getMemset(SDOperand Chain, SDOperand Dst,
                                   SDOperand Src, SDOperand Size,
                                   unsigned Align,
-                                  const Value *DstSV, uint64_t DstOff) {
+                                  const Value *DstSV, uint64_t DstSVOff) {
 
   // Check to see if we should lower the memset to stores first.
   // For cases within the target-specified limits, this is the best choice.
@@ -2702,7 +2993,7 @@
 
     SDOperand Result =
       getMemsetStores(*this, Chain, Dst, Src, ConstantSize->getValue(), Align,
-                      DstSV, DstOff);
+                      DstSV, DstSVOff);
     if (Result.Val)
       return Result;
   }
@@ -2711,7 +3002,7 @@
   // code. If the target chooses to do this, this is the next best.
   SDOperand Result =
     TLI.EmitTargetCodeForMemset(*this, Chain, Dst, Src, Size, Align,
-                                DstSV, DstOff);
+                                DstSV, DstSVOff);
   if (Result.Val)
     return Result;
 
@@ -2722,7 +3013,7 @@
   Entry.Node = Dst; Entry.Ty = IntPtrTy;
   Args.push_back(Entry);
   // Extend or truncate the argument to be an i32 value for the call.
-  if (Src.getValueType() > MVT::i32)
+  if (Src.getValueType().bitsGT(MVT::i32))
     Src = getNode(ISD::TRUNCATE, MVT::i32, Src);
   else
     Src = getNode(ISD::ZERO_EXTEND, MVT::i32, Src);
@@ -2740,18 +3031,19 @@
 
 SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain, 
                                   SDOperand Ptr, SDOperand Cmp, 
-                                  SDOperand Swp, MVT::ValueType VT) {
-  assert(Opcode == ISD::ATOMIC_LCS && "Invalid Atomic Op");
+                                  SDOperand Swp, const Value* PtrVal,
+                                  unsigned Alignment) {
+  assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
   assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
   SDVTList VTs = getVTList(Cmp.getValueType(), MVT::Other);
   FoldingSetNodeID ID;
   SDOperand Ops[] = {Chain, Ptr, Cmp, Swp};
   AddNodeIDNode(ID, Opcode, VTs, Ops, 4);
-  ID.AddInteger((unsigned int)VT);
   void* IP = 0;
   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
     return SDOperand(E, 0);
-  SDNode* N = new AtomicSDNode(Opcode, VTs, Chain, Ptr, Cmp, Swp, VT);
+  SDNode* N = new AtomicSDNode(Opcode, VTs, Chain, Ptr, Cmp, Swp,
+                               PtrVal, Alignment);
   CSEMap.InsertNode(N, IP);
   AllNodes.push_back(N);
   return SDOperand(N, 0);
@@ -2759,33 +3051,53 @@
 
 SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain, 
                                   SDOperand Ptr, SDOperand Val, 
-                                  MVT::ValueType VT) {
-  assert((Opcode == ISD::ATOMIC_LAS || Opcode == ISD::ATOMIC_SWAP)
+                                  const Value* PtrVal,
+                                  unsigned Alignment) {
+  assert((   Opcode == ISD::ATOMIC_LOAD_ADD || Opcode == ISD::ATOMIC_LOAD_SUB
+          || Opcode == ISD::ATOMIC_SWAP || Opcode == ISD::ATOMIC_LOAD_AND
+          || Opcode == ISD::ATOMIC_LOAD_OR || Opcode == ISD::ATOMIC_LOAD_XOR
+          || Opcode == ISD::ATOMIC_LOAD_NAND 
+          || Opcode == ISD::ATOMIC_LOAD_MIN || Opcode == ISD::ATOMIC_LOAD_MAX
+          || Opcode == ISD::ATOMIC_LOAD_UMIN || Opcode == ISD::ATOMIC_LOAD_UMAX) 
          && "Invalid Atomic Op");
   SDVTList VTs = getVTList(Val.getValueType(), MVT::Other);
   FoldingSetNodeID ID;
   SDOperand Ops[] = {Chain, Ptr, Val};
   AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
-  ID.AddInteger((unsigned int)VT);
   void* IP = 0;
   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
     return SDOperand(E, 0);
-  SDNode* N = new AtomicSDNode(Opcode, VTs, Chain, Ptr, Val, VT);
+  SDNode* N = new AtomicSDNode(Opcode, VTs, Chain, Ptr, Val,
+                               PtrVal, Alignment);
   CSEMap.InsertNode(N, IP);
   AllNodes.push_back(N);
   return SDOperand(N, 0);
 }
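For orientation, hypothetical call sites for the two reworked getAtomic overloads above (every SDOperand and Value name here is a placeholder): the memory value type is now taken from the operands, and the pointer Value plus alignment travel with the node so that AtomicSDNode::getMemOperand can build a MachineMemOperand.

  SDOperand Swapped = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, Chain, Ptr,
                                    Cmp, NewVal, PtrVal, /*Alignment=*/4);
  SDOperand OldVal  = DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, Chain, Ptr,
                                    Increment, PtrVal, /*Alignment=*/4);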
 
+/// getMergeValues - Create a MERGE_VALUES node from the given operands.
+/// Allowed to return something different (and simpler) if Simplify is true.
+SDOperand SelectionDAG::getMergeValues(SDOperandPtr Ops, unsigned NumOps,
+                                       bool Simplify) {
+  if (Simplify && NumOps == 1)
+    return Ops[0];
+
+  SmallVector<MVT, 4> VTs;
+  VTs.reserve(NumOps);
+  for (unsigned i = 0; i < NumOps; ++i)
+    VTs.push_back(Ops[i].getValueType());
+  return getNode(ISD::MERGE_VALUES, getVTList(&VTs[0], NumOps), Ops, NumOps);
+}
+
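A hypothetical call site for the new getMergeValues helper (the operand names are invented for illustration, and Simplify is passed explicitly since its default is not visible in this hunk):

  SDOperand Parts[] = { Lo, Hi };
  SDOperand Merged = DAG.getMergeValues(Parts, 2, true);
  // With a single operand and Simplify set, the operand itself would be
  // returned instead of a MERGE_VALUES node.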
 SDOperand
 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
-                      MVT::ValueType VT, SDOperand Chain,
+                      MVT VT, SDOperand Chain,
                       SDOperand Ptr, SDOperand Offset,
-                      const Value *SV, int SVOffset, MVT::ValueType EVT,
+                      const Value *SV, int SVOffset, MVT EVT,
                       bool isVolatile, unsigned Alignment) {
   if (Alignment == 0) { // Ensure that codegen never sees alignment 0
     const Type *Ty = 0;
     if (VT != MVT::iPTR) {
-      Ty = MVT::getTypeForValueType(VT);
+      Ty = VT.getTypeForMVT();
     } else if (SV) {
       const PointerType *PT = dyn_cast<PointerType>(SV->getType());
       assert(PT && "Value for load must be a pointer");
@@ -2801,19 +3113,19 @@
     assert(VT == EVT && "Non-extending load from different memory type!");
   } else {
     // Extending load.
-    if (MVT::isVector(VT))
-      assert(EVT == MVT::getVectorElementType(VT) && "Invalid vector extload!");
+    if (VT.isVector())
+      assert(EVT == VT.getVectorElementType() && "Invalid vector extload!");
     else
-      assert(MVT::getSizeInBits(EVT) < MVT::getSizeInBits(VT) &&
+      assert(EVT.bitsLT(VT) &&
              "Should only be an extending load, not truncating!");
-    assert((ExtType == ISD::EXTLOAD || MVT::isInteger(VT)) &&
+    assert((ExtType == ISD::EXTLOAD || VT.isInteger()) &&
            "Cannot sign/zero extend a FP/Vector load!");
-    assert(MVT::isInteger(VT) == MVT::isInteger(EVT) &&
+    assert(VT.isInteger() == EVT.isInteger() &&
            "Cannot convert from FP to Int or Int -> FP!");
   }
 
   bool Indexed = AM != ISD::UNINDEXED;
-  assert(Indexed || Offset.getOpcode() == ISD::UNDEF &&
+  assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
          "Unindexed load with an offset!");
 
   SDVTList VTs = Indexed ?
@@ -2823,7 +3135,7 @@
   AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
   ID.AddInteger(AM);
   ID.AddInteger(ExtType);
-  ID.AddInteger((unsigned int)EVT);
+  ID.AddInteger(EVT.getRawBits());
   ID.AddInteger(Alignment);
   ID.AddInteger(isVolatile);
   void *IP = 0;
@@ -2836,7 +3148,7 @@
   return SDOperand(N, 0);
 }
 
-SDOperand SelectionDAG::getLoad(MVT::ValueType VT,
+SDOperand SelectionDAG::getLoad(MVT VT,
                                 SDOperand Chain, SDOperand Ptr,
                                 const Value *SV, int SVOffset,
                                 bool isVolatile, unsigned Alignment) {
@@ -2845,10 +3157,10 @@
                  SV, SVOffset, VT, isVolatile, Alignment);
 }
 
-SDOperand SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, MVT::ValueType VT,
+SDOperand SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, MVT VT,
                                    SDOperand Chain, SDOperand Ptr,
                                    const Value *SV,
-                                   int SVOffset, MVT::ValueType EVT,
+                                   int SVOffset, MVT EVT,
                                    bool isVolatile, unsigned Alignment) {
   SDOperand Undef = getNode(ISD::UNDEF, Ptr.getValueType());
   return getLoad(ISD::UNINDEXED, ExtType, VT, Chain, Ptr, Undef,
@@ -2870,12 +3182,12 @@
 SDOperand SelectionDAG::getStore(SDOperand Chain, SDOperand Val,
                                  SDOperand Ptr, const Value *SV, int SVOffset,
                                  bool isVolatile, unsigned Alignment) {
-  MVT::ValueType VT = Val.getValueType();
+  MVT VT = Val.getValueType();
 
   if (Alignment == 0) { // Ensure that codegen never sees alignment 0
     const Type *Ty = 0;
     if (VT != MVT::iPTR) {
-      Ty = MVT::getTypeForValueType(VT);
+      Ty = VT.getTypeForMVT();
     } else if (SV) {
       const PointerType *PT = dyn_cast<PointerType>(SV->getType());
       assert(PT && "Value for store must be a pointer");
@@ -2891,7 +3203,7 @@
   AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
   ID.AddInteger(ISD::UNINDEXED);
   ID.AddInteger(false);
-  ID.AddInteger((unsigned int)VT);
+  ID.AddInteger(VT.getRawBits());
   ID.AddInteger(Alignment);
   ID.AddInteger(isVolatile);
   void *IP = 0;
@@ -2906,22 +3218,21 @@
 
 SDOperand SelectionDAG::getTruncStore(SDOperand Chain, SDOperand Val,
                                       SDOperand Ptr, const Value *SV,
-                                      int SVOffset, MVT::ValueType SVT,
+                                      int SVOffset, MVT SVT,
                                       bool isVolatile, unsigned Alignment) {
-  MVT::ValueType VT = Val.getValueType();
+  MVT VT = Val.getValueType();
 
   if (VT == SVT)
     return getStore(Chain, Val, Ptr, SV, SVOffset, isVolatile, Alignment);
 
-  assert(MVT::getSizeInBits(VT) > MVT::getSizeInBits(SVT) &&
-         "Not a truncation?");
-  assert(MVT::isInteger(VT) == MVT::isInteger(SVT) &&
+  assert(VT.bitsGT(SVT) && "Not a truncation?");
+  assert(VT.isInteger() == SVT.isInteger() &&
          "Can't do FP-INT conversion!");
 
   if (Alignment == 0) { // Ensure that codegen never sees alignment 0
     const Type *Ty = 0;
     if (VT != MVT::iPTR) {
-      Ty = MVT::getTypeForValueType(VT);
+      Ty = VT.getTypeForMVT();
     } else if (SV) {
       const PointerType *PT = dyn_cast<PointerType>(SV->getType());
       assert(PT && "Value for store must be a pointer");
@@ -2937,7 +3248,7 @@
   AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
   ID.AddInteger(ISD::UNINDEXED);
   ID.AddInteger(1);
-  ID.AddInteger((unsigned int)SVT);
+  ID.AddInteger(SVT.getRawBits());
   ID.AddInteger(Alignment);
   ID.AddInteger(isVolatile);
   void *IP = 0;
@@ -2962,7 +3273,7 @@
   AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
   ID.AddInteger(AM);
   ID.AddInteger(ST->isTruncatingStore());
-  ID.AddInteger((unsigned int)(ST->getMemoryVT()));
+  ID.AddInteger(ST->getMemoryVT().getRawBits());
   ID.AddInteger(ST->getAlignment());
   ID.AddInteger(ST->isVolatile());
   void *IP = 0;
@@ -2977,14 +3288,14 @@
   return SDOperand(N, 0);
 }
 
-SDOperand SelectionDAG::getVAArg(MVT::ValueType VT,
+SDOperand SelectionDAG::getVAArg(MVT VT,
                                  SDOperand Chain, SDOperand Ptr,
                                  SDOperand SV) {
   SDOperand Ops[] = { Chain, Ptr, SV };
   return getNode(ISD::VAARG, getVTList(VT, MVT::Other), Ops, 3);
 }
 
-SDOperand SelectionDAG::getNode(unsigned Opcode, MVT::ValueType VT,
+SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT,
                                 SDOperandPtr Ops, unsigned NumOps) {
   switch (NumOps) {
   case 0: return getNode(Opcode, VT);
@@ -3033,14 +3344,14 @@
 }
 
 SDOperand SelectionDAG::getNode(unsigned Opcode,
-                                std::vector<MVT::ValueType> &ResultTys,
+                                std::vector<MVT> &ResultTys,
                                 SDOperandPtr Ops, unsigned NumOps) {
   return getNode(Opcode, getNodeValueTypes(ResultTys), ResultTys.size(),
                  Ops, NumOps);
 }
 
 SDOperand SelectionDAG::getNode(unsigned Opcode,
-                                const MVT::ValueType *VTs, unsigned NumVTs,
+                                const MVT *VTs, unsigned NumVTs,
                                 SDOperandPtr Ops, unsigned NumOps) {
   if (NumVTs == 1)
     return getNode(Opcode, VTs[0], Ops, NumOps);
@@ -3067,7 +3378,7 @@
       if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
         // If the and is only masking out bits that cannot effect the shift,
         // eliminate the and.
-        unsigned NumBits = MVT::getSizeInBits(VT)*2;
+        unsigned NumBits = VT.getSizeInBits()*2;
         if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
           return getNode(Opcode, VT, N1, N2, N3.getOperand(0));
       }
@@ -3142,31 +3453,31 @@
   return getNode(Opcode, VTList, Ops, 5);
 }
 
-SDVTList SelectionDAG::getVTList(MVT::ValueType VT) {
+SDVTList SelectionDAG::getVTList(MVT VT) {
   return makeVTList(SDNode::getValueTypeList(VT), 1);
 }
 
-SDVTList SelectionDAG::getVTList(MVT::ValueType VT1, MVT::ValueType VT2) {
-  for (std::list<std::vector<MVT::ValueType> >::iterator I = VTList.begin(),
+SDVTList SelectionDAG::getVTList(MVT VT1, MVT VT2) {
+  for (std::list<std::vector<MVT> >::iterator I = VTList.begin(),
        E = VTList.end(); I != E; ++I) {
     if (I->size() == 2 && (*I)[0] == VT1 && (*I)[1] == VT2)
       return makeVTList(&(*I)[0], 2);
   }
-  std::vector<MVT::ValueType> V;
+  std::vector<MVT> V;
   V.push_back(VT1);
   V.push_back(VT2);
   VTList.push_front(V);
   return makeVTList(&(*VTList.begin())[0], 2);
 }
-SDVTList SelectionDAG::getVTList(MVT::ValueType VT1, MVT::ValueType VT2,
-                                 MVT::ValueType VT3) {
-  for (std::list<std::vector<MVT::ValueType> >::iterator I = VTList.begin(),
+SDVTList SelectionDAG::getVTList(MVT VT1, MVT VT2,
+                                 MVT VT3) {
+  for (std::list<std::vector<MVT> >::iterator I = VTList.begin(),
        E = VTList.end(); I != E; ++I) {
     if (I->size() == 3 && (*I)[0] == VT1 && (*I)[1] == VT2 &&
         (*I)[2] == VT3)
       return makeVTList(&(*I)[0], 3);
   }
-  std::vector<MVT::ValueType> V;
+  std::vector<MVT> V;
   V.push_back(VT1);
   V.push_back(VT2);
   V.push_back(VT3);
@@ -3174,7 +3485,7 @@
   return makeVTList(&(*VTList.begin())[0], 3);
 }
 
-SDVTList SelectionDAG::getVTList(const MVT::ValueType *VTs, unsigned NumVTs) {
+SDVTList SelectionDAG::getVTList(const MVT *VTs, unsigned NumVTs) {
   switch (NumVTs) {
     case 0: assert(0 && "Cannot have nodes without results!");
     case 1: return getVTList(VTs[0]);
@@ -3183,7 +3494,7 @@
     default: break;
   }
 
-  for (std::list<std::vector<MVT::ValueType> >::iterator I = VTList.begin(),
+  for (std::list<std::vector<MVT> >::iterator I = VTList.begin(),
        E = VTList.end(); I != E; ++I) {
     if (I->size() != NumVTs || VTs[0] != (*I)[0] || VTs[1] != (*I)[1]) continue;
    
@@ -3197,7 +3508,7 @@
       return makeVTList(&*I->begin(), NumVTs);
   }
   
-  VTList.push_front(std::vector<MVT::ValueType>(VTs, VTs+NumVTs));
+  VTList.push_front(std::vector<MVT>(VTs, VTs+NumVTs));
   return makeVTList(&*VTList.begin()->begin(), NumVTs);
 }
 
@@ -3316,7 +3627,7 @@
   if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
     return SDOperand(Existing, InN.ResNo);
   
-  // Nope it doesn't.  Remove the node from it's current place in the maps.
+  // Nope it doesn't.  Remove the node from its current place in the maps.
   if (InsertPos)
     RemoveNodeFromCSEMaps(N);
   
@@ -3380,132 +3691,99 @@
 /// node of the specified opcode and operands, it returns that node instead of
 /// the current one.
 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc,
-                                   MVT::ValueType VT) {
+                                   MVT VT) {
   SDVTList VTs = getVTList(VT);
-  FoldingSetNodeID ID;
-  AddNodeIDNode(ID, ISD::BUILTIN_OP_END+TargetOpc, VTs, (SDOperand*)0, 0);
-  void *IP = 0;
-  if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
-    return ON;
-   
-  RemoveNodeFromCSEMaps(N);
-  
-  N->MorphNodeTo(ISD::BUILTIN_OP_END+TargetOpc, VTs, SDOperandPtr(), 0);
-
-  CSEMap.InsertNode(N, IP);
-  return N;
+  return SelectNodeTo(N, TargetOpc, VTs, (SDOperand*)0, 0);
 }
 
 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc,
-                                   MVT::ValueType VT, SDOperand Op1) {
-  // If an identical node already exists, use it.
+                                   MVT VT, SDOperand Op1) {
   SDVTList VTs = getVTList(VT);
   SDOperand Ops[] = { Op1 };
-  
-  FoldingSetNodeID ID;
-  AddNodeIDNode(ID, ISD::BUILTIN_OP_END+TargetOpc, VTs, Ops, 1);
-  void *IP = 0;
-  if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
-    return ON;
-                                       
-  RemoveNodeFromCSEMaps(N);
-  N->MorphNodeTo(ISD::BUILTIN_OP_END+TargetOpc, VTs, Ops, 1);
-  CSEMap.InsertNode(N, IP);
-  return N;
+  return SelectNodeTo(N, TargetOpc, VTs, Ops, 1);
 }
 
 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc,
-                                   MVT::ValueType VT, SDOperand Op1,
+                                   MVT VT, SDOperand Op1,
                                    SDOperand Op2) {
-  // If an identical node already exists, use it.
   SDVTList VTs = getVTList(VT);
   SDOperand Ops[] = { Op1, Op2 };
-  
-  FoldingSetNodeID ID;
-  AddNodeIDNode(ID, ISD::BUILTIN_OP_END+TargetOpc, VTs, Ops, 2);
-  void *IP = 0;
-  if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
-    return ON;
-                                       
-  RemoveNodeFromCSEMaps(N);
-  
-  N->MorphNodeTo(ISD::BUILTIN_OP_END+TargetOpc, VTs, Ops, 2);
-  
-  CSEMap.InsertNode(N, IP);   // Memoize the new node.
-  return N;
+  return SelectNodeTo(N, TargetOpc, VTs, Ops, 2);
 }
 
 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc,
-                                   MVT::ValueType VT, SDOperand Op1,
+                                   MVT VT, SDOperand Op1,
                                    SDOperand Op2, SDOperand Op3) {
-  // If an identical node already exists, use it.
   SDVTList VTs = getVTList(VT);
   SDOperand Ops[] = { Op1, Op2, Op3 };
-  FoldingSetNodeID ID;
-  AddNodeIDNode(ID, ISD::BUILTIN_OP_END+TargetOpc, VTs, Ops, 3);
-  void *IP = 0;
-  if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
-    return ON;
-                                       
-  RemoveNodeFromCSEMaps(N);
-  
-  N->MorphNodeTo(ISD::BUILTIN_OP_END+TargetOpc, VTs, Ops, 3);
-
-  CSEMap.InsertNode(N, IP);   // Memoize the new node.
-  return N;
+  return SelectNodeTo(N, TargetOpc, VTs, Ops, 3);
 }
 
 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc,
-                                   MVT::ValueType VT, SDOperandPtr Ops,
+                                   MVT VT, SDOperandPtr Ops,
                                    unsigned NumOps) {
-  // If an identical node already exists, use it.
   SDVTList VTs = getVTList(VT);
-  FoldingSetNodeID ID;
-  AddNodeIDNode(ID, ISD::BUILTIN_OP_END+TargetOpc, VTs, Ops, NumOps);
-  void *IP = 0;
-  if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
-    return ON;
-                                       
-  RemoveNodeFromCSEMaps(N);
-  N->MorphNodeTo(ISD::BUILTIN_OP_END+TargetOpc, VTs, Ops, NumOps);
-  
-  CSEMap.InsertNode(N, IP);   // Memoize the new node.
-  return N;
+  return SelectNodeTo(N, TargetOpc, VTs, Ops, NumOps);
+}
+
+SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc,
+                                   MVT VT1, MVT VT2, SDOperandPtr Ops,
+                                   unsigned NumOps) {
+  SDVTList VTs = getVTList(VT1, VT2);
+  return SelectNodeTo(N, TargetOpc, VTs, Ops, NumOps);
+}
+
+SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc,
+                                   MVT VT1, MVT VT2) {
+  SDVTList VTs = getVTList(VT1, VT2);
+  return SelectNodeTo(N, TargetOpc, VTs, (SDOperand *)0, 0);
+}
+
+SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc,
+                                   MVT VT1, MVT VT2, MVT VT3, SDOperandPtr Ops,
+                                   unsigned NumOps) {
+  SDVTList VTs = getVTList(VT1, VT2, VT3);
+  return SelectNodeTo(N, TargetOpc, VTs, Ops, NumOps);
 }
 
 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc, 
-                                   MVT::ValueType VT1, MVT::ValueType VT2,
+                                   MVT VT1, MVT VT2,
+                                   SDOperand Op1) {
+  SDVTList VTs = getVTList(VT1, VT2);
+  SDOperand Ops[] = { Op1 };
+  return SelectNodeTo(N, TargetOpc, VTs, Ops, 1);
+}
+
+SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc, 
+                                   MVT VT1, MVT VT2,
                                    SDOperand Op1, SDOperand Op2) {
   SDVTList VTs = getVTList(VT1, VT2);
-  FoldingSetNodeID ID;
   SDOperand Ops[] = { Op1, Op2 };
-  AddNodeIDNode(ID, ISD::BUILTIN_OP_END+TargetOpc, VTs, Ops, 2);
-  void *IP = 0;
-  if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
-    return ON;
-
-  RemoveNodeFromCSEMaps(N);
-  N->MorphNodeTo(ISD::BUILTIN_OP_END+TargetOpc, VTs, Ops, 2);
-  CSEMap.InsertNode(N, IP);   // Memoize the new node.
-  return N;
+  return SelectNodeTo(N, TargetOpc, VTs, Ops, 2);
 }
 
 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc,
-                                   MVT::ValueType VT1, MVT::ValueType VT2,
+                                   MVT VT1, MVT VT2,
                                    SDOperand Op1, SDOperand Op2, 
                                    SDOperand Op3) {
-  // If an identical node already exists, use it.
   SDVTList VTs = getVTList(VT1, VT2);
   SDOperand Ops[] = { Op1, Op2, Op3 };
+  return SelectNodeTo(N, TargetOpc, VTs, Ops, 3);
+}
+
+SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned TargetOpc,
+                                   SDVTList VTs, SDOperandPtr Ops,
+                                   unsigned NumOps) {
+  // If an identical node already exists, use it.
   FoldingSetNodeID ID;
-  AddNodeIDNode(ID, ISD::BUILTIN_OP_END+TargetOpc, VTs, Ops, 3);
+  AddNodeIDNode(ID, ISD::BUILTIN_OP_END+TargetOpc, VTs, Ops, NumOps);
   void *IP = 0;
   if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
     return ON;
 
   RemoveNodeFromCSEMaps(N);
 
-  N->MorphNodeTo(ISD::BUILTIN_OP_END+TargetOpc, VTs, Ops, 3);
+  N->MorphNodeTo(ISD::BUILTIN_OP_END+TargetOpc, VTs, Ops, NumOps);
   CSEMap.InsertNode(N, IP);   // Memoize the new node.
   return N;
 }
@@ -3517,94 +3795,87 @@
 /// Note that getTargetNode returns the resultant node.  If there is already a
 /// node of the specified opcode and operands, it returns that node instead of
 /// the current one.
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT) {
+SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT) {
   return getNode(ISD::BUILTIN_OP_END+Opcode, VT).Val;
 }
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT,
-                                    SDOperand Op1) {
+SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT, SDOperand Op1) {
   return getNode(ISD::BUILTIN_OP_END+Opcode, VT, Op1).Val;
 }
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT,
+SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT,
                                     SDOperand Op1, SDOperand Op2) {
   return getNode(ISD::BUILTIN_OP_END+Opcode, VT, Op1, Op2).Val;
 }
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT,
+SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT,
                                     SDOperand Op1, SDOperand Op2,
                                     SDOperand Op3) {
   return getNode(ISD::BUILTIN_OP_END+Opcode, VT, Op1, Op2, Op3).Val;
 }
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT,
+SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT,
                                     SDOperandPtr Ops, unsigned NumOps) {
   return getNode(ISD::BUILTIN_OP_END+Opcode, VT, Ops, NumOps).Val;
 }
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1,
-                                    MVT::ValueType VT2) {
-  const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2);
+SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2) {
+  const MVT *VTs = getNodeValueTypes(VT1, VT2);
   SDOperand Op;
   return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 2, &Op, 0).Val;
 }
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1,
-                                    MVT::ValueType VT2, SDOperand Op1) {
-  const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2);
+SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1,
+                                    MVT VT2, SDOperand Op1) {
+  const MVT *VTs = getNodeValueTypes(VT1, VT2);
   return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 2, &Op1, 1).Val;
 }
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1,
-                                    MVT::ValueType VT2, SDOperand Op1,
+SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1,
+                                    MVT VT2, SDOperand Op1,
                                     SDOperand Op2) {
-  const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2);
+  const MVT *VTs = getNodeValueTypes(VT1, VT2);
   SDOperand Ops[] = { Op1, Op2 };
   return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 2, Ops, 2).Val;
 }
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1,
-                                    MVT::ValueType VT2, SDOperand Op1,
+SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1,
+                                    MVT VT2, SDOperand Op1,
                                     SDOperand Op2, SDOperand Op3) {
-  const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2);
+  const MVT *VTs = getNodeValueTypes(VT1, VT2);
   SDOperand Ops[] = { Op1, Op2, Op3 };
   return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 2, Ops, 3).Val;
 }
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1, 
-                                    MVT::ValueType VT2,
+SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2,
                                     SDOperandPtr Ops, unsigned NumOps) {
-  const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2);
+  const MVT *VTs = getNodeValueTypes(VT1, VT2);
   return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 2, Ops, NumOps).Val;
 }
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1,
-                                    MVT::ValueType VT2, MVT::ValueType VT3,
+SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, MVT VT3,
                                     SDOperand Op1, SDOperand Op2) {
-  const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2, VT3);
+  const MVT *VTs = getNodeValueTypes(VT1, VT2, VT3);
   SDOperand Ops[] = { Op1, Op2 };
   return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 3, Ops, 2).Val;
 }
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1,
-                                    MVT::ValueType VT2, MVT::ValueType VT3,
+SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, MVT VT3,
                                     SDOperand Op1, SDOperand Op2,
                                     SDOperand Op3) {
-  const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2, VT3);
+  const MVT *VTs = getNodeValueTypes(VT1, VT2, VT3);
   SDOperand Ops[] = { Op1, Op2, Op3 };
   return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 3, Ops, 3).Val;
 }
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1, 
-                                    MVT::ValueType VT2, MVT::ValueType VT3,
+SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, MVT VT3,
                                     SDOperandPtr Ops, unsigned NumOps) {
-  const MVT::ValueType *VTs = getNodeValueTypes(VT1, VT2, VT3);
+  const MVT *VTs = getNodeValueTypes(VT1, VT2, VT3);
   return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 3, Ops, NumOps).Val;
 }
-SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT::ValueType VT1, 
-                                    MVT::ValueType VT2, MVT::ValueType VT3,
-                                    MVT::ValueType VT4,
+SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1,
+                                    MVT VT2, MVT VT3, MVT VT4,
                                     SDOperandPtr Ops, unsigned NumOps) {
-  std::vector<MVT::ValueType> VTList;
+  std::vector<MVT> VTList;
   VTList.push_back(VT1);
   VTList.push_back(VT2);
   VTList.push_back(VT3);
   VTList.push_back(VT4);
-  const MVT::ValueType *VTs = getNodeValueTypes(VTList);
+  const MVT *VTs = getNodeValueTypes(VTList);
   return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, 4, Ops, NumOps).Val;
 }
 SDNode *SelectionDAG::getTargetNode(unsigned Opcode,
-                                    std::vector<MVT::ValueType> &ResultTys,
+                                    std::vector<MVT> &ResultTys,
                                     SDOperandPtr Ops, unsigned NumOps) {
-  const MVT::ValueType *VTs = getNodeValueTypes(ResultTys);
+  const MVT *VTs = getNodeValueTypes(ResultTys);
   return getNode(ISD::BUILTIN_OP_END+Opcode, VTs, ResultTys.size(),
                  Ops, NumOps).Val;
 }
@@ -3658,7 +3929,7 @@
       ReplaceAllUsesWith(U, Existing, UpdateListener);
       // U is now dead.  Inform the listener if it exists and delete it.
       if (UpdateListener) 
-        UpdateListener->NodeDeleted(U);
+        UpdateListener->NodeDeleted(U, Existing);
       DeleteNodeNotInCSEMaps(U);
     } else {
       // If the node doesn't already exist, we updated it.  Inform a listener if
@@ -3705,7 +3976,7 @@
       ReplaceAllUsesWith(U, Existing, UpdateListener);
       // U is now dead.  Inform the listener if it exists and delete it.
       if (UpdateListener) 
-        UpdateListener->NodeDeleted(U);
+        UpdateListener->NodeDeleted(U, Existing);
       DeleteNodeNotInCSEMaps(U);
     } else {
       // If the node doesn't already exist, we updated it.  Inform a listener if
@@ -3750,7 +4021,7 @@
       ReplaceAllUsesWith(U, Existing, UpdateListener);
       // U is now dead.  Inform the listener if it exists and delete it.
       if (UpdateListener) 
-        UpdateListener->NodeDeleted(U);
+        UpdateListener->NodeDeleted(U, Existing);
       DeleteNodeNotInCSEMaps(U);
     } else {
       // If the node doesn't already exist, we updated it.  Inform a listener if
@@ -3774,9 +4045,9 @@
                               SelectionDAG::DAGUpdateListener *chain)
       : Set(set), Chain(chain) {}
  
-    virtual void NodeDeleted(SDNode *N) {
+    virtual void NodeDeleted(SDNode *N, SDNode *E) {
       Set.remove(N);
-      if (Chain) Chain->NodeDeleted(N);
+      if (Chain) Chain->NodeDeleted(N, E);
     }
     virtual void NodeUpdated(SDNode *N) {
       if (Chain) Chain->NodeUpdated(N);
@@ -3805,8 +4076,7 @@
   for (SDNode::use_iterator UI = From.Val->use_begin(), 
       E = From.Val->use_end(); UI != E; ++UI) {
     SDNode *User = UI->getUser();
-    if (!Users.count(User))
-      Users.insert(User);
+    Users.insert(User);
   }
 
   // When one of the recursive merges deletes nodes from the graph, we need to
@@ -3858,7 +4128,7 @@
     ReplaceAllUsesWith(User, Existing, &CSUL);
     
     // User is now dead.  Notify a listener if present.
-    if (UpdateListener) UpdateListener->NodeDeleted(User);
+    if (UpdateListener) UpdateListener->NodeDeleted(User, Existing);
     DeleteNodeNotInCSEMaps(User);
   }
 }
@@ -3927,7 +4197,6 @@
 void BinarySDNode::ANCHOR() {}
 void TernarySDNode::ANCHOR() {}
 void HandleSDNode::ANCHOR() {}
-void StringSDNode::ANCHOR() {}
 void ConstantSDNode::ANCHOR() {}
 void ConstantFPSDNode::ANCHOR() {}
 void GlobalAddressSDNode::ANCHOR() {}
@@ -3938,10 +4207,13 @@
 void SrcValueSDNode::ANCHOR() {}
 void MemOperandSDNode::ANCHOR() {}
 void RegisterSDNode::ANCHOR() {}
+void DbgStopPointSDNode::ANCHOR() {}
+void LabelSDNode::ANCHOR() {}
 void ExternalSymbolSDNode::ANCHOR() {}
 void CondCodeSDNode::ANCHOR() {}
 void ARG_FLAGSSDNode::ANCHOR() {}
 void VTSDNode::ANCHOR() {}
+void MemSDNode::ANCHOR() {}
 void LoadSDNode::ANCHOR() {}
 void StoreSDNode::ANCHOR() {}
 void AtomicSDNode::ANCHOR() {}
@@ -3952,7 +4224,7 @@
 }
 
 GlobalAddressSDNode::GlobalAddressSDNode(bool isTarget, const GlobalValue *GA,
-                                         MVT::ValueType VT, int o)
+                                         MVT VT, int o)
   : SDNode(isa<GlobalVariable>(GA) &&
            cast<GlobalVariable>(GA)->isThreadLocal() ?
            // Thread Local
@@ -3964,13 +4236,31 @@
 }
 
 /// getMemOperand - Return a MachineMemOperand object describing the memory
+/// reference performed by this atomic.
+MachineMemOperand AtomicSDNode::getMemOperand() const {
+  int Size = (getValueType(0).getSizeInBits() + 7) >> 3;
+  int Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
+  if (isVolatile()) Flags |= MachineMemOperand::MOVolatile;
+  
+  // Check if the atomic references a frame index
+  const FrameIndexSDNode *FI = 
+  dyn_cast<const FrameIndexSDNode>(getBasePtr().Val);
+  if (!getSrcValue() && FI)
+    return MachineMemOperand(PseudoSourceValue::getFixedStack(), Flags,
+                             FI->getIndex(), Size, getAlignment());
+  else
+    return MachineMemOperand(getSrcValue(), Flags, getSrcValueOffset(),
+                             Size, getAlignment());
+}
+
+/// getMemOperand - Return a MachineMemOperand object describing the memory
 /// reference performed by this load or store.
 MachineMemOperand LSBaseSDNode::getMemOperand() const {
-  int Size = (MVT::getSizeInBits(getMemoryVT()) + 7) >> 3;
+  int Size = (getMemoryVT().getSizeInBits() + 7) >> 3;
   int Flags =
     getOpcode() == ISD::LOAD ? MachineMemOperand::MOLoad :
                                MachineMemOperand::MOStore;
-  if (IsVolatile) Flags |= MachineMemOperand::MOVolatile;
+  if (isVolatile()) Flags |= MachineMemOperand::MOVolatile;
 
   // Check if the load references a frame index, and does not have
   // an SV attached.
@@ -3978,10 +4268,10 @@
     dyn_cast<const FrameIndexSDNode>(getBasePtr().Val);
   if (!getSrcValue() && FI)
     return MachineMemOperand(PseudoSourceValue::getFixedStack(), Flags,
-                             FI->getIndex(), Size, Alignment);
+                             FI->getIndex(), Size, getAlignment());
   else
     return MachineMemOperand(getSrcValue(), Flags,
-                             getSrcValueOffset(), Size, Alignment);
+                             getSrcValueOffset(), Size, getAlignment());
 }
 
 /// Profile - Gather unique data for the node.
@@ -3992,14 +4282,14 @@
 
 /// getValueTypeList - Return a pointer to the specified value type.
 ///
-const MVT::ValueType *SDNode::getValueTypeList(MVT::ValueType VT) {
-  if (MVT::isExtendedVT(VT)) {
-    static std::set<MVT::ValueType> EVTs;
+const MVT *SDNode::getValueTypeList(MVT VT) {
+  if (VT.isExtended()) {
+    static std::set<MVT, MVT::compareRawBits> EVTs;
     return &(*EVTs.insert(VT).first);
   } else {
-    static MVT::ValueType VTs[MVT::LAST_VALUETYPE];
-    VTs[VT] = VT;
-    return &VTs[VT];
+    static MVT VTs[MVT::LAST_VALUETYPE];
+    VTs[VT.getSimpleVT()] = VT;
+    return &VTs[VT.getSimpleVT()];
   }
 }
 
@@ -4172,9 +4462,18 @@
    
   case ISD::PREFETCH:      return "Prefetch";
   case ISD::MEMBARRIER:    return "MemBarrier";
-  case ISD::ATOMIC_LCS:    return "AtomicLCS";
-  case ISD::ATOMIC_LAS:    return "AtomicLAS";
-  case ISD::ATOMIC_SWAP:    return "AtomicSWAP";
+  case ISD::ATOMIC_CMP_SWAP:  return "AtomicCmpSwap";
+  case ISD::ATOMIC_LOAD_ADD:  return "AtomicLoadAdd";
+  case ISD::ATOMIC_LOAD_SUB:  return "AtomicLoadSub";
+  case ISD::ATOMIC_LOAD_AND:  return "AtomicLoadAnd";
+  case ISD::ATOMIC_LOAD_OR:   return "AtomicLoadOr";
+  case ISD::ATOMIC_LOAD_XOR:  return "AtomicLoadXor";
+  case ISD::ATOMIC_LOAD_NAND: return "AtomicLoadNand";
+  case ISD::ATOMIC_LOAD_MIN:  return "AtomicLoadMin";
+  case ISD::ATOMIC_LOAD_MAX:  return "AtomicLoadMax";
+  case ISD::ATOMIC_LOAD_UMIN: return "AtomicLoadUMin";
+  case ISD::ATOMIC_LOAD_UMAX: return "AtomicLoadUMax";
+  case ISD::ATOMIC_SWAP:   return "AtomicSWAP";
   case ISD::PCMARKER:      return "PCMarker";
   case ISD::READCYCLECOUNTER: return "ReadCycleCounter";
   case ISD::SRCVALUE:      return "SrcValue";
@@ -4184,7 +4483,6 @@
   case ISD::AssertSext:    return "AssertSext";
   case ISD::AssertZext:    return "AssertZext";
 
-  case ISD::STRING:        return "String";
   case ISD::BasicBlock:    return "BasicBlock";
   case ISD::ARG_FLAGS:     return "ArgFlags";
   case ISD::VALUETYPE:     return "ValueType";
@@ -4230,7 +4528,8 @@
   case ISD::UNDEF:         return "undef";
   case ISD::MERGE_VALUES:  return "merge_values";
   case ISD::INLINEASM:     return "inlineasm";
-  case ISD::LABEL:         return "label";
+  case ISD::DBG_LABEL:     return "dbg_label";
+  case ISD::EH_LABEL:      return "eh_label";
   case ISD::DECLARE:       return "declare";
   case ISD::HANDLENODE:    return "handlenode";
   case ISD::FORMAL_ARGUMENTS: return "formal_arguments";
@@ -4276,6 +4575,7 @@
   case ISD::FGETSIGN:  return "fgetsign";
 
   case ISD::SETCC:       return "setcc";
+  case ISD::VSETCC:      return "vsetcc";
   case ISD::SELECT:      return "select";
   case ISD::SELECT_CC:   return "select_cc";
   case ISD::INSERT_VECTOR_ELT:   return "insert_vector_elt";
@@ -4344,7 +4644,7 @@
   case ISD::CTLZ:    return "ctlz";
 
   // Debug info
-  case ISD::LOCATION: return "location";
+  case ISD::DBG_STOPPOINT: return "dbg_stoppoint";
   case ISD::DEBUG_LOC: return "debug_loc";
 
   // Trampolines
@@ -4427,7 +4727,7 @@
     if (getValueType(i) == MVT::Other)
       cerr << "ch";
     else
-      cerr << MVT::getValueTypeString(getValueType(i));
+      cerr << getValueType(i).getMVTString();
   }
   cerr << " = " << getOperationName(G);
 
@@ -4516,8 +4816,9 @@
   } else if (const ARG_FLAGSSDNode *N = dyn_cast<ARG_FLAGSSDNode>(this)) {
     cerr << N->getArgFlags().getArgFlagsString();
   } else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) {
-    cerr << ":" << MVT::getValueTypeString(N->getVT());
-  } else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {
+    cerr << ":" << N->getVT().getMVTString();
+  }
+  else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {
     const Value *SrcValue = LD->getSrcValue();
     int SrcOffset = LD->getSrcValueOffset();
     cerr << " <";
@@ -4541,7 +4842,7 @@
       break;
     }
     if (doExt)
-      cerr << MVT::getValueTypeString(LD->getMemoryVT()) << ">";
+      cerr << LD->getMemoryVT().getMVTString() << ">";
 
     const char *AM = getIndexedModeName(LD->getAddressingMode());
     if (*AM)
@@ -4561,7 +4862,7 @@
 
     if (ST->isTruncatingStore())
       cerr << " <trunc "
-           << MVT::getValueTypeString(ST->getMemoryVT()) << ">";
+           << ST->getMemoryVT().getMVTString() << ">";
 
     const char *AM = getIndexedModeName(ST->getAddressingMode());
     if (*AM)
@@ -4569,6 +4870,18 @@
     if (ST->isVolatile())
       cerr << " <volatile>";
     cerr << " alignment=" << ST->getAlignment();
+  } else if (const AtomicSDNode* AT = dyn_cast<AtomicSDNode>(this)) {
+    const Value *SrcValue = AT->getSrcValue();
+    int SrcOffset = AT->getSrcValueOffset();
+    cerr << " <";
+    if (SrcValue)
+      cerr << SrcValue;
+    else
+      cerr << "null";
+    cerr << ":" << SrcOffset << ">";
+    if (AT->isVolatile())
+      cerr << " <volatile>";
+    cerr << " alignment=" << AT->getAlignment();
   }
 }
 

Modified: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp Sun Jul  6 15:45:41 2008
@@ -42,12 +42,17 @@
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/Debug.h"
 #include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Timer.h"
 #include <algorithm>
 using namespace llvm;
 
+static cl::opt<bool>
+EnableValueProp("enable-value-prop", cl::Hidden, cl::init(false));
+
+
 #ifndef NDEBUG
 static cl::opt<bool>
 ViewISelDAGs("view-isel-dags", cl::Hidden,
@@ -74,53 +79,166 @@
 /// ISHeuristic command line option for instruction schedulers.
 ///
 //===---------------------------------------------------------------------===//
-namespace {
-  cl::opt<RegisterScheduler::FunctionPassCtor, false,
-          RegisterPassParser<RegisterScheduler> >
-  ISHeuristic("pre-RA-sched",
-              cl::init(&createDefaultScheduler),
-              cl::desc("Instruction schedulers available (before register"
-                       " allocation):"));
-
-  static RegisterScheduler
-  defaultListDAGScheduler("default", "  Best scheduler for the target",
-                          createDefaultScheduler);
-} // namespace
+static cl::opt<RegisterScheduler::FunctionPassCtor, false,
+               RegisterPassParser<RegisterScheduler> >
+ISHeuristic("pre-RA-sched",
+            cl::init(&createDefaultScheduler),
+            cl::desc("Instruction schedulers available (before register"
+                     " allocation):"));
+
+static RegisterScheduler
+defaultListDAGScheduler("default", "  Best scheduler for the target",
+                        createDefaultScheduler);
 
 namespace { struct SDISelAsmOperandInfo; }
 
+/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence of
+/// insertvalue or extractvalue indices that identify a member, return
+/// the linearized index of the start of the member.
+///
+static unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
+                                   const unsigned *Indices,
+                                   const unsigned *IndicesEnd,
+                                   unsigned CurIndex = 0) {
+  // Base case: We're done.
+  if (Indices && Indices == IndicesEnd)
+    return CurIndex;
+
+  // Given a struct type, recursively traverse the elements.
+  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+    for (StructType::element_iterator EB = STy->element_begin(),
+                                      EI = EB,
+                                      EE = STy->element_end();
+        EI != EE; ++EI) {
+      if (Indices && *Indices == unsigned(EI - EB))
+        return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
+      CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
+    }
+  }
+  // Given an array type, recursively traverse the elements.
+  else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+    const Type *EltTy = ATy->getElementType();
+    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
+      if (Indices && *Indices == i)
+        return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
+      CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
+    }
+  }
+  // We haven't found the type we're looking for, so keep searching.
+  return CurIndex + 1;
+}
+
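An illustration of the linearization (hypothetical type, not from the patch): for the aggregate { i32, { float, double }, i8 }, the leaves are numbered i32 = 0, float = 1, double = 2, i8 = 3, so ComputeLinearIndex with the index sequence {1, 1} (selecting the double) returns 2.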
+/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
+/// MVTs that represent all the individual underlying
+/// non-aggregate types that comprise it.
+///
+/// If Offsets is non-null, it points to a vector to be filled in
+/// with the in-memory offsets of each of the individual values.
+///
+static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
+                            SmallVectorImpl<MVT> &ValueVTs,
+                            SmallVectorImpl<uint64_t> *Offsets = 0,
+                            uint64_t StartingOffset = 0) {
+  // Given a struct type, recursively traverse the elements.
+  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
+    for (StructType::element_iterator EB = STy->element_begin(),
+                                      EI = EB,
+                                      EE = STy->element_end();
+         EI != EE; ++EI)
+      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
+                      StartingOffset + SL->getElementOffset(EI - EB));
+    return;
+  }
+  // Given an array type, recursively traverse the elements.
+  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+    const Type *EltTy = ATy->getElementType();
+    uint64_t EltSize = TLI.getTargetData()->getABITypeSize(EltTy);
+    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
+      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
+                      StartingOffset + i * EltSize);
+    return;
+  }
+  // Base case: we can get an MVT for this LLVM IR type.
+  ValueVTs.push_back(TLI.getValueType(Ty));
+  if (Offsets)
+    Offsets->push_back(StartingOffset);
+}
+
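A worked example for ComputeValueVTs (hypothetical type; assuming i32 and float each occupy 4 bytes with no struct padding on the target): calling it on { i32, [2 x float] } would produce

  //   ValueVTs = { MVT::i32, MVT::f32, MVT::f32 }
  //   Offsets  = { 0, 4, 8 }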
 namespace {
-  /// RegsForValue - This struct represents the physical registers that a
-  /// particular value is assigned and the type information about the value.
-  /// This is needed because values can be promoted into larger registers and
-  /// expanded into multiple smaller registers than the value.
+  /// RegsForValue - This struct represents the registers (physical or virtual)
+  /// that a particular set of values is assigned, and the type information about
+  /// those values. The most common situation is to represent one value at a time,
+  /// but struct or array values are handled element-wise as multiple values.
+  /// The splitting of aggregates is performed recursively, so that we never
+  /// have aggregate-typed registers. The values at this point do not necessarily
+  /// have legal types, so each value may require one or more registers of some
+  /// legal type.
+  /// 
   struct VISIBILITY_HIDDEN RegsForValue {
-    /// Regs - This list holds the register (for legal and promoted values)
-    /// or register set (for expanded values) that the value should be assigned
-    /// to.
-    std::vector<unsigned> Regs;
+    /// TLI - The TargetLowering object.
+    ///
+    const TargetLowering *TLI;
+
+    /// ValueVTs - The value types of the values, which may not be legal, and
+    /// may need to be promoted or synthesized from one or more registers.
+    ///
+    SmallVector<MVT, 4> ValueVTs;
     
-    /// RegVT - The value type of each register.
+    /// RegVTs - The value types of the registers. This is the same size as
+    /// ValueVTs and it records, for each value, what the type of the assigned
+    /// register or registers are. (Individual values are never synthesized
+    /// from more than one type of register.)
+    ///
+    /// With virtual registers, the contents of RegVTs are redundant with
+    /// TLI's getRegisterType member function; however, with physical
+    /// registers it is necessary to have a separate record of the types.
     ///
-    MVT::ValueType RegVT;
+    SmallVector<MVT, 4> RegVTs;
     
-    /// ValueVT - The value type of the LLVM value, which may be promoted from
-    /// RegVT or made from merging the two expanded parts.
-    MVT::ValueType ValueVT;
+    /// Regs - This list holds the registers assigned to the values.
+    /// Each legal or promoted value requires one register, and each
+    /// expanded value requires multiple registers.
+    ///
+    SmallVector<unsigned, 4> Regs;
     
-    RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {}
+    RegsForValue() : TLI(0) {}
     
-    RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt)
-      : RegVT(regvt), ValueVT(valuevt) {
-        Regs.push_back(Reg);
+    RegsForValue(const TargetLowering &tli,
+                 const SmallVector<unsigned, 4> &regs, 
+                 MVT regvt, MVT valuevt)
+      : TLI(&tli),  ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
+    RegsForValue(const TargetLowering &tli,
+                 const SmallVector<unsigned, 4> &regs, 
+                 const SmallVector<MVT, 4> &regvts,
+                 const SmallVector<MVT, 4> &valuevts)
+      : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
+    RegsForValue(const TargetLowering &tli,
+                 unsigned Reg, const Type *Ty) : TLI(&tli) {
+      ComputeValueVTs(tli, Ty, ValueVTs);
+
+      for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
+        MVT ValueVT = ValueVTs[Value];
+        unsigned NumRegs = TLI->getNumRegisters(ValueVT);
+        MVT RegisterVT = TLI->getRegisterType(ValueVT);
+        for (unsigned i = 0; i != NumRegs; ++i)
+          Regs.push_back(Reg + i);
+        RegVTs.push_back(RegisterVT);
+        Reg += NumRegs;
+      }
     }
-    RegsForValue(const std::vector<unsigned> &regs, 
-                 MVT::ValueType regvt, MVT::ValueType valuevt)
-      : Regs(regs), RegVT(regvt), ValueVT(valuevt) {
+    
+    /// append - Add the specified values to this one.
+    void append(const RegsForValue &RHS) {
+      TLI = RHS.TLI;
+      ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
+      RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
+      Regs.append(RHS.Regs.begin(), RHS.Regs.end());
     }
     
+    
     /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
-    /// this value and returns the result as a ValueVT value.  This uses 
+    /// this value and returns the result as values of the types in ValueVTs.  This uses
     /// Chain/Flag as the input and updates them for the output Chain/Flag.
     /// If the Flag pointer is NULL, no flag is used.
     SDOperand getCopyFromRegs(SelectionDAG &DAG,
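For illustration, a minimal sketch of the state the new RegsForValue(TLI, Reg, Ty)
constructor builds, assuming a 32-bit target where i64 is expanded into two i32
registers (getNumRegisters(i64) == 2 and getRegisterType(i64) == i32 are target
assumptions, not facts established by this patch):

   // Hypothetical value of LLVM type { i64, float } starting at vreg 100:
   //   ValueVTs = { MVT::i64, MVT::f32 }
   //   RegVTs   = { MVT::i32, MVT::f32 }      // one register type per value
   //   Regs     = { 100, 101, 102 }           // i64 -> vregs 100-101, float -> 102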
@@ -147,15 +265,16 @@
   /// for the target.
   ScheduleDAG* createDefaultScheduler(SelectionDAGISel *IS,
                                       SelectionDAG *DAG,
-                                      MachineBasicBlock *BB) {
+                                      MachineBasicBlock *BB,
+                                      bool Fast) {
     TargetLowering &TLI = IS->getTargetLowering();
     
     if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency) {
-      return createTDListDAGScheduler(IS, DAG, BB);
+      return createTDListDAGScheduler(IS, DAG, BB, Fast);
     } else {
       assert(TLI.getSchedulingPreference() ==
            TargetLowering::SchedulingForRegPressure && "Unknown sched type!");
-      return createBURRListDAGScheduler(IS, DAG, BB);
+      return createBURRListDAGScheduler(IS, DAG, BB, Fast);
     }
   }
 
@@ -190,7 +309,7 @@
     SmallSet<Instruction*, 8> CatchInfoFound;
 #endif
 
-    unsigned MakeReg(MVT::ValueType VT) {
+    unsigned MakeReg(MVT VT) {
       return RegInfo.createVirtualRegister(TLI.getRegClassFor(VT));
     }
     
@@ -207,6 +326,16 @@
       assert(R == 0 && "Already initialized this value register!");
       return R = CreateRegForValue(V);
     }
+    
+    struct LiveOutInfo {
+      unsigned NumSignBits;
+      APInt KnownOne, KnownZero;
+      LiveOutInfo() : NumSignBits(0) {}
+    };
+    
+    /// LiveOutRegInfo - Information about live out vregs, indexed by their
+    /// register number offset by 'FirstVirtualRegister'.
+    std::vector<LiveOutInfo> LiveOutRegInfo;
   };
 }
 
@@ -295,7 +424,7 @@
     for (BasicBlock::iterator I = BB->begin();(PN = dyn_cast<PHINode>(I)); ++I){
       if (PN->use_empty()) continue;
       
-      MVT::ValueType VT = TLI.getValueType(PN->getType());
+      MVT VT = TLI.getValueType(PN->getType());
       unsigned NumRegisters = TLI.getNumRegisters(VT);
       unsigned PHIReg = ValueMap[PN];
       assert(PHIReg && "PHI node does not have an assigned virtual register!");
@@ -309,17 +438,26 @@
 /// CreateRegForValue - Allocate the appropriate number of virtual registers of
 /// the correctly promoted or expanded types.  Assign these registers
 /// consecutive vreg numbers and return the first assigned number.
+///
+/// In the case that the given value has struct or array type, this function
+/// will assign registers for each member or element.
+///
 unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
-  MVT::ValueType VT = TLI.getValueType(V->getType());
-  
-  unsigned NumRegisters = TLI.getNumRegisters(VT);
-  MVT::ValueType RegisterVT = TLI.getRegisterType(VT);
+  SmallVector<MVT, 4> ValueVTs;
+  ComputeValueVTs(TLI, V->getType(), ValueVTs);
 
-  unsigned R = MakeReg(RegisterVT);
-  for (unsigned i = 1; i != NumRegisters; ++i)
-    MakeReg(RegisterVT);
+  unsigned FirstReg = 0;
+  for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
+    MVT ValueVT = ValueVTs[Value];
+    MVT RegisterVT = TLI.getRegisterType(ValueVT);
 
-  return R;
+    unsigned NumRegs = TLI.getNumRegisters(ValueVT);
+    for (unsigned i = 0; i != NumRegs; ++i) {
+      unsigned R = MakeReg(RegisterVT);
+      if (!FirstReg) FirstReg = R;
+    }
+  }
+  return FirstReg;
 }
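For illustration, the consecutive vreg numbering CreateRegForValue now hands out
for the same hypothetical { i64, float } value on the 32-bit target sketched above:

   //   MakeReg(MVT::i32)  -> vreg 100   (FirstReg = 100)
   //   MakeReg(MVT::i32)  -> vreg 101
   //   MakeReg(MVT::f32)  -> vreg 102
   //   return 100;
   // The RegsForValue(TLI, Reg, Ty) constructor relies on this consecutive
   // numbering when it rebuilds the register list from the first vreg alone.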
 
 //===----------------------------------------------------------------------===//
@@ -337,7 +475,7 @@
   /// them up and then emit token factor nodes when possible.  This allows us to
   /// get simple disambiguation between loads without worrying about alias
   /// analysis.
-  std::vector<SDOperand> PendingLoads;
+  SmallVector<SDOperand, 8> PendingLoads;
 
   /// PendingExports - CopyToReg nodes that copy values to virtual registers
   /// for export to other blocks need to be emitted before any terminator
@@ -521,10 +659,6 @@
 
   void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }
 
-  SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr,
-                        const Value *SV, SDOperand Root,
-                        bool isVolatile, unsigned Alignment);
-
   SDOperand getValue(const Value *V);
 
   void setValue(const Value *V, SDOperand NewN) {
@@ -610,6 +744,8 @@
   void visitAShr(User &I) { visitShift(I, ISD::SRA); }
   void visitICmp(User &I);
   void visitFCmp(User &I);
+  void visitVICmp(User &I);
+  void visitVFCmp(User &I);
   // Visit the conversion instructions
   void visitTrunc(User &I);
   void visitZExt(User &I);
@@ -628,6 +764,9 @@
   void visitInsertElement(User &I);
   void visitShuffleVector(User &I);
 
+  void visitExtractValue(ExtractValueInst &I);
+  void visitInsertValue(InsertValueInst &I);
+
   void visitGetElementPtr(User &I);
   void visitSelect(User &I);
 
@@ -657,6 +796,10 @@
     assert(0 && "UserOp2 should not exist at instruction selection time!");
     abort();
   }
+  
+private:
+  inline const char *implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op);
+
 };
 } // end namespace llvm
 
@@ -669,8 +812,8 @@
 static SDOperand getCopyFromParts(SelectionDAG &DAG,
                                   const SDOperand *Parts,
                                   unsigned NumParts,
-                                  MVT::ValueType PartVT,
-                                  MVT::ValueType ValueVT,
+                                  MVT PartVT,
+                                  MVT ValueVT,
                                   ISD::NodeType AssertOp = ISD::DELETED_NODE) {
   assert(NumParts > 0 && "No parts to assemble!");
   TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -678,20 +821,20 @@
 
   if (NumParts > 1) {
     // Assemble the value from multiple parts.
-    if (!MVT::isVector(ValueVT)) {
-      unsigned PartBits = MVT::getSizeInBits(PartVT);
-      unsigned ValueBits = MVT::getSizeInBits(ValueVT);
+    if (!ValueVT.isVector()) {
+      unsigned PartBits = PartVT.getSizeInBits();
+      unsigned ValueBits = ValueVT.getSizeInBits();
 
       // Assemble the power of 2 part.
       unsigned RoundParts = NumParts & (NumParts - 1) ?
         1 << Log2_32(NumParts) : NumParts;
       unsigned RoundBits = PartBits * RoundParts;
-      MVT::ValueType RoundVT = RoundBits == ValueBits ?
-        ValueVT : MVT::getIntegerType(RoundBits);
+      MVT RoundVT = RoundBits == ValueBits ?
+        ValueVT : MVT::getIntegerVT(RoundBits);
       SDOperand Lo, Hi;
 
       if (RoundParts > 2) {
-        MVT::ValueType HalfVT = MVT::getIntegerType(RoundBits/2);
+        MVT HalfVT = MVT::getIntegerVT(RoundBits/2);
         Lo = getCopyFromParts(DAG, Parts, RoundParts/2, PartVT, HalfVT);
         Hi = getCopyFromParts(DAG, Parts+RoundParts/2, RoundParts/2,
                               PartVT, HalfVT);
@@ -706,30 +849,30 @@
       if (RoundParts < NumParts) {
         // Assemble the trailing non-power-of-2 part.
         unsigned OddParts = NumParts - RoundParts;
-        MVT::ValueType OddVT = MVT::getIntegerType(OddParts * PartBits);
+        MVT OddVT = MVT::getIntegerVT(OddParts * PartBits);
         Hi = getCopyFromParts(DAG, Parts+RoundParts, OddParts, PartVT, OddVT);
 
         // Combine the round and odd parts.
         Lo = Val;
         if (TLI.isBigEndian())
           std::swap(Lo, Hi);
-        MVT::ValueType TotalVT = MVT::getIntegerType(NumParts * PartBits);
+        MVT TotalVT = MVT::getIntegerVT(NumParts * PartBits);
         Hi = DAG.getNode(ISD::ANY_EXTEND, TotalVT, Hi);
         Hi = DAG.getNode(ISD::SHL, TotalVT, Hi,
-                         DAG.getConstant(MVT::getSizeInBits(Lo.getValueType()),
+                         DAG.getConstant(Lo.getValueType().getSizeInBits(),
                                          TLI.getShiftAmountTy()));
         Lo = DAG.getNode(ISD::ZERO_EXTEND, TotalVT, Lo);
         Val = DAG.getNode(ISD::OR, TotalVT, Lo, Hi);
       }
     } else {
       // Handle a multi-element vector.
-      MVT::ValueType IntermediateVT, RegisterVT;
+      MVT IntermediateVT, RegisterVT;
       unsigned NumIntermediates;
       unsigned NumRegs =
         TLI.getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
                                    RegisterVT);
-
       assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
+      NumParts = NumRegs; // Silence a compiler warning.
       assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
       assert(RegisterVT == Parts[0].getValueType() &&
              "Part type doesn't match part!");
@@ -755,7 +898,7 @@
 
       // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate
       // operands.
-      Val = DAG.getNode(MVT::isVector(IntermediateVT) ?
+      Val = DAG.getNode(IntermediateVT.isVector() ?
                         ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR,
                         ValueVT, &Ops[0], NumIntermediates);
     }
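For illustration, a worked trace of the non-power-of-2 path above, assuming three
little-endian i32 parts being reassembled into an i96 value (the code that pairs
the two round parts sits between these hunks):

   // NumParts = 3, PartVT = MVT::i32, ValueVT = MVT::i96
   // RoundParts = 1 << Log2_32(3) = 2, RoundBits = 64, RoundVT = MVT::i64
   //   Lo = Parts[0], Hi = Parts[1]  ->  Val : i64 (the power-of-2 prefix)
   // RoundParts < NumParts, so OddParts = 1, OddVT = MVT::i32
   //   Hi  = getCopyFromParts(DAG, Parts+2, 1, MVT::i32, MVT::i32)
   //   Val = (zext Val to i96) | ((anyext Hi to i96) << 64)
   // i.e. the odd tail is shifted into the high bits; the std::swap handles
   // big-endian targets.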
@@ -767,21 +910,21 @@
   if (PartVT == ValueVT)
     return Val;
 
-  if (MVT::isVector(PartVT)) {
-    assert(MVT::isVector(ValueVT) && "Unknown vector conversion!");
+  if (PartVT.isVector()) {
+    assert(ValueVT.isVector() && "Unknown vector conversion!");
     return DAG.getNode(ISD::BIT_CONVERT, ValueVT, Val);
   }
 
-  if (MVT::isVector(ValueVT)) {
-    assert(MVT::getVectorElementType(ValueVT) == PartVT &&
-           MVT::getVectorNumElements(ValueVT) == 1 &&
+  if (ValueVT.isVector()) {
+    assert(ValueVT.getVectorElementType() == PartVT &&
+           ValueVT.getVectorNumElements() == 1 &&
            "Only trivial scalar-to-vector conversions should get here!");
     return DAG.getNode(ISD::BUILD_VECTOR, ValueVT, Val);
   }
 
-  if (MVT::isInteger(PartVT) &&
-      MVT::isInteger(ValueVT)) {
-    if (MVT::getSizeInBits(ValueVT) < MVT::getSizeInBits(PartVT)) {
+  if (PartVT.isInteger() &&
+      ValueVT.isInteger()) {
+    if (ValueVT.bitsLT(PartVT)) {
       // For a truncate, see if we have any information to
       // indicate whether the truncated bits will always be
       // zero or sign-extension.
@@ -794,15 +937,15 @@
     }
   }
 
-  if (MVT::isFloatingPoint(PartVT) && MVT::isFloatingPoint(ValueVT)) {
-    if (ValueVT < Val.getValueType())
+  if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
+    if (ValueVT.bitsLT(Val.getValueType()))
       // FP_ROUND's are always exact here.
       return DAG.getNode(ISD::FP_ROUND, ValueVT, Val,
                          DAG.getIntPtrConstant(1));
     return DAG.getNode(ISD::FP_EXTEND, ValueVT, Val);
   }
 
-  if (MVT::getSizeInBits(PartVT) == MVT::getSizeInBits(ValueVT))
+  if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
     return DAG.getNode(ISD::BIT_CONVERT, ValueVT, Val);
 
   assert(0 && "Unknown mismatch!");
@@ -816,43 +959,43 @@
                            SDOperand Val,
                            SDOperand *Parts,
                            unsigned NumParts,
-                           MVT::ValueType PartVT,
+                           MVT PartVT,
                            ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
   TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  MVT::ValueType PtrVT = TLI.getPointerTy();
-  MVT::ValueType ValueVT = Val.getValueType();
-  unsigned PartBits = MVT::getSizeInBits(PartVT);
+  MVT PtrVT = TLI.getPointerTy();
+  MVT ValueVT = Val.getValueType();
+  unsigned PartBits = PartVT.getSizeInBits();
   assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
 
   if (!NumParts)
     return;
 
-  if (!MVT::isVector(ValueVT)) {
+  if (!ValueVT.isVector()) {
     if (PartVT == ValueVT) {
       assert(NumParts == 1 && "No-op copy with multiple parts!");
       Parts[0] = Val;
       return;
     }
 
-    if (NumParts * PartBits > MVT::getSizeInBits(ValueVT)) {
+    if (NumParts * PartBits > ValueVT.getSizeInBits()) {
       // If the parts cover more bits than the value has, promote the value.
-      if (MVT::isFloatingPoint(PartVT) && MVT::isFloatingPoint(ValueVT)) {
+      if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
         assert(NumParts == 1 && "Do not know what to promote to!");
         Val = DAG.getNode(ISD::FP_EXTEND, PartVT, Val);
-      } else if (MVT::isInteger(PartVT) && MVT::isInteger(ValueVT)) {
-        ValueVT = MVT::getIntegerType(NumParts * PartBits);
+      } else if (PartVT.isInteger() && ValueVT.isInteger()) {
+        ValueVT = MVT::getIntegerVT(NumParts * PartBits);
         Val = DAG.getNode(ExtendKind, ValueVT, Val);
       } else {
         assert(0 && "Unknown mismatch!");
       }
-    } else if (PartBits == MVT::getSizeInBits(ValueVT)) {
+    } else if (PartBits == ValueVT.getSizeInBits()) {
       // Different types of the same size.
       assert(NumParts == 1 && PartVT != ValueVT);
       Val = DAG.getNode(ISD::BIT_CONVERT, PartVT, Val);
-    } else if (NumParts * PartBits < MVT::getSizeInBits(ValueVT)) {
+    } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
       // If the parts cover less bits than value has, truncate the value.
-      if (MVT::isInteger(PartVT) && MVT::isInteger(ValueVT)) {
-        ValueVT = MVT::getIntegerType(NumParts * PartBits);
+      if (PartVT.isInteger() && ValueVT.isInteger()) {
+        ValueVT = MVT::getIntegerVT(NumParts * PartBits);
         Val = DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
       } else {
         assert(0 && "Unknown mismatch!");
@@ -861,7 +1004,7 @@
 
     // The value may have changed - recompute ValueVT.
     ValueVT = Val.getValueType();
-    assert(NumParts * PartBits == MVT::getSizeInBits(ValueVT) &&
+    assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
            "Failed to tile the value with PartVT!");
 
     if (NumParts == 1) {
@@ -873,7 +1016,7 @@
     // Expand the value into multiple parts.
     if (NumParts & (NumParts - 1)) {
       // The number of parts is not a power of 2.  Split off and copy the tail.
-      assert(MVT::isInteger(PartVT) && MVT::isInteger(ValueVT) &&
+      assert(PartVT.isInteger() && ValueVT.isInteger() &&
              "Do not know what to expand to!");
       unsigned RoundParts = 1 << Log2_32(NumParts);
       unsigned RoundBits = RoundParts * PartBits;
@@ -886,19 +1029,19 @@
         // The odd parts were reversed by getCopyToParts - unreverse them.
         std::reverse(Parts + RoundParts, Parts + NumParts);
       NumParts = RoundParts;
-      ValueVT = MVT::getIntegerType(NumParts * PartBits);
+      ValueVT = MVT::getIntegerVT(NumParts * PartBits);
       Val = DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
     }
 
     // The number of parts is a power of 2.  Repeatedly bisect the value using
     // EXTRACT_ELEMENT.
     Parts[0] = DAG.getNode(ISD::BIT_CONVERT,
-                           MVT::getIntegerType(MVT::getSizeInBits(ValueVT)),
+                           MVT::getIntegerVT(ValueVT.getSizeInBits()),
                            Val);
     for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
       for (unsigned i = 0; i < NumParts; i += StepSize) {
         unsigned ThisBits = StepSize * PartBits / 2;
-        MVT::ValueType ThisVT = MVT::getIntegerType (ThisBits);
+        MVT ThisVT = MVT::getIntegerVT (ThisBits);
         SDOperand &Part0 = Parts[i];
         SDOperand &Part1 = Parts[i+StepSize/2];
 
@@ -923,11 +1066,11 @@
   // Vector ValueVT.
   if (NumParts == 1) {
     if (PartVT != ValueVT) {
-      if (MVT::isVector(PartVT)) {
+      if (PartVT.isVector()) {
         Val = DAG.getNode(ISD::BIT_CONVERT, PartVT, Val);
       } else {
-        assert(MVT::getVectorElementType(ValueVT) == PartVT &&
-               MVT::getVectorNumElements(ValueVT) == 1 &&
+        assert(ValueVT.getVectorElementType() == PartVT &&
+               ValueVT.getVectorNumElements() == 1 &&
                "Only trivial vector-to-scalar conversions should get here!");
         Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, PartVT, Val,
                           DAG.getConstant(0, PtrVT));
@@ -939,21 +1082,22 @@
   }
 
   // Handle a multi-element vector.
-  MVT::ValueType IntermediateVT, RegisterVT;
+  MVT IntermediateVT, RegisterVT;
   unsigned NumIntermediates;
   unsigned NumRegs =
     DAG.getTargetLoweringInfo()
       .getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
                               RegisterVT);
-  unsigned NumElements = MVT::getVectorNumElements(ValueVT);
+  unsigned NumElements = ValueVT.getVectorNumElements();
 
   assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
+  NumParts = NumRegs; // Silence a compiler warning.
   assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
 
   // Split the vector into intermediate operands.
   SmallVector<SDOperand, 8> Ops(NumIntermediates);
   for (unsigned i = 0; i != NumIntermediates; ++i)
-    if (MVT::isVector(IntermediateVT))
+    if (IntermediateVT.isVector())
       Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR,
                            IntermediateVT, Val,
                            DAG.getConstant(i * (NumElements / NumIntermediates),
@@ -985,70 +1129,114 @@
   SDOperand &N = NodeMap[V];
   if (N.Val) return N;
   
-  const Type *VTy = V->getType();
-  MVT::ValueType VT = TLI.getValueType(VTy);
   if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
+    MVT VT = TLI.getValueType(V->getType(), true);
+    
+    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
+      return N = DAG.getConstant(CI->getValue(), VT);
+
+    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
+      return N = DAG.getGlobalAddress(GV, VT);
+    
+    if (isa<ConstantPointerNull>(C))
+      return N = DAG.getConstant(0, TLI.getPointerTy());
+    
+    if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
+      return N = DAG.getConstantFP(CFP->getValueAPF(), VT);
+    
+    if (isa<UndefValue>(C) && !isa<VectorType>(V->getType()) &&
+        !V->getType()->isAggregateType())
+      return N = DAG.getNode(ISD::UNDEF, VT);
+
     if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
       visit(CE->getOpcode(), *CE);
       SDOperand N1 = NodeMap[V];
       assert(N1.Val && "visit didn't populate the ValueMap!");
       return N1;
-    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
-      return N = DAG.getGlobalAddress(GV, VT);
-    } else if (isa<ConstantPointerNull>(C)) {
-      return N = DAG.getConstant(0, TLI.getPointerTy());
-    } else if (isa<UndefValue>(C)) {
-      if (!isa<VectorType>(VTy))
-        return N = DAG.getNode(ISD::UNDEF, VT);
-
-      // Create a BUILD_VECTOR of undef nodes.
-      const VectorType *PTy = cast<VectorType>(VTy);
-      unsigned NumElements = PTy->getNumElements();
-      MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
+    }
+    
+    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
+      SmallVector<SDOperand, 4> Constants;
+      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
+           OI != OE; ++OI) {
+        SDNode *Val = getValue(*OI).Val;
+        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
+          Constants.push_back(SDOperand(Val, i));
+      }
+      return DAG.getMergeValues(&Constants[0], Constants.size());
+    }
+
+    if (const ArrayType *ATy = dyn_cast<ArrayType>(C->getType())) {
+      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
+             "Unknown array constant!");
+      unsigned NumElts = ATy->getNumElements();
+      if (NumElts == 0)
+        return SDOperand(); // empty array
+      MVT EltVT = TLI.getValueType(ATy->getElementType());
+      SmallVector<SDOperand, 4> Constants(NumElts);
+      for (unsigned i = 0, e = NumElts; i != e; ++i) {
+        if (isa<UndefValue>(C))
+          Constants[i] = DAG.getNode(ISD::UNDEF, EltVT);
+        else if (EltVT.isFloatingPoint())
+          Constants[i] = DAG.getConstantFP(0, EltVT);
+        else
+          Constants[i] = DAG.getConstant(0, EltVT);
+      }
+      return DAG.getMergeValues(&Constants[0], Constants.size());
+    }
 
-      SmallVector<SDOperand, 8> Ops;
-      Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT));
-      
-      // Create a VConstant node with generic Vector type.
-      MVT::ValueType VT = MVT::getVectorType(PVT, NumElements);
-      return N = DAG.getNode(ISD::BUILD_VECTOR, VT,
-                             &Ops[0], Ops.size());
-    } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
-      return N = DAG.getConstantFP(CFP->getValueAPF(), VT);
-    } else if (const VectorType *PTy = dyn_cast<VectorType>(VTy)) {
-      unsigned NumElements = PTy->getNumElements();
-      MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
-      
-      // Now that we know the number and type of the elements, push a
-      // Constant or ConstantFP node onto the ops list for each element of
-      // the vector constant.
-      SmallVector<SDOperand, 8> Ops;
-      if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
-        for (unsigned i = 0; i != NumElements; ++i)
-          Ops.push_back(getValue(CP->getOperand(i)));
-      } else {
-        assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
-        SDOperand Op;
-        if (MVT::isFloatingPoint(PVT))
-          Op = DAG.getConstantFP(0, PVT);
+    if (const StructType *STy = dyn_cast<StructType>(C->getType())) {
+      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
+             "Unknown struct constant!");
+      unsigned NumElts = STy->getNumElements();
+      if (NumElts == 0)
+        return SDOperand(); // empty struct
+      SmallVector<SDOperand, 4> Constants(NumElts);
+      for (unsigned i = 0, e = NumElts; i != e; ++i) {
+        MVT EltVT = TLI.getValueType(STy->getElementType(i));
+        if (isa<UndefValue>(C))
+          Constants[i] = DAG.getNode(ISD::UNDEF, EltVT);
+        else if (EltVT.isFloatingPoint())
+          Constants[i] = DAG.getConstantFP(0, EltVT);
         else
-          Op = DAG.getConstant(0, PVT);
-        Ops.assign(NumElements, Op);
+          Constants[i] = DAG.getConstant(0, EltVT);
       }
-      
-      // Create a BUILD_VECTOR node.
-      MVT::ValueType VT = MVT::getVectorType(PVT, NumElements);
-      return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, VT, &Ops[0],
-                                      Ops.size());
+      return DAG.getMergeValues(&Constants[0], Constants.size());
+    }
+
+    const VectorType *VecTy = cast<VectorType>(V->getType());
+    unsigned NumElements = VecTy->getNumElements();
+    
+    // Now that we know the number and type of the elements, get that number of
+    // elements into the Ops array based on what kind of constant it is.
+    SmallVector<SDOperand, 16> Ops;
+    if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
+      for (unsigned i = 0; i != NumElements; ++i)
+        Ops.push_back(getValue(CP->getOperand(i)));
     } else {
-      // Canonicalize all constant ints to be unsigned.
-      return N = DAG.getConstant(cast<ConstantInt>(C)->getValue(),VT);
+      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
+             "Unknown vector constant!");
+      MVT EltVT = TLI.getValueType(VecTy->getElementType());
+
+      SDOperand Op;
+      if (isa<UndefValue>(C))
+        Op = DAG.getNode(ISD::UNDEF, EltVT);
+      else if (EltVT.isFloatingPoint())
+        Op = DAG.getConstantFP(0, EltVT);
+      else
+        Op = DAG.getConstant(0, EltVT);
+      Ops.assign(NumElements, Op);
     }
+    
+    // Create a BUILD_VECTOR node.
+    return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size());
   }
       
+  // If this is a static alloca, generate it as the frameindex rather than
+  // recomputing the address.
   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
     std::map<const AllocaInst*, int>::iterator SI =
-    FuncInfo.StaticAllocaMap.find(AI);
+      FuncInfo.StaticAllocaMap.find(AI);
     if (SI != FuncInfo.StaticAllocaMap.end())
       return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
   }
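For illustration, what getValue now returns for a first-class aggregate constant,
assuming the struct constant { i32 1, float 2.0 }:

   // Each member is lowered on its own and the pieces are combined with
   // getMergeValues, giving one node with two results:
   //   result 0 : i32 constant 1
   //   result 1 : f32 constant 2.0
   // Consumers then address member i as SDOperand(N.Val, N.ResNo + i), which
   // is how visitRet, visitStore and visitInsertValue below pick out
   // individual members.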
@@ -1056,16 +1244,8 @@
   unsigned InReg = FuncInfo.ValueMap[V];
   assert(InReg && "Value not in map!");
   
-  MVT::ValueType RegisterVT = TLI.getRegisterType(VT);
-  unsigned NumRegs = TLI.getNumRegisters(VT);
-
-  std::vector<unsigned> Regs(NumRegs);
-  for (unsigned i = 0; i != NumRegs; ++i)
-    Regs[i] = InReg + i;
-
-  RegsForValue RFV(Regs, RegisterVT, VT);
+  RegsForValue RFV(TLI, InReg, V->getType());
   SDOperand Chain = DAG.getEntryNode();
-
   return RFV.getCopyFromRegs(DAG, Chain, NULL);
 }
 
@@ -1075,36 +1255,43 @@
     DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getControlRoot()));
     return;
   }
+  
   SmallVector<SDOperand, 8> NewValues;
   NewValues.push_back(getControlRoot());
   for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {  
     SDOperand RetOp = getValue(I.getOperand(i));
-    MVT::ValueType VT = RetOp.getValueType();
 
-    // FIXME: C calling convention requires the return type to be promoted to
-    // at least 32-bit. But this is not necessary for non-C calling conventions.
-    if (MVT::isInteger(VT)) {
-      MVT::ValueType MinVT = TLI.getRegisterType(MVT::i32);
-      if (MVT::getSizeInBits(VT) < MVT::getSizeInBits(MinVT))
-        VT = MinVT;
-    }
-
-    unsigned NumParts = TLI.getNumRegisters(VT);
-    MVT::ValueType PartVT = TLI.getRegisterType(VT);
-    SmallVector<SDOperand, 4> Parts(NumParts);
-    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
-
-    const Function *F = I.getParent()->getParent();
-    if (F->paramHasAttr(0, ParamAttr::SExt))
-      ExtendKind = ISD::SIGN_EXTEND;
-    else if (F->paramHasAttr(0, ParamAttr::ZExt))
-      ExtendKind = ISD::ZERO_EXTEND;
-
-    getCopyToParts(DAG, RetOp, &Parts[0], NumParts, PartVT, ExtendKind);
-
-    for (unsigned i = 0; i < NumParts; ++i) {
-      NewValues.push_back(Parts[i]);
-      NewValues.push_back(DAG.getArgFlags(ISD::ArgFlagsTy()));
+    SmallVector<MVT, 4> ValueVTs;
+    ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
+    for (unsigned j = 0, f = ValueVTs.size(); j != f; ++j) {
+      MVT VT = ValueVTs[j];
+
+      // FIXME: C calling convention requires the return type to be promoted to
+      // at least 32-bit. But this is not necessary for non-C calling conventions.
+      if (VT.isInteger()) {
+        MVT MinVT = TLI.getRegisterType(MVT::i32);
+        if (VT.bitsLT(MinVT))
+          VT = MinVT;
+      }
+
+      unsigned NumParts = TLI.getNumRegisters(VT);
+      MVT PartVT = TLI.getRegisterType(VT);
+      SmallVector<SDOperand, 4> Parts(NumParts);
+      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
+  
+      const Function *F = I.getParent()->getParent();
+      if (F->paramHasAttr(0, ParamAttr::SExt))
+        ExtendKind = ISD::SIGN_EXTEND;
+      else if (F->paramHasAttr(0, ParamAttr::ZExt))
+        ExtendKind = ISD::ZERO_EXTEND;
+
+      getCopyToParts(DAG, SDOperand(RetOp.Val, RetOp.ResNo + j),
+                     &Parts[0], NumParts, PartVT, ExtendKind);
+
+      for (unsigned i = 0; i < NumParts; ++i) {
+        NewValues.push_back(Parts[i]);
+        NewValues.push_back(DAG.getArgFlags(ISD::ArgFlagsTy()));
+      }
     }
   }
   DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other,
@@ -1210,8 +1397,8 @@
         case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
         case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
         case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
-        case FCmpInst::FCMP_ORD:   FOC = ISD::SETEQ; FPC = ISD::SETO;   break;
-        case FCmpInst::FCMP_UNO:   FOC = ISD::SETNE; FPC = ISD::SETUO;  break;
+        case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
+        case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
         case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
         case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
         case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
@@ -1311,13 +1498,13 @@
     NextBlock = BBI;
 
   if (I.isUnconditional()) {
+    // Update machine-CFG edges.
+    CurMBB->addSuccessor(Succ0MBB);
+    
     // If this is not a fall-through branch, emit the branch.
     if (Succ0MBB != NextBlock)
       DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getControlRoot(),
                               DAG.getBasicBlock(Succ0MBB)));
-
-    // Update machine-CFG edges.
-    CurMBB->addSuccessor(Succ0MBB);
     return;
   }
 
@@ -1405,7 +1592,7 @@
     uint64_t High  = cast<ConstantInt>(CB.CmpRHS)->getSExtValue();
 
     SDOperand CmpOp = getValue(CB.CmpMHS);
-    MVT::ValueType VT = CmpOp.getValueType();
+    MVT VT = CmpOp.getValueType();
 
     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
       Cond = DAG.getSetCC(MVT::i1, CmpOp, DAG.getConstant(High, VT), ISD::SETLE);
@@ -1414,9 +1601,12 @@
       Cond = DAG.getSetCC(MVT::i1, SUB,
                           DAG.getConstant(High-Low, VT), ISD::SETULE);
     }
-    
   }
   
+  // Update successor info
+  CurMBB->addSuccessor(CB.TrueBB);
+  CurMBB->addSuccessor(CB.FalseBB);
+  
   // Set NextBlock to be the MBB immediately after the current one, if any.
   // This is used to avoid emitting unnecessary branches to the next block.
   MachineBasicBlock *NextBlock = 0;
@@ -1438,16 +1628,13 @@
   else
     DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond, 
                             DAG.getBasicBlock(CB.FalseBB)));
-  // Update successor info
-  CurMBB->addSuccessor(CB.TrueBB);
-  CurMBB->addSuccessor(CB.FalseBB);
 }
 
 /// visitJumpTable - Emit JumpTable node in the current MBB
 void SelectionDAGLowering::visitJumpTable(SelectionDAGISel::JumpTable &JT) {
   // Emit the code for the jump table
   assert(JT.Reg != -1U && "Should lower JT Header first!");
-  MVT::ValueType PTy = TLI.getPointerTy();
+  MVT PTy = TLI.getPointerTy();
   SDOperand Index = DAG.getCopyFromReg(getControlRoot(), JT.Reg, PTy);
   SDOperand Table = DAG.getJumpTable(JT.JTI, PTy);
   DAG.setRoot(DAG.getNode(ISD::BR_JT, MVT::Other, Index.getValue(1),
@@ -1463,7 +1650,7 @@
   // and conditional branch to default mbb if the result is greater than the
   // difference between smallest and largest cases.
   SDOperand SwitchOp = getValue(JTH.SValue);
-  MVT::ValueType VT = SwitchOp.getValueType();
+  MVT VT = SwitchOp.getValueType();
   SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp,
                               DAG.getConstant(JTH.First, VT));
   
@@ -1472,7 +1659,7 @@
   // register so it can be used as an index into the jump table in a 
   // subsequent basic block.  This value may be smaller or larger than the
   // target's pointer type, and therefore require extension or truncating.
-  if (MVT::getSizeInBits(VT) > MVT::getSizeInBits(TLI.getPointerTy()))
+  if (VT.bitsGT(TLI.getPointerTy()))
     SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB);
   else
     SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB);
@@ -1512,7 +1699,7 @@
 void SelectionDAGLowering::visitBitTestHeader(SelectionDAGISel::BitTestBlock &B) {
   // Subtract the minimum value
   SDOperand SwitchOp = getValue(B.SValue);
-  MVT::ValueType VT = SwitchOp.getValueType();
+  MVT VT = SwitchOp.getValueType();
   SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp,
                               DAG.getConstant(B.First, VT));
 
@@ -1522,7 +1709,7 @@
                                     ISD::SETUGT);
 
   SDOperand ShiftOp;
-  if (MVT::getSizeInBits(VT) > MVT::getSizeInBits(TLI.getShiftAmountTy()))
+  if (VT.bitsGT(TLI.getShiftAmountTy()))
     ShiftOp = DAG.getNode(ISD::TRUNCATE, TLI.getShiftAmountTy(), SUB);
   else
     ShiftOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getShiftAmountTy(), SUB);
@@ -1536,9 +1723,6 @@
   SDOperand CopyTo = DAG.getCopyToReg(getControlRoot(), SwitchReg, SwitchVal);
   B.Reg = SwitchReg;
 
-  SDOperand BrRange = DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, RangeCmp,
-                                  DAG.getBasicBlock(B.Default));
-
   // Set NextBlock to be the MBB immediately after the current one, if any.
   // This is used to avoid emitting unnecessary branches to the next block.
   MachineBasicBlock *NextBlock = 0;
@@ -1547,15 +1731,19 @@
     NextBlock = BBI;
 
   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
+
+  CurMBB->addSuccessor(B.Default);
+  CurMBB->addSuccessor(MBB);
+
+  SDOperand BrRange = DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, RangeCmp,
+                                  DAG.getBasicBlock(B.Default));
+  
   if (MBB == NextBlock)
     DAG.setRoot(BrRange);
   else
     DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, CopyTo,
                             DAG.getBasicBlock(MBB)));
 
-  CurMBB->addSuccessor(B.Default);
-  CurMBB->addSuccessor(MBB);
-
   return;
 }
 
@@ -1564,15 +1752,18 @@
                                             unsigned Reg,
                                             SelectionDAGISel::BitTestCase &B) {
   // Emit bit tests and jumps
-  SDOperand SwitchVal = DAG.getCopyFromReg(getControlRoot(), Reg, TLI.getPointerTy());
+  SDOperand SwitchVal = DAG.getCopyFromReg(getControlRoot(), Reg, 
+                                           TLI.getPointerTy());
   
-  SDOperand AndOp = DAG.getNode(ISD::AND, TLI.getPointerTy(),
-                                SwitchVal,
-                                DAG.getConstant(B.Mask,
-                                                TLI.getPointerTy()));
+  SDOperand AndOp = DAG.getNode(ISD::AND, TLI.getPointerTy(), SwitchVal,
+                                DAG.getConstant(B.Mask, TLI.getPointerTy()));
   SDOperand AndCmp = DAG.getSetCC(TLI.getSetCCResultType(AndOp), AndOp,
                                   DAG.getConstant(0, TLI.getPointerTy()),
                                   ISD::SETNE);
+
+  CurMBB->addSuccessor(B.TargetBB);
+  CurMBB->addSuccessor(NextMBB);
+  
   SDOperand BrAnd = DAG.getNode(ISD::BRCOND, MVT::Other, getControlRoot(),
                                 AndCmp, DAG.getBasicBlock(B.TargetBB));
 
@@ -1589,9 +1780,6 @@
     DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrAnd,
                             DAG.getBasicBlock(NextMBB)));
 
-  CurMBB->addSuccessor(B.TargetBB);
-  CurMBB->addSuccessor(NextMBB);
-
   return;
 }
 
@@ -1613,13 +1801,13 @@
       CopyValueToVirtualRegister(&I, VMI->second);
   }
 
-  // Drop into normal successor.
-  DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getControlRoot(),
-                          DAG.getBasicBlock(Return)));
-
   // Update successor info
   CurMBB->addSuccessor(Return);
   CurMBB->addSuccessor(LandingPad);
+
+  // Drop into normal successor.
+  DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getControlRoot(),
+                          DAG.getBasicBlock(Return)));
 }
 
 void SelectionDAGLowering::visitUnwind(UnwindInst &I) {
@@ -1935,7 +2123,7 @@
                                                     CaseRecVector& WorkList,
                                                     Value* SV,
                                                     MachineBasicBlock* Default){
-  unsigned IntPtrBits = MVT::getSizeInBits(TLI.getPointerTy());
+  unsigned IntPtrBits = TLI.getPointerTy().getSizeInBits();
 
   Case& FrontCase = *CR.Range.first;
   Case& BackCase  = *(CR.Range.second-1);
@@ -2108,11 +2296,11 @@
     // Update machine-CFG edges.
 
     // If this is not a fall-through branch, emit the branch.
+    CurMBB->addSuccessor(Default);
     if (Default != NextBlock)
       DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getControlRoot(),
                               DAG.getBasicBlock(Default)));
-
-    CurMBB->addSuccessor(Default);
+    
     return;
   }
   
@@ -2201,10 +2389,9 @@
   SDOperand Op1 = getValue(I.getOperand(0));
   SDOperand Op2 = getValue(I.getOperand(1));
   
-  if (MVT::getSizeInBits(TLI.getShiftAmountTy()) <
-      MVT::getSizeInBits(Op2.getValueType()))
+  if (TLI.getShiftAmountTy().bitsLT(Op2.getValueType()))
     Op2 = DAG.getNode(ISD::TRUNCATE, TLI.getShiftAmountTy(), Op2);
-  else if (TLI.getShiftAmountTy() > Op2.getValueType())
+  else if (TLI.getShiftAmountTy().bitsGT(Op2.getValueType()))
     Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);
   
   setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
@@ -2255,8 +2442,8 @@
     case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
     case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
     case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
-    case FCmpInst::FCMP_ORD:   FOC = ISD::SETEQ; FPC = ISD::SETO;   break;
-    case FCmpInst::FCMP_UNO:   FOC = ISD::SETNE; FPC = ISD::SETUO;  break;
+    case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
+    case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
     case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
     case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
     case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
@@ -2276,6 +2463,75 @@
   setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Condition));
 }
 
+void SelectionDAGLowering::visitVICmp(User &I) {
+  ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
+  if (VICmpInst *IC = dyn_cast<VICmpInst>(&I))
+    predicate = IC->getPredicate();
+  else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
+    predicate = ICmpInst::Predicate(IC->getPredicate());
+  SDOperand Op1 = getValue(I.getOperand(0));
+  SDOperand Op2 = getValue(I.getOperand(1));
+  ISD::CondCode Opcode;
+  switch (predicate) {
+    case ICmpInst::ICMP_EQ  : Opcode = ISD::SETEQ; break;
+    case ICmpInst::ICMP_NE  : Opcode = ISD::SETNE; break;
+    case ICmpInst::ICMP_UGT : Opcode = ISD::SETUGT; break;
+    case ICmpInst::ICMP_UGE : Opcode = ISD::SETUGE; break;
+    case ICmpInst::ICMP_ULT : Opcode = ISD::SETULT; break;
+    case ICmpInst::ICMP_ULE : Opcode = ISD::SETULE; break;
+    case ICmpInst::ICMP_SGT : Opcode = ISD::SETGT; break;
+    case ICmpInst::ICMP_SGE : Opcode = ISD::SETGE; break;
+    case ICmpInst::ICMP_SLT : Opcode = ISD::SETLT; break;
+    case ICmpInst::ICMP_SLE : Opcode = ISD::SETLE; break;
+    default:
+      assert(!"Invalid ICmp predicate value");
+      Opcode = ISD::SETEQ;
+      break;
+  }
+  setValue(&I, DAG.getVSetCC(Op1.getValueType(), Op1, Op2, Opcode));
+}
+
+void SelectionDAGLowering::visitVFCmp(User &I) {
+  FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
+  if (VFCmpInst *FC = dyn_cast<VFCmpInst>(&I))
+    predicate = FC->getPredicate();
+  else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
+    predicate = FCmpInst::Predicate(FC->getPredicate());
+  SDOperand Op1 = getValue(I.getOperand(0));
+  SDOperand Op2 = getValue(I.getOperand(1));
+  ISD::CondCode Condition, FOC, FPC;
+  switch (predicate) {
+    case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
+    case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
+    case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
+    case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
+    case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
+    case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
+    case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
+    case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
+    case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
+    case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
+    case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
+    case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
+    case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
+    case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
+    case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
+    case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
+    default:
+      assert(!"Invalid VFCmp predicate value");
+      FOC = FPC = ISD::SETFALSE;
+      break;
+  }
+  if (FiniteOnlyFPMath())
+    Condition = FOC;
+  else 
+    Condition = FPC;
+    
+  MVT DestVT = TLI.getValueType(I.getType());
+    
+  setValue(&I, DAG.getVSetCC(DestVT, Op1, Op2, Condition));
+}
+
 void SelectionDAGLowering::visitSelect(User &I) {
   SDOperand Cond     = getValue(I.getOperand(0));
   SDOperand TrueVal  = getValue(I.getOperand(1));
@@ -2288,7 +2544,7 @@
 void SelectionDAGLowering::visitTrunc(User &I) {
   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
   SDOperand N = getValue(I.getOperand(0));
-  MVT::ValueType DestVT = TLI.getValueType(I.getType());
+  MVT DestVT = TLI.getValueType(I.getType());
   setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
 }
 
@@ -2296,7 +2552,7 @@
   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
   // ZExt also can't be a cast to bool for same reason. So, nothing much to do
   SDOperand N = getValue(I.getOperand(0));
-  MVT::ValueType DestVT = TLI.getValueType(I.getType());
+  MVT DestVT = TLI.getValueType(I.getType());
   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
 }
 
@@ -2304,49 +2560,49 @@
   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
   // SExt also can't be a cast to bool for same reason. So, nothing much to do
   SDOperand N = getValue(I.getOperand(0));
-  MVT::ValueType DestVT = TLI.getValueType(I.getType());
+  MVT DestVT = TLI.getValueType(I.getType());
   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
 }
 
 void SelectionDAGLowering::visitFPTrunc(User &I) {
   // FPTrunc is never a no-op cast, no need to check
   SDOperand N = getValue(I.getOperand(0));
-  MVT::ValueType DestVT = TLI.getValueType(I.getType());
+  MVT DestVT = TLI.getValueType(I.getType());
   setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N, DAG.getIntPtrConstant(0)));
 }
 
 void SelectionDAGLowering::visitFPExt(User &I){ 
   // FPTrunc is never a no-op cast, no need to check
   SDOperand N = getValue(I.getOperand(0));
-  MVT::ValueType DestVT = TLI.getValueType(I.getType());
+  MVT DestVT = TLI.getValueType(I.getType());
   setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
 }
 
 void SelectionDAGLowering::visitFPToUI(User &I) { 
   // FPToUI is never a no-op cast, no need to check
   SDOperand N = getValue(I.getOperand(0));
-  MVT::ValueType DestVT = TLI.getValueType(I.getType());
+  MVT DestVT = TLI.getValueType(I.getType());
   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
 }
 
 void SelectionDAGLowering::visitFPToSI(User &I) {
   // FPToSI is never a no-op cast, no need to check
   SDOperand N = getValue(I.getOperand(0));
-  MVT::ValueType DestVT = TLI.getValueType(I.getType());
+  MVT DestVT = TLI.getValueType(I.getType());
   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
 }
 
 void SelectionDAGLowering::visitUIToFP(User &I) { 
   // UIToFP is never a no-op cast, no need to check
   SDOperand N = getValue(I.getOperand(0));
-  MVT::ValueType DestVT = TLI.getValueType(I.getType());
+  MVT DestVT = TLI.getValueType(I.getType());
   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
 }
 
 void SelectionDAGLowering::visitSIToFP(User &I){ 
   // UIToFP is never a no-op cast, no need to check
   SDOperand N = getValue(I.getOperand(0));
-  MVT::ValueType DestVT = TLI.getValueType(I.getType());
+  MVT DestVT = TLI.getValueType(I.getType());
   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
 }
 
@@ -2354,10 +2610,10 @@
   // What to do depends on the size of the integer and the size of the pointer.
   // We can either truncate, zero extend, or no-op, accordingly.
   SDOperand N = getValue(I.getOperand(0));
-  MVT::ValueType SrcVT = N.getValueType();
-  MVT::ValueType DestVT = TLI.getValueType(I.getType());
+  MVT SrcVT = N.getValueType();
+  MVT DestVT = TLI.getValueType(I.getType());
   SDOperand Result;
-  if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(SrcVT))
+  if (DestVT.bitsLT(SrcVT))
     Result = DAG.getNode(ISD::TRUNCATE, DestVT, N);
   else 
     // Note: ZERO_EXTEND can handle cases where the sizes are equal too
@@ -2369,9 +2625,9 @@
   // What to do depends on the size of the integer and the size of the pointer.
   // We can either truncate, zero extend, or no-op, accordingly.
   SDOperand N = getValue(I.getOperand(0));
-  MVT::ValueType SrcVT = N.getValueType();
-  MVT::ValueType DestVT = TLI.getValueType(I.getType());
-  if (MVT::getSizeInBits(DestVT) < MVT::getSizeInBits(SrcVT))
+  MVT SrcVT = N.getValueType();
+  MVT DestVT = TLI.getValueType(I.getType());
+  if (DestVT.bitsLT(SrcVT))
     setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
   else 
     // Note: ZERO_EXTEND can handle cases where the sizes are equal too
@@ -2380,7 +2636,7 @@
 
 void SelectionDAGLowering::visitBitCast(User &I) { 
   SDOperand N = getValue(I.getOperand(0));
-  MVT::ValueType DestVT = TLI.getValueType(I.getType());
+  MVT DestVT = TLI.getValueType(I.getType());
 
   // BitCast assures us that source and destination are the same size so this 
   // is either a BIT_CONVERT or a no-op.
@@ -2419,6 +2675,72 @@
                            V1, V2, Mask));
 }
 
+void SelectionDAGLowering::visitInsertValue(InsertValueInst &I) {
+  const Value *Op0 = I.getOperand(0);
+  const Value *Op1 = I.getOperand(1);
+  const Type *AggTy = I.getType();
+  const Type *ValTy = Op1->getType();
+  bool IntoUndef = isa<UndefValue>(Op0);
+  bool FromUndef = isa<UndefValue>(Op1);
+
+  unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
+                                            I.idx_begin(), I.idx_end());
+
+  SmallVector<MVT, 4> AggValueVTs;
+  ComputeValueVTs(TLI, AggTy, AggValueVTs);
+  SmallVector<MVT, 4> ValValueVTs;
+  ComputeValueVTs(TLI, ValTy, ValValueVTs);
+
+  unsigned NumAggValues = AggValueVTs.size();
+  unsigned NumValValues = ValValueVTs.size();
+  SmallVector<SDOperand, 4> Values(NumAggValues);
+
+  SDOperand Agg = getValue(Op0);
+  SDOperand Val = getValue(Op1);
+  unsigned i = 0;
+  // Copy the beginning value(s) from the original aggregate.
+  for (; i != LinearIndex; ++i)
+    Values[i] = IntoUndef ? DAG.getNode(ISD::UNDEF, AggValueVTs[i]) :
+                SDOperand(Agg.Val, Agg.ResNo + i);
+  // Copy values from the inserted value(s).
+  for (; i != LinearIndex + NumValValues; ++i)
+    Values[i] = FromUndef ? DAG.getNode(ISD::UNDEF, AggValueVTs[i]) :
+                SDOperand(Val.Val, Val.ResNo + i - LinearIndex);
+  // Copy remaining value(s) from the original aggregate.
+  for (; i != NumAggValues; ++i)
+    Values[i] = IntoUndef ? DAG.getNode(ISD::UNDEF, AggValueVTs[i]) :
+                SDOperand(Agg.Val, Agg.ResNo + i);
+
+  setValue(&I, DAG.getMergeValues(DAG.getVTList(&AggValueVTs[0], NumAggValues),
+                                  &Values[0], NumAggValues));
+}
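For illustration, a worked example of the linear indexing used above, assuming an
aggregate of type { i32, { float, double }, i32 } (flattened value order:
i32, float, double, i32):

   //   insertvalue %agg, double %d, 1, 1
   //     -> LinearIndex = 2, NumValValues = 1, NumAggValues = 4
   //   insertvalue %agg, { float, double } %v, 1
   //     -> LinearIndex = 1, NumValValues = 2, NumAggValues = 4
   // Values[0 .. LinearIndex-1] are copied from %agg, the next NumValValues
   // entries come from the inserted value, and the remainder again from %agg.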
+
+void SelectionDAGLowering::visitExtractValue(ExtractValueInst &I) {
+  const Value *Op0 = I.getOperand(0);
+  const Type *AggTy = Op0->getType();
+  const Type *ValTy = I.getType();
+  bool OutOfUndef = isa<UndefValue>(Op0);
+
+  unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
+                                            I.idx_begin(), I.idx_end());
+
+  SmallVector<MVT, 4> ValValueVTs;
+  ComputeValueVTs(TLI, ValTy, ValValueVTs);
+
+  unsigned NumValValues = ValValueVTs.size();
+  SmallVector<SDOperand, 4> Values(NumValValues);
+
+  SDOperand Agg = getValue(Op0);
+  // Copy out the selected value(s).
+  for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
+    Values[i - LinearIndex] =
+      OutOfUndef ? DAG.getNode(ISD::UNDEF, Agg.Val->getValueType(Agg.ResNo + i)) :
+                   SDOperand(Agg.Val, Agg.ResNo + i);
+
+  setValue(&I, DAG.getMergeValues(DAG.getVTList(&ValValueVTs[0], NumValValues),
+                                  &Values[0], NumValValues));
+}
+
 
 void SelectionDAGLowering::visitGetElementPtr(User &I) {
   SDOperand N = getValue(I.getOperand(0));
@@ -2455,9 +2777,9 @@
 
       // If the index is smaller or larger than intptr_t, truncate or extend
       // it.
-      if (IdxN.getValueType() < N.getValueType()) {
+      if (IdxN.getValueType().bitsLT(N.getValueType())) {
         IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
-      } else if (IdxN.getValueType() > N.getValueType())
+      } else if (IdxN.getValueType().bitsGT(N.getValueType()))
         IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);
 
       // If this is a multiply by a power of two, turn it into a shl
@@ -2491,10 +2813,10 @@
              I.getAlignment());
 
   SDOperand AllocSize = getValue(I.getArraySize());
-  MVT::ValueType IntPtr = TLI.getPointerTy();
-  if (IntPtr < AllocSize.getValueType())
+  MVT IntPtr = TLI.getPointerTy();
+  if (IntPtr.bitsLT(AllocSize.getValueType()))
     AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
-  else if (IntPtr > AllocSize.getValueType())
+  else if (IntPtr.bitsGT(AllocSize.getValueType()))
     AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);
 
   AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
@@ -2517,7 +2839,7 @@
                           DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
 
   SDOperand Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
-  const MVT::ValueType *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(),
+  const MVT *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(),
                                                     MVT::Other);
   SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, 2, Ops, 3);
   setValue(&I, DSA);
@@ -2529,7 +2851,19 @@
 }
 
 void SelectionDAGLowering::visitLoad(LoadInst &I) {
-  SDOperand Ptr = getValue(I.getOperand(0));
+  const Value *SV = I.getOperand(0);
+  SDOperand Ptr = getValue(SV);
+
+  const Type *Ty = I.getType();
+  bool isVolatile = I.isVolatile();
+  unsigned Alignment = I.getAlignment();
+
+  SmallVector<MVT, 4> ValueVTs;
+  SmallVector<uint64_t, 4> Offsets;
+  ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
+  unsigned NumValues = ValueVTs.size();
+  if (NumValues == 0)
+    return;
 
   SDOperand Root;
   if (I.isVolatile())
@@ -2539,33 +2873,57 @@
     Root = DAG.getRoot();
   }
 
-  setValue(&I, getLoadFrom(I.getType(), Ptr, I.getOperand(0),
-                           Root, I.isVolatile(), I.getAlignment()));
-}
-
-SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr,
-                                            const Value *SV, SDOperand Root,
-                                            bool isVolatile, 
-                                            unsigned Alignment) {
-  SDOperand L =
-    DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SV, 0, 
-                isVolatile, Alignment);
-
+  SmallVector<SDOperand, 4> Values(NumValues);
+  SmallVector<SDOperand, 4> Chains(NumValues);
+  MVT PtrVT = Ptr.getValueType();
+  for (unsigned i = 0; i != NumValues; ++i) {
+    SDOperand L = DAG.getLoad(ValueVTs[i], Root,
+                              DAG.getNode(ISD::ADD, PtrVT, Ptr,
+                                          DAG.getConstant(Offsets[i], PtrVT)),
+                              SV, Offsets[i],
+                              isVolatile, Alignment);
+    Values[i] = L;
+    Chains[i] = L.getValue(1);
+  }
+  
+  SDOperand Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
+                                &Chains[0], NumValues);
   if (isVolatile)
-    DAG.setRoot(L.getValue(1));
+    DAG.setRoot(Chain);
   else
-    PendingLoads.push_back(L.getValue(1));
-  
-  return L;
+    PendingLoads.push_back(Chain);
+
+  setValue(&I, DAG.getMergeValues(DAG.getVTList(&ValueVTs[0], NumValues),
+                                  &Values[0], NumValues));
 }
 
 
 void SelectionDAGLowering::visitStore(StoreInst &I) {
   Value *SrcV = I.getOperand(0);
   SDOperand Src = getValue(SrcV);
-  SDOperand Ptr = getValue(I.getOperand(1));
-  DAG.setRoot(DAG.getStore(getRoot(), Src, Ptr, I.getOperand(1), 0,
-                           I.isVolatile(), I.getAlignment()));
+  Value *PtrV = I.getOperand(1);
+  SDOperand Ptr = getValue(PtrV);
+
+  SmallVector<MVT, 4> ValueVTs;
+  SmallVector<uint64_t, 4> Offsets;
+  ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
+  unsigned NumValues = ValueVTs.size();
+  if (NumValues == 0)
+    return;
+
+  SDOperand Root = getRoot();
+  SmallVector<SDOperand, 4> Chains(NumValues);
+  MVT PtrVT = Ptr.getValueType();
+  bool isVolatile = I.isVolatile();
+  unsigned Alignment = I.getAlignment();
+  for (unsigned i = 0; i != NumValues; ++i)
+    Chains[i] = DAG.getStore(Root, SDOperand(Src.Val, Src.ResNo + i),
+                             DAG.getNode(ISD::ADD, PtrVT, Ptr,
+                                         DAG.getConstant(Offsets[i], PtrVT)),
+                             PtrV, Offsets[i],
+                             isVolatile, Alignment);
+
+  DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, &Chains[0], NumValues));
 }
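For illustration, the DAG that visitLoad/visitStore now build for a first-class
aggregate, assuming a load of type { i32, double } whose second member sits at
byte offset 8 in the target's layout:

   // ValueVTs = { MVT::i32, MVT::f64 }, Offsets = { 0, 8 }
   //   L0 = load i32 from (Ptr + 0)
   //   L1 = load f64 from (Ptr + 8)
   //   Chain = TokenFactor(L0.chain, L1.chain)
   //   Value = MergeValues(L0, L1)
   // A volatile load sets the root to the TokenFactor; otherwise the combined
   // chain is queued on PendingLoads like a scalar load.  Stores mirror this
   // with one store per member, token-factored together.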
 
 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
@@ -2597,14 +2955,14 @@
     Ops.push_back(Op);
   }
 
-  std::vector<MVT::ValueType> VTs;
+  std::vector<MVT> VTs;
   if (I.getType() != Type::VoidTy) {
-    MVT::ValueType VT = TLI.getValueType(I.getType());
-    if (MVT::isVector(VT)) {
+    MVT VT = TLI.getValueType(I.getType());
+    if (VT.isVector()) {
       const VectorType *DestTy = cast<VectorType>(I.getType());
-      MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
+      MVT EltVT = TLI.getValueType(DestTy->getElementType());
       
-      VT = MVT::getVectorType(EltVT, DestTy->getNumElements());
+      VT = MVT::getVectorVT(EltVT, DestTy->getNumElements());
       assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
     }
     
@@ -2614,7 +2972,7 @@
   if (HasChain)
     VTs.push_back(MVT::Other);
 
-  const MVT::ValueType *VTList = DAG.getNodeValueTypes(VTs);
+  const MVT *VTList = DAG.getNodeValueTypes(VTs);
 
   // Create the node.
   SDOperand Result;
@@ -2637,7 +2995,7 @@
   }
   if (I.getType() != Type::VoidTy) {
     if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
-      MVT::ValueType VT = TLI.getValueType(PTy);
+      MVT VT = TLI.getValueType(PTy);
       Result = DAG.getNode(ISD::BIT_CONVERT, VT, Result);
     } 
     setValue(&I, Result);
@@ -2646,7 +3004,7 @@
 
 /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
 static GlobalVariable *ExtractTypeInfo (Value *V) {
-  V = IntrinsicInst::StripPointerCasts(V);
+  V = V->stripPointerCasts();
   GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
   assert ((GV || isa<ConstantPointerNull>(V)) &&
           "TypeInfo must be a global variable or NULL");
@@ -2707,6 +3065,22 @@
   }
 }
 
+
+/// implVisitBinaryAtomic - Utility function used by visitIntrinsicCall to
+/// implement the binary-input atomic intrinsics. I is the call instruction
+/// and Op is the associated ISD::NodeType.
+const char *
+SelectionDAGLowering::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
+  SDOperand Root = getRoot();   
+  SDOperand L = DAG.getAtomic(Op, Root, 
+                              getValue(I.getOperand(1)), 
+                              getValue(I.getOperand(2)),
+                              I.getOperand(1));
+  setValue(&I, L);
+  DAG.setRoot(L.getValue(1));
+  return 0;
+}
+
 /// visitIntrinsicCall - Lower the call to the specified intrinsic function.  If
 /// we want to emit this as a call to a named external function, return the name
 /// otherwise lower it and return null.
@@ -2781,20 +3155,12 @@
     MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
     DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
     if (MMI && SPI.getContext() && MMI->Verify(SPI.getContext())) {
-      SDOperand Ops[5];
-
-      Ops[0] = getRoot();
-      Ops[1] = getValue(SPI.getLineValue());
-      Ops[2] = getValue(SPI.getColumnValue());
-
       DebugInfoDesc *DD = MMI->getDescFor(SPI.getContext());
       assert(DD && "Not a debug information descriptor");
-      CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);
-      
-      Ops[3] = DAG.getString(CompileUnit->getFileName());
-      Ops[4] = DAG.getString(CompileUnit->getDirectory());
-      
-      DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops, 5));
+      DAG.setRoot(DAG.getDbgStopPoint(getRoot(),
+                                      SPI.getLine(),
+                                      SPI.getColumn(),
+                                      cast<CompileUnitDesc>(DD)));
     }
 
     return 0;
@@ -2804,9 +3170,7 @@
     DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
     if (MMI && RSI.getContext() && MMI->Verify(RSI.getContext())) {
       unsigned LabelID = MMI->RecordRegionStart(RSI.getContext());
-      DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other, getRoot(),
-                              DAG.getConstant(LabelID, MVT::i32),
-                              DAG.getConstant(0, MVT::i32)));
+      DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getRoot(), LabelID));
     }
 
     return 0;
@@ -2816,9 +3180,7 @@
     DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
     if (MMI && REI.getContext() && MMI->Verify(REI.getContext())) {
       unsigned LabelID = MMI->RecordRegionEnd(REI.getContext());
-      DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other, getRoot(),
-                              DAG.getConstant(LabelID, MVT::i32),
-                              DAG.getConstant(0, MVT::i32)));
+      DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getRoot(), LabelID));
     }
 
     return 0;
@@ -2835,8 +3197,7 @@
       assert(DD && "Not a debug information descriptor");
       SubprogramDesc *Subprogram = cast<SubprogramDesc>(DD);
       const CompileUnitDesc *CompileUnit = Subprogram->getFile();
-      unsigned SrcFile = MMI->RecordSource(CompileUnit->getDirectory(),
-                                           CompileUnit->getFileName());
+      unsigned SrcFile = MMI->RecordSource(CompileUnit);
       // Record the source line but don't create a label. It will be emitted
       // at asm emission time.
       MMI->RecordSourceLine(Subprogram->getLine(), 0, SrcFile);
@@ -2873,7 +3234,7 @@
   case Intrinsic::eh_selector_i32:
   case Intrinsic::eh_selector_i64: {
     MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
-    MVT::ValueType VT = (Intrinsic == Intrinsic::eh_selector_i32 ?
+    MVT VT = (Intrinsic == Intrinsic::eh_selector_i32 ?
                          MVT::i32 : MVT::i64);
     
     if (MMI) {
@@ -2906,7 +3267,7 @@
   case Intrinsic::eh_typeid_for_i32:
   case Intrinsic::eh_typeid_for_i64: {
     MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
-    MVT::ValueType VT = (Intrinsic == Intrinsic::eh_typeid_for_i32 ?
+    MVT VT = (Intrinsic == Intrinsic::eh_typeid_for_i32 ?
                          MVT::i32 : MVT::i64);
     
     if (MMI) {
@@ -2949,9 +3310,9 @@
    }
 
    case Intrinsic::eh_dwarf_cfa: {
-     MVT::ValueType VT = getValue(I.getOperand(1)).getValueType();
+     MVT VT = getValue(I.getOperand(1)).getValueType();
      SDOperand CfaArg;
-     if (MVT::getSizeInBits(VT) > MVT::getSizeInBits(TLI.getPointerTy()))
+     if (VT.bitsGT(TLI.getPointerTy()))
        CfaArg = DAG.getNode(ISD::TRUNCATE,
                             TLI.getPointerTy(), getValue(I.getOperand(1)));
      else
@@ -3031,21 +3392,21 @@
     return 0;
   case Intrinsic::cttz: {
     SDOperand Arg = getValue(I.getOperand(1));
-    MVT::ValueType Ty = Arg.getValueType();
+    MVT Ty = Arg.getValueType();
     SDOperand result = DAG.getNode(ISD::CTTZ, Ty, Arg);
     setValue(&I, result);
     return 0;
   }
   case Intrinsic::ctlz: {
     SDOperand Arg = getValue(I.getOperand(1));
-    MVT::ValueType Ty = Arg.getValueType();
+    MVT Ty = Arg.getValueType();
     SDOperand result = DAG.getNode(ISD::CTLZ, Ty, Arg);
     setValue(&I, result);
     return 0;
   }
   case Intrinsic::ctpop: {
     SDOperand Arg = getValue(I.getOperand(1));
-    MVT::ValueType Ty = Arg.getValueType();
+    MVT Ty = Arg.getValueType();
     SDOperand result = DAG.getNode(ISD::CTPOP, Ty, Arg);
     setValue(&I, result);
     return 0;
@@ -3068,8 +3429,7 @@
     return 0;
 
   case Intrinsic::init_trampoline: {
-    const Function *F =
-      cast<Function>(IntrinsicInst::StripPointerCasts(I.getOperand(2)));
+    const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
 
     SDOperand Ops[6];
     Ops[0] = getRoot();
@@ -3132,38 +3492,39 @@
     DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, MVT::Other, &Ops[0], 6));
     return 0;
   }
-  case Intrinsic::atomic_lcs: {
+  case Intrinsic::atomic_cmp_swap: {
     SDOperand Root = getRoot();   
-    SDOperand O3 = getValue(I.getOperand(3));
-    SDOperand L = DAG.getAtomic(ISD::ATOMIC_LCS, Root, 
+    SDOperand L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, Root, 
                                 getValue(I.getOperand(1)), 
                                 getValue(I.getOperand(2)),
-                                O3, O3.getValueType());
+                                getValue(I.getOperand(3)),
+                                I.getOperand(1));
     setValue(&I, L);
     DAG.setRoot(L.getValue(1));
     return 0;
   }
-  case Intrinsic::atomic_las: {
-    SDOperand Root = getRoot();   
-    SDOperand O2 = getValue(I.getOperand(2));
-    SDOperand L = DAG.getAtomic(ISD::ATOMIC_LAS, Root, 
-                                getValue(I.getOperand(1)), 
-                                O2, O2.getValueType());
-    setValue(&I, L);
-    DAG.setRoot(L.getValue(1));
-    return 0;
-  }
-  case Intrinsic::atomic_swap: {
-    SDOperand Root = getRoot();   
-    SDOperand O2 = getValue(I.getOperand(2));
-    SDOperand L = DAG.getAtomic(ISD::ATOMIC_SWAP, Root, 
-                                getValue(I.getOperand(1)), 
-                                O2, O2.getValueType());
-    setValue(&I, L);
-    DAG.setRoot(L.getValue(1));
-    return 0;
-  }
-
+  case Intrinsic::atomic_load_add:
+    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
+  case Intrinsic::atomic_load_sub:
+    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
+  case Intrinsic::atomic_load_and:
+    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
+  case Intrinsic::atomic_load_or:
+    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
+  case Intrinsic::atomic_load_xor:
+    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
+  case Intrinsic::atomic_load_nand:
+    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
+  case Intrinsic::atomic_load_min:
+    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
+  case Intrinsic::atomic_load_max:
+    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
+  case Intrinsic::atomic_load_umin:
+    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
+  case Intrinsic::atomic_load_umax:
+    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
+  case Intrinsic::atomic_swap:
+    return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP);
   }
 }
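
All of the new atomic_load_* and atomic_swap cases above share one lowering
shape: read the chain, build a single atomic DAG node from the pointer and
value operands, and thread the node's second result back in as the new chain.
As a rough analogy only (plain C++ with std::atomic, hypothetical names, not
the LLVM API), the dispatch collapses the same way a family of near-identical
switch arms collapses into one helper keyed by an opcode:

#include <atomic>
#include <cstdio>

// Hypothetical opcode enum mirroring ISD::ATOMIC_LOAD_* / ATOMIC_SWAP.
enum class AtomicOp { LoadAdd, LoadSub, LoadAnd, LoadOr, LoadXor, Swap };

// One helper replaces a family of near-identical case bodies; it returns the
// value the memory held before the operation, like the lowered intrinsics do.
static int implBinaryAtomic(std::atomic<int> &Mem, AtomicOp Op, int Val) {
  switch (Op) {
  case AtomicOp::LoadAdd: return Mem.fetch_add(Val);
  case AtomicOp::LoadSub: return Mem.fetch_sub(Val);
  case AtomicOp::LoadAnd: return Mem.fetch_and(Val);
  case AtomicOp::LoadOr:  return Mem.fetch_or(Val);
  case AtomicOp::LoadXor: return Mem.fetch_xor(Val);
  case AtomicOp::Swap:    return Mem.exchange(Val);
  }
  return 0;
}

int main() {
  std::atomic<int> M(10);
  int Old = implBinaryAtomic(M, AtomicOp::LoadAdd, 5);
  std::printf("old=%d new=%d\n", Old, M.load());   // old=10 new=15
  return 0;
}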
 
@@ -3202,9 +3563,7 @@
     // Both PendingLoads and PendingExports must be flushed here;
     // this call might not return.
     (void)getRoot();
-    DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other, getControlRoot(),
-                            DAG.getConstant(BeginLabel, MVT::i32),
-                            DAG.getConstant(1, MVT::i32)));
+    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getControlRoot(), BeginLabel));
   }
 
   std::pair<SDOperand,SDOperand> Result =
@@ -3221,9 +3580,7 @@
     // Insert a label at the end of the invoke call to mark the try range.  This
     // can be used to detect deletion of the invoke via the MachineModuleInfo.
     EndLabel = MMI->NextLabelID();
-    DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other, getRoot(),
-                            DAG.getConstant(EndLabel, MVT::i32),
-                            DAG.getConstant(1, MVT::i32)));
+    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getRoot(), EndLabel));
 
     // Inform MachineModuleInfo of range.
     MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
@@ -3311,6 +3668,18 @@
 
 
 void SelectionDAGLowering::visitGetResult(GetResultInst &I) {
+  if (isa<UndefValue>(I.getOperand(0))) {
+    SDOperand Undef = DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType()));
+    setValue(&I, Undef);
+    return;
+  }
+  
+  // To add support for individual return values with aggregate types,
+  // we'd need a way to take a getresult index and determine which
+  // values of the Call SDNode are associated with it.
+  assert(TLI.getValueType(I.getType(), true) != MVT::Other &&
+         "Individual return values must not be aggregates!");
+
   SDOperand Call = getValue(I.getOperand(0));
   setValue(&I, SDOperand(Call.Val, I.getIndex()));
 }
@@ -3320,23 +3689,81 @@
 /// this value and returns the result as a ValueVT value.  This uses 
 /// Chain/Flag as the input and updates them for the output Chain/Flag.
 /// If the Flag pointer is NULL, no flag is used.
-SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
-                                        SDOperand &Chain, SDOperand *Flag)const{
-  // Copy the legal parts from the registers.
-  unsigned NumParts = Regs.size();
-  SmallVector<SDOperand, 8> Parts(NumParts);
-  for (unsigned i = 0; i != NumParts; ++i) {
-    SDOperand Part = Flag ?
-                     DAG.getCopyFromReg(Chain, Regs[i], RegVT, *Flag) :
-                     DAG.getCopyFromReg(Chain, Regs[i], RegVT);
-    Chain = Part.getValue(1);
-    if (Flag)
-      *Flag = Part.getValue(2);
-    Parts[i] = Part;
-  }
+SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG, 
+                                        SDOperand &Chain,
+                                        SDOperand *Flag) const {
+  // Assemble the legal parts into the final values.
+  SmallVector<SDOperand, 4> Values(ValueVTs.size());
+  SmallVector<SDOperand, 8> Parts;
+  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
+    // Copy the legal parts from the registers.
+    MVT ValueVT = ValueVTs[Value];
+    unsigned NumRegs = TLI->getNumRegisters(ValueVT);
+    MVT RegisterVT = RegVTs[Value];
+
+    Parts.resize(NumRegs);
+    for (unsigned i = 0; i != NumRegs; ++i) {
+      SDOperand P;
+      if (Flag == 0)
+        P = DAG.getCopyFromReg(Chain, Regs[Part+i], RegisterVT);
+      else {
+        P = DAG.getCopyFromReg(Chain, Regs[Part+i], RegisterVT, *Flag);
+        *Flag = P.getValue(2);
+      }
+      Chain = P.getValue(1);
+      
+      // If the source register was virtual and if we know something about it,
+      // add an assert node.
+      if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
+          RegisterVT.isInteger() && !RegisterVT.isVector()) {
+        unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
+        FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
+        if (FLI.LiveOutRegInfo.size() > SlotNo) {
+          FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
+          
+          unsigned RegSize = RegisterVT.getSizeInBits();
+          unsigned NumSignBits = LOI.NumSignBits;
+          unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
+          
+          // FIXME: We capture more information than the dag can represent.  For
+          // now, just use the tightest assertzext/assertsext possible.
+          bool isSExt = true;
+          MVT FromVT(MVT::Other);
+          if (NumSignBits == RegSize)
+            isSExt = true, FromVT = MVT::i1;   // ASSERT SEXT 1
+          else if (NumZeroBits >= RegSize-1)
+            isSExt = false, FromVT = MVT::i1;  // ASSERT ZEXT 1
+          else if (NumSignBits > RegSize-8)
+            isSExt = true, FromVT = MVT::i8;   // ASSERT SEXT 8
+          else if (NumZeroBits >= RegSize-9)
+            isSExt = false, FromVT = MVT::i8;  // ASSERT ZEXT 8
+          else if (NumSignBits > RegSize-16)
+            isSExt = true, FromVT = MVT::i16;   // ASSERT SEXT 16
+          else if (NumZeroBits >= RegSize-17)
+            isSExt = false, FromVT = MVT::i16;  // ASSERT ZEXT 16
+          else if (NumSignBits > RegSize-32)
+            isSExt = true, FromVT = MVT::i32;   // ASSERT SEXT 32
+          else if (NumZeroBits >= RegSize-33)
+            isSExt = false, FromVT = MVT::i32;  // ASSERT ZEXT 32
+          
+          if (FromVT != MVT::Other) {
+            P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext,
+                            RegisterVT, P, DAG.getValueType(FromVT));
+
+          }
+        }
+      }
+      
+      Parts[Part+i] = P;
+    }
   
-  // Assemble the legal parts into the final value.
-  return getCopyFromParts(DAG, &Parts[0], NumParts, RegVT, ValueVT);
+    Values[Value] = getCopyFromParts(DAG, &Parts[Part], NumRegs, RegisterVT,
+                                     ValueVT);
+    Part += NumRegs;
+  }
+
+  return DAG.getMergeValues(DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
+                            &Values[0], ValueVTs.size());
 }
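
The assert-node selection above picks the narrowest width that the recorded
sign or zero bits still justify, preferring an AssertSext over an AssertZext
at each width. A minimal standalone sketch of just that threshold cascade,
using plain integers in place of the MVT machinery:

#include <cstdio>

// Returns the narrowest "assert" width justified by the known bits, or 0 if
// nothing useful is known; Signed reports AssertSext (true) vs AssertZext
// (false).  Mirrors the cascade in getCopyFromRegs.
static unsigned tightestAssertWidth(unsigned RegSize, unsigned NumSignBits,
                                    unsigned NumZeroBits, bool &Signed) {
  if (NumSignBits == RegSize)      { Signed = true;  return 1;  }
  if (NumZeroBits >= RegSize - 1)  { Signed = false; return 1;  }
  if (NumSignBits > RegSize - 8)   { Signed = true;  return 8;  }
  if (NumZeroBits >= RegSize - 9)  { Signed = false; return 8;  }
  if (NumSignBits > RegSize - 16)  { Signed = true;  return 16; }
  if (NumZeroBits >= RegSize - 17) { Signed = false; return 16; }
  if (NumSignBits > RegSize - 32)  { Signed = true;  return 32; }
  if (NumZeroBits >= RegSize - 33) { Signed = false; return 32; }
  return 0;
}

int main() {
  bool Signed;
  // A 64-bit vreg known to have 57 sign bits fits in an AssertSext to i8.
  unsigned W = tightestAssertWidth(64, 57, 0, Signed);
  std::printf("assert%s i%u\n", Signed ? "sext" : "zext", W); // assertsext i8
  return 0;
}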
 
 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
@@ -3346,19 +3773,45 @@
 void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
                                  SDOperand &Chain, SDOperand *Flag) const {
   // Get the list of the values's legal parts.
-  unsigned NumParts = Regs.size();
-  SmallVector<SDOperand, 8> Parts(NumParts);
-  getCopyToParts(DAG, Val, &Parts[0], NumParts, RegVT);
+  unsigned NumRegs = Regs.size();
+  SmallVector<SDOperand, 8> Parts(NumRegs);
+  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
+    MVT ValueVT = ValueVTs[Value];
+    unsigned NumParts = TLI->getNumRegisters(ValueVT);
+    MVT RegisterVT = RegVTs[Value];
+
+    getCopyToParts(DAG, Val.getValue(Val.ResNo + Value),
+                   &Parts[Part], NumParts, RegisterVT);
+    Part += NumParts;
+  }
 
   // Copy the parts into the registers.
-  for (unsigned i = 0; i != NumParts; ++i) {
-    SDOperand Part = Flag ?
-                     DAG.getCopyToReg(Chain, Regs[i], Parts[i], *Flag) :
-                     DAG.getCopyToReg(Chain, Regs[i], Parts[i]);
-    Chain = Part.getValue(0);
-    if (Flag)
+  SmallVector<SDOperand, 8> Chains(NumRegs);
+  for (unsigned i = 0; i != NumRegs; ++i) {
+    SDOperand Part;
+    if (Flag == 0)
+      Part = DAG.getCopyToReg(Chain, Regs[i], Parts[i]);
+    else {
+      Part = DAG.getCopyToReg(Chain, Regs[i], Parts[i], *Flag);
       *Flag = Part.getValue(1);
+    }
+    Chains[i] = Part.getValue(0);
   }
+  
+  if (NumRegs == 1 || Flag)
+    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is 
+    // flagged to it. That is the CopyToReg nodes and the user are considered
+    // a single scheduling unit. If we create a TokenFactor and return it as
+    // chain, then the TokenFactor is both a predecessor (operand) of the
+    // user as well as a successor (the TF operands are flagged to the user).
+    // c1, f1 = CopyToReg
+    // c2, f2 = CopyToReg
+    // c3     = TokenFactor c1, c2
+    // ...
+    //        = op c3, ..., f2
+    Chain = Chains[NumRegs-1];
+  else
+    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Chains[0], NumRegs);
 }
 
 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
@@ -3366,10 +3819,14 @@
 /// values added into it.
 void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
                                         std::vector<SDOperand> &Ops) const {
-  MVT::ValueType IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
+  MVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
   Ops.push_back(DAG.getTargetConstant(Code | (Regs.size() << 3), IntPtrTy));
-  for (unsigned i = 0, e = Regs.size(); i != e; ++i)
-    Ops.push_back(DAG.getRegister(Regs[i], RegVT));
+  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
+    unsigned NumRegs = TLI->getNumRegisters(ValueVTs[Value]);
+    MVT RegisterVT = RegVTs[Value];
+    for (unsigned i = 0; i != NumRegs; ++i)
+      Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
+  }
 }
 
 /// isAllocatableRegister - If the specified register is safe to allocate, 
@@ -3379,11 +3836,11 @@
 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
                       const TargetLowering &TLI,
                       const TargetRegisterInfo *TRI) {
-  MVT::ValueType FoundVT = MVT::Other;
+  MVT FoundVT = MVT::Other;
   const TargetRegisterClass *FoundRC = 0;
   for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
        E = TRI->regclass_end(); RCI != E; ++RCI) {
-    MVT::ValueType ThisVT = MVT::Other;
+    MVT ThisVT = MVT::Other;
 
     const TargetRegisterClass *RC = *RCI;
     // If none of the value types for this register class are valid, we
@@ -3394,8 +3851,7 @@
         // If we have already found this register in a different register class,
         // choose the one with the largest VT specified.  For example, on
         // PowerPC, we favor f64 register classes over f32.
-        if (FoundVT == MVT::Other || 
-            MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) {
+        if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
           ThisVT = *I;
           break;
         }
@@ -3435,7 +3891,7 @@
   /// contains the set of registers corresponding to the operand.
   RegsForValue AssignedRegs;
   
-  SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
+  explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
     : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
   }
   
@@ -3510,7 +3966,7 @@
   
   
   MachineFunction &MF = DAG.getMachineFunction();
-  std::vector<unsigned> Regs;
+  SmallVector<unsigned, 4> Regs;
   
   // If this is a constraint for a single physreg, or a constraint for a
   // register class, find it.
@@ -3521,8 +3977,8 @@
   unsigned NumRegs = 1;
   if (OpInfo.ConstraintVT != MVT::Other)
     NumRegs = TLI.getNumRegisters(OpInfo.ConstraintVT);
-  MVT::ValueType RegVT;
-  MVT::ValueType ValueVT = OpInfo.ConstraintVT;
+  MVT RegVT;
+  MVT ValueVT = OpInfo.ConstraintVT;
   
 
   // If this is a constraint for a specific physical register, like {r17},
@@ -3542,18 +3998,17 @@
     // If this is an expanded reference, add the rest of the regs to Regs.
     if (NumRegs != 1) {
       TargetRegisterClass::iterator I = PhysReg.second->begin();
-      TargetRegisterClass::iterator E = PhysReg.second->end();
       for (; *I != PhysReg.first; ++I)
-        assert(I != E && "Didn't find reg!"); 
+        assert(I != PhysReg.second->end() && "Didn't find reg!"); 
       
       // Already added the first reg.
       --NumRegs; ++I;
       for (; NumRegs; --NumRegs, ++I) {
-        assert(I != E && "Ran out of registers to allocate!");
+        assert(I != PhysReg.second->end() && "Ran out of registers to allocate!");
         Regs.push_back(*I);
       }
     }
-    OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
+    OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
     const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
     OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
     return;
@@ -3582,7 +4037,7 @@
       for (; NumRegs; --NumRegs)
         Regs.push_back(RegInfo.createVirtualRegister(PhysReg.second));
       
-      OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
+      OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
       return;
     }
     
@@ -3632,7 +4087,7 @@
       for (unsigned i = RegStart; i != RegEnd; ++i)
         Regs.push_back(RegClassRegs[i]);
       
-      OpInfo.AssignedRegs = RegsForValue(Regs, *RC->vt_begin(), 
+      OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(), 
                                          OpInfo.ConstraintVT);
       OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
       return;
@@ -3640,7 +4095,6 @@
   }
   
   // Otherwise, we couldn't allocate enough registers for this.
-  return;
 }
 
 
@@ -3668,23 +4122,31 @@
   bool SawEarlyClobber = false;
   
   unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
+  unsigned ResNo = 0;   // ResNo - The result number of the next output.
   for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
     ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
     
-    MVT::ValueType OpVT = MVT::Other;
+    MVT OpVT = MVT::Other;
 
     // Compute the value type for each operand.
     switch (OpInfo.Type) {
     case InlineAsm::isOutput:
-      if (!OpInfo.isIndirect) {
-        // The return value of the call is this value.  As such, there is no
-        // corresponding argument.
-        assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
-        OpVT = TLI.getValueType(CS.getType());
-      } else {
+      // Indirect outputs just consume an argument.
+      if (OpInfo.isIndirect) {
         OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
+        break;
+      }
+      // The return value of the call is this value.  As such, there is no
+      // corresponding argument.
+      assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
+      if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
+        OpVT = TLI.getValueType(STy->getElementType(ResNo));
+      } else {
+        assert(ResNo == 0 && "Asm only has one result!");
+        OpVT = TLI.getValueType(CS.getType());
       }
+      ++ResNo;
       break;
     case InlineAsm::isInput:
       OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
@@ -3697,10 +4159,8 @@
     // If this is an input or an indirect output, process the call argument.
     // BasicBlocks are labels, currently appearing only in asm's.
     if (OpInfo.CallOperandVal) {
-      if (isa<BasicBlock>(OpInfo.CallOperandVal))
-        OpInfo.CallOperand = 
-          DAG.getBasicBlock(FuncInfo.MBBMap[cast<BasicBlock>(
-                                                 OpInfo.CallOperandVal)]);
+      if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal))
+        OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
       else {
         OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
         const Type *OpTy = OpInfo.CallOperandVal->getType();
@@ -3709,9 +4169,9 @@
         if (OpInfo.isIndirect)
           OpTy = cast<PointerType>(OpTy)->getElementType();
 
-        // If OpTy is not a first-class value, it may be a struct/union that we
+        // If OpTy is not a single value, it may be a struct/union that we
         // can tile with integers.
-        if (!OpTy->isFirstClassType() && OpTy->isSized()) {
+        if (!OpTy->isSingleValueType() && OpTy->isSized()) {
           unsigned BitSize = TD->getTypeSizeInBits(OpTy);
           switch (BitSize) {
           default: break;
@@ -3732,7 +4192,7 @@
     OpInfo.ConstraintVT = OpVT;
     
     // Compute the constraint code and ConstraintType to use.
-    OpInfo.ComputeConstraintToUse(TLI);
+    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
 
     // Keep track of whether we see an earlyclobber.
     SawEarlyClobber |= OpInfo.isEarlyClobber;
@@ -3818,7 +4278,7 @@
   // Loop over all of the inputs, copying the operand values into the
   // appropriate registers and processing the output regs.
   RegsForValue RetValRegs;
-  
+ 
   // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
   std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
   
@@ -3845,20 +4305,21 @@
       // Copy the output from the appropriate register.  Find a register that
       // we can use.
       if (OpInfo.AssignedRegs.Regs.empty()) {
-        cerr << "Couldn't allocate output reg for contraint '"
+        cerr << "Couldn't allocate output reg for constraint '"
              << OpInfo.ConstraintCode << "'!\n";
         exit(1);
       }
 
-      if (!OpInfo.isIndirect) {
-        // This is the result value of the call.
-        assert(RetValRegs.Regs.empty() &&
-               "Cannot have multiple output constraints yet!");
-        assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
-        RetValRegs = OpInfo.AssignedRegs;
-      } else {
+      // If this is an indirect operand, store through the pointer after the
+      // asm.
+      if (OpInfo.isIndirect) {
         IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
                                                       OpInfo.CallOperandVal));
+      } else {
+        // This is the result value of the call.
+        assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
+        // Concatenate this output onto the outputs list.
+        RetValRegs.append(OpInfo.AssignedRegs);
       }
       
       // Add information to the INLINEASM node to know that this register is
@@ -3893,8 +4354,9 @@
         if ((NumOps & 7) == 2 /*REGDEF*/) {
           // Add NumOps>>3 registers to MatchedRegs.
           RegsForValue MatchedRegs;
-          MatchedRegs.ValueVT = InOperandVal.getValueType();
-          MatchedRegs.RegVT   = AsmNodeOperands[CurOp+1].getValueType();
+          MatchedRegs.TLI = &TLI;
+          MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
+          MatchedRegs.RegVTs.push_back(AsmNodeOperands[CurOp+1].getValueType());
           for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
             unsigned Reg =
               cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
@@ -3989,17 +4451,24 @@
   // and set it as the value of the call.
   if (!RetValRegs.Regs.empty()) {
     SDOperand Val = RetValRegs.getCopyFromRegs(DAG, Chain, &Flag);
-    
-    // If the result of the inline asm is a vector, it may have the wrong
-    // width/num elts.  Make sure to convert it to the right type with
+
+    // If any of the results of the inline asm is a vector, it may have the
+    // wrong width/num elts.  This can happen for register classes that can
+    // contain multiple different value types.  The preg or vreg allocated may
+    // not have the same VT as was expected.  Convert it to the right type with
     // bit_convert.
-    if (MVT::isVector(Val.getValueType())) {
-      const VectorType *VTy = cast<VectorType>(CS.getType());
-      MVT::ValueType DesiredVT = TLI.getValueType(VTy);
-      
-      Val = DAG.getNode(ISD::BIT_CONVERT, DesiredVT, Val);
+    if (const StructType *ResSTy = dyn_cast<StructType>(CS.getType())) {
+      for (unsigned i = 0, e = ResSTy->getNumElements(); i != e; ++i) {
+        if (Val.Val->getValueType(i).isVector())
+          Val = DAG.getNode(ISD::BIT_CONVERT,
+                            TLI.getValueType(ResSTy->getElementType(i)), Val);
+      }
+    } else {
+      if (Val.getValueType().isVector())
+        Val = DAG.getNode(ISD::BIT_CONVERT, TLI.getValueType(CS.getType()),
+                          Val);
     }
-    
+
     setValue(CS.getInstruction(), Val);
   }
   
@@ -4030,11 +4499,11 @@
 void SelectionDAGLowering::visitMalloc(MallocInst &I) {
   SDOperand Src = getValue(I.getOperand(0));
 
-  MVT::ValueType IntPtr = TLI.getPointerTy();
+  MVT IntPtr = TLI.getPointerTy();
 
-  if (IntPtr < Src.getValueType())
+  if (IntPtr.bitsLT(Src.getValueType()))
     Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
-  else if (IntPtr > Src.getValueType())
+  else if (IntPtr.bitsGT(Src.getValueType()))
     Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
 
   // Scale the source by the type size.
@@ -4061,7 +4530,7 @@
   Entry.Node = getValue(I.getOperand(0));
   Entry.Ty = TLI.getTargetData()->getIntPtrType();
   Args.push_back(Entry);
-  MVT::ValueType IntPtr = TLI.getPointerTy();
+  MVT IntPtr = TLI.getPointerTy();
   std::pair<SDOperand,SDOperand> Result =
     TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, false,
                     CallingConv::C, true,
@@ -4115,60 +4584,66 @@
 /// implementation, which just inserts a FORMAL_ARGUMENTS node.  FIXME: When all
 /// targets are migrated to using FORMAL_ARGUMENTS, this hook should be 
 /// integrated into SDISel.
-std::vector<SDOperand> 
-TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
+void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
+                                    SmallVectorImpl<SDOperand> &ArgValues) {
   // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
-  std::vector<SDOperand> Ops;
+  SmallVector<SDOperand, 3+16> Ops;
   Ops.push_back(DAG.getRoot());
   Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
   Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));
 
   // Add one result value for each formal argument.
-  std::vector<MVT::ValueType> RetVals;
+  SmallVector<MVT, 16> RetVals;
   unsigned j = 1;
   for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
        I != E; ++I, ++j) {
-    MVT::ValueType VT = getValueType(I->getType());
-    ISD::ArgFlagsTy Flags;
-    unsigned OriginalAlignment =
-      getTargetData()->getABITypeAlignment(I->getType());
-
-    if (F.paramHasAttr(j, ParamAttr::ZExt))
-      Flags.setZExt();
-    if (F.paramHasAttr(j, ParamAttr::SExt))
-      Flags.setSExt();
-    if (F.paramHasAttr(j, ParamAttr::InReg))
-      Flags.setInReg();
-    if (F.paramHasAttr(j, ParamAttr::StructRet))
-      Flags.setSRet();
-    if (F.paramHasAttr(j, ParamAttr::ByVal)) {
-      Flags.setByVal();
-      const PointerType *Ty = cast<PointerType>(I->getType());
-      const Type *ElementTy = Ty->getElementType();
-      unsigned FrameAlign = getByValTypeAlignment(ElementTy);
-      unsigned FrameSize  = getTargetData()->getABITypeSize(ElementTy);
-      // For ByVal, alignment should be passed from FE.  BE will guess if
-      // this info is not there but there are cases it cannot get right.
-      if (F.getParamAlignment(j))
-        FrameAlign = F.getParamAlignment(j);
-      Flags.setByValAlign(FrameAlign);
-      Flags.setByValSize(FrameSize);
-    }
-    if (F.paramHasAttr(j, ParamAttr::Nest))
-      Flags.setNest();
-    Flags.setOrigAlign(OriginalAlignment);
+    SmallVector<MVT, 4> ValueVTs;
+    ComputeValueVTs(*this, I->getType(), ValueVTs);
+    for (unsigned Value = 0, NumValues = ValueVTs.size();
+         Value != NumValues; ++Value) {
+      MVT VT = ValueVTs[Value];
+      const Type *ArgTy = VT.getTypeForMVT();
+      ISD::ArgFlagsTy Flags;
+      unsigned OriginalAlignment =
+        getTargetData()->getABITypeAlignment(ArgTy);
+
+      if (F.paramHasAttr(j, ParamAttr::ZExt))
+        Flags.setZExt();
+      if (F.paramHasAttr(j, ParamAttr::SExt))
+        Flags.setSExt();
+      if (F.paramHasAttr(j, ParamAttr::InReg))
+        Flags.setInReg();
+      if (F.paramHasAttr(j, ParamAttr::StructRet))
+        Flags.setSRet();
+      if (F.paramHasAttr(j, ParamAttr::ByVal)) {
+        Flags.setByVal();
+        const PointerType *Ty = cast<PointerType>(I->getType());
+        const Type *ElementTy = Ty->getElementType();
+        unsigned FrameAlign = getByValTypeAlignment(ElementTy);
+        unsigned FrameSize  = getTargetData()->getABITypeSize(ElementTy);
+        // For ByVal, alignment should be passed from FE.  BE will guess if
+        // this info is not there but there are cases it cannot get right.
+        if (F.getParamAlignment(j))
+          FrameAlign = F.getParamAlignment(j);
+        Flags.setByValAlign(FrameAlign);
+        Flags.setByValSize(FrameSize);
+      }
+      if (F.paramHasAttr(j, ParamAttr::Nest))
+        Flags.setNest();
+      Flags.setOrigAlign(OriginalAlignment);
 
-    MVT::ValueType RegisterVT = getRegisterType(VT);
-    unsigned NumRegs = getNumRegisters(VT);
-    for (unsigned i = 0; i != NumRegs; ++i) {
-      RetVals.push_back(RegisterVT);
-      ISD::ArgFlagsTy MyFlags = Flags;
-      if (NumRegs > 1 && i == 0)
-        MyFlags.setSplit();
-      // if it isn't first piece, alignment must be 1
-      else if (i > 0)
-        MyFlags.setOrigAlign(1);
-      Ops.push_back(DAG.getArgFlags(MyFlags));
+      MVT RegisterVT = getRegisterType(VT);
+      unsigned NumRegs = getNumRegisters(VT);
+      for (unsigned i = 0; i != NumRegs; ++i) {
+        RetVals.push_back(RegisterVT);
+        ISD::ArgFlagsTy MyFlags = Flags;
+        if (NumRegs > 1 && i == 0)
+          MyFlags.setSplit();
+        // If it isn't the first piece, the alignment must be 1.
+        else if (i > 0)
+          MyFlags.setOrigAlign(1);
+        Ops.push_back(DAG.getArgFlags(MyFlags));
+      }
     }
   }
 
@@ -4196,30 +4671,33 @@
   DAG.setRoot(SDOperand(Result, NumArgRegs));
 
   // Set up the return result vector.
-  Ops.clear();
   unsigned i = 0;
   unsigned Idx = 1;
   for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; 
       ++I, ++Idx) {
-    MVT::ValueType VT = getValueType(I->getType());
-    MVT::ValueType PartVT = getRegisterType(VT);
-
-    unsigned NumParts = getNumRegisters(VT);
-    SmallVector<SDOperand, 4> Parts(NumParts);
-    for (unsigned j = 0; j != NumParts; ++j)
-      Parts[j] = SDOperand(Result, i++);
-
-    ISD::NodeType AssertOp = ISD::DELETED_NODE;
-    if (F.paramHasAttr(Idx, ParamAttr::SExt))
-      AssertOp = ISD::AssertSext;
-    else if (F.paramHasAttr(Idx, ParamAttr::ZExt))
-      AssertOp = ISD::AssertZext;
+    SmallVector<MVT, 4> ValueVTs;
+    ComputeValueVTs(*this, I->getType(), ValueVTs);
+    for (unsigned Value = 0, NumValues = ValueVTs.size();
+         Value != NumValues; ++Value) {
+      MVT VT = ValueVTs[Value];
+      MVT PartVT = getRegisterType(VT);
+
+      unsigned NumParts = getNumRegisters(VT);
+      SmallVector<SDOperand, 4> Parts(NumParts);
+      for (unsigned j = 0; j != NumParts; ++j)
+        Parts[j] = SDOperand(Result, i++);
+
+      ISD::NodeType AssertOp = ISD::DELETED_NODE;
+      if (F.paramHasAttr(Idx, ParamAttr::SExt))
+        AssertOp = ISD::AssertSext;
+      else if (F.paramHasAttr(Idx, ParamAttr::ZExt))
+        AssertOp = ISD::AssertZext;
 
-    Ops.push_back(getCopyFromParts(DAG, &Parts[0], NumParts, PartVT, VT,
-                                   AssertOp));
+      ArgValues.push_back(getCopyFromParts(DAG, &Parts[0], NumParts, PartVT, VT,
+                                           AssertOp));
+    }
   }
   assert(i == NumArgRegs && "Argument register count mismatch!");
-  return Ops;
 }
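
With ComputeValueVTs in the loop, a single IR argument may now expand into
several value types, and each value type into several registers via
getRegisterType/getNumRegisters. A standalone sketch of that two-level split,
simplified to plain bit-widths and a round-up division (the 64-bit register
size and the ceil-division are illustrative assumptions, not the real
getNumRegisters rules):

#include <cstdio>
#include <vector>

// Split each value's bit-width into register-sized parts, as LowerArguments
// does per ValueVT; returns the total number of registers consumed.
static unsigned countArgRegisters(const std::vector<unsigned> &ValueBits,
                                  unsigned RegBits) {
  unsigned NumRegs = 0;
  for (unsigned Bits : ValueBits)
    NumRegs += (Bits + RegBits - 1) / RegBits;  // round up to whole registers
  return NumRegs;
}

int main() {
  // e.g. a {i32, i128} aggregate argument on a 64-bit target:
  std::vector<unsigned> ValueBits = {32, 128};
  std::printf("registers needed: %u\n", countArgRegisters(ValueBits, 64)); // 3
  return 0;
}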
 
 
@@ -4242,79 +4720,78 @@
   
   // Handle all of the outgoing arguments.
   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
-    MVT::ValueType VT = getValueType(Args[i].Ty);
-    SDOperand Op = Args[i].Node;
-    ISD::ArgFlagsTy Flags;
-    unsigned OriginalAlignment =
-      getTargetData()->getABITypeAlignment(Args[i].Ty);
-
-    if (Args[i].isZExt)
-      Flags.setZExt();
-    if (Args[i].isSExt)
-      Flags.setSExt();
-    if (Args[i].isInReg)
-      Flags.setInReg();
-    if (Args[i].isSRet)
-      Flags.setSRet();
-    if (Args[i].isByVal) {
-      Flags.setByVal();
-      const PointerType *Ty = cast<PointerType>(Args[i].Ty);
-      const Type *ElementTy = Ty->getElementType();
-      unsigned FrameAlign = getByValTypeAlignment(ElementTy);
-      unsigned FrameSize  = getTargetData()->getABITypeSize(ElementTy);
-      // For ByVal, alignment should come from FE.  BE will guess if this
-      // info is not there but there are cases it cannot get right.
-      if (Args[i].Alignment)
-        FrameAlign = Args[i].Alignment;
-      Flags.setByValAlign(FrameAlign);
-      Flags.setByValSize(FrameSize);
-    }
-    if (Args[i].isNest)
-      Flags.setNest();
-    Flags.setOrigAlign(OriginalAlignment);
-
-    MVT::ValueType PartVT = getRegisterType(VT);
-    unsigned NumParts = getNumRegisters(VT);
-    SmallVector<SDOperand, 4> Parts(NumParts);
-    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
-
-    if (Args[i].isSExt)
-      ExtendKind = ISD::SIGN_EXTEND;
-    else if (Args[i].isZExt)
-      ExtendKind = ISD::ZERO_EXTEND;
-
-    getCopyToParts(DAG, Op, &Parts[0], NumParts, PartVT, ExtendKind);
-
-    for (unsigned i = 0; i != NumParts; ++i) {
-      // if it isn't first piece, alignment must be 1
-      ISD::ArgFlagsTy MyFlags = Flags;
-      if (NumParts > 1 && i == 0)
-        MyFlags.setSplit();
-      else if (i != 0)
-        MyFlags.setOrigAlign(1);
+    SmallVector<MVT, 4> ValueVTs;
+    ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
+    for (unsigned Value = 0, NumValues = ValueVTs.size();
+         Value != NumValues; ++Value) {
+      MVT VT = ValueVTs[Value];
+      const Type *ArgTy = VT.getTypeForMVT();
+      SDOperand Op = SDOperand(Args[i].Node.Val, Args[i].Node.ResNo + Value);
+      ISD::ArgFlagsTy Flags;
+      unsigned OriginalAlignment =
+        getTargetData()->getABITypeAlignment(ArgTy);
+
+      if (Args[i].isZExt)
+        Flags.setZExt();
+      if (Args[i].isSExt)
+        Flags.setSExt();
+      if (Args[i].isInReg)
+        Flags.setInReg();
+      if (Args[i].isSRet)
+        Flags.setSRet();
+      if (Args[i].isByVal) {
+        Flags.setByVal();
+        const PointerType *Ty = cast<PointerType>(Args[i].Ty);
+        const Type *ElementTy = Ty->getElementType();
+        unsigned FrameAlign = getByValTypeAlignment(ElementTy);
+        unsigned FrameSize  = getTargetData()->getABITypeSize(ElementTy);
+        // For ByVal, alignment should come from FE.  BE will guess if this
+        // info is not there but there are cases it cannot get right.
+        if (Args[i].Alignment)
+          FrameAlign = Args[i].Alignment;
+        Flags.setByValAlign(FrameAlign);
+        Flags.setByValSize(FrameSize);
+      }
+      if (Args[i].isNest)
+        Flags.setNest();
+      Flags.setOrigAlign(OriginalAlignment);
+
+      MVT PartVT = getRegisterType(VT);
+      unsigned NumParts = getNumRegisters(VT);
+      SmallVector<SDOperand, 4> Parts(NumParts);
+      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
+
+      if (Args[i].isSExt)
+        ExtendKind = ISD::SIGN_EXTEND;
+      else if (Args[i].isZExt)
+        ExtendKind = ISD::ZERO_EXTEND;
+
+      getCopyToParts(DAG, Op, &Parts[0], NumParts, PartVT, ExtendKind);
+
+      for (unsigned i = 0; i != NumParts; ++i) {
+        // If it isn't the first piece, the alignment must be 1.
+        ISD::ArgFlagsTy MyFlags = Flags;
+        if (NumParts > 1 && i == 0)
+          MyFlags.setSplit();
+        else if (i != 0)
+          MyFlags.setOrigAlign(1);
 
-      Ops.push_back(Parts[i]);
-      Ops.push_back(DAG.getArgFlags(MyFlags));
+        Ops.push_back(Parts[i]);
+        Ops.push_back(DAG.getArgFlags(MyFlags));
+      }
     }
   }
   
   // Figure out the result value types. We start by making a list of
-  // the high-level LLVM return types.
-  SmallVector<const Type *, 4> LLVMRetTys;
-  if (const StructType *ST = dyn_cast<StructType>(RetTy))
-    // A struct return type in the LLVM IR means we have multiple return values.
-    LLVMRetTys.insert(LLVMRetTys.end(), ST->element_begin(), ST->element_end());
-  else
-    LLVMRetTys.push_back(RetTy);
-
-  // Then we translate that to a list of lowered codegen result types.
-  SmallVector<MVT::ValueType, 4> LoweredRetTys;
-  SmallVector<MVT::ValueType, 4> RetTys;
-  for (unsigned I = 0, E = LLVMRetTys.size(); I != E; ++I) {
-    MVT::ValueType VT = getValueType(LLVMRetTys[I]);
-    RetTys.push_back(VT);
-
-    MVT::ValueType RegisterVT = getRegisterType(VT);
+  // the potentially illegal return value types.
+  SmallVector<MVT, 4> LoweredRetTys;
+  SmallVector<MVT, 4> RetTys;
+  ComputeValueVTs(*this, RetTy, RetTys);
+
+  // Then we translate that to a list of legal types.
+  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
+    MVT VT = RetTys[I];
+    MVT RegisterVT = getRegisterType(VT);
     unsigned NumRegs = getNumRegisters(VT);
     for (unsigned i = 0; i != NumRegs; ++i)
       LoweredRetTys.push_back(RegisterVT);
@@ -4340,9 +4817,9 @@
 
     SmallVector<SDOperand, 4> ReturnValues;
     unsigned RegNo = 0;
-    for (unsigned I = 0, E = LLVMRetTys.size(); I != E; ++I) {
-      MVT::ValueType VT = getValueType(LLVMRetTys[I]);
-      MVT::ValueType RegisterVT = getRegisterType(VT);
+    for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
+      MVT VT = RetTys[I];
+      MVT RegisterVT = getRegisterType(VT);
       unsigned NumRegs = getNumRegisters(VT);
       unsigned RegNoEnd = NumRegs + RegNo;
       SmallVector<SDOperand, 4> Results;
@@ -4353,10 +4830,8 @@
                          AssertOp);
       ReturnValues.push_back(ReturnValue);
     }
-    Res = ReturnValues.size() == 1 ? ReturnValues.front() :
-          DAG.getNode(ISD::MERGE_VALUES,
-                      DAG.getVTList(&RetTys[0], RetTys.size()),
-                      &ReturnValues[0], ReturnValues.size());
+    Res = DAG.getMergeValues(DAG.getVTList(&RetTys[0], RetTys.size()),
+                             &ReturnValues[0], ReturnValues.size());
   }
 
   return std::make_pair(Res, Chain);
@@ -4368,18 +4843,12 @@
   return SDOperand();
 }
 
-SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op,
-                                                 SelectionDAG &DAG) {
-  assert(0 && "CustomPromoteOperation not implemented for this target!");
-  abort();
-  return SDOperand();
-}
 
 //===----------------------------------------------------------------------===//
 // SelectionDAGISel code
 //===----------------------------------------------------------------------===//
 
-unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
+unsigned SelectionDAGISel::MakeReg(MVT VT) {
   return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
 }
 
@@ -4389,8 +4858,6 @@
   AU.setPreservesAll();
 }
 
-
-
 bool SelectionDAGISel::runOnFunction(Function &Fn) {
   // Get alias analysis for load/store combining.
   AA = &getAnalysis<AliasAnalysis>();
@@ -4429,26 +4896,17 @@
   return true;
 }
 
-void SelectionDAGLowering::CopyValueToVirtualRegister(Value *V, 
-                                                      unsigned Reg) {
+void SelectionDAGLowering::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
   SDOperand Op = getValue(V);
   assert((Op.getOpcode() != ISD::CopyFromReg ||
           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
          "Copy from a reg to the same reg!");
   assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
-  
-  MVT::ValueType SrcVT = Op.getValueType();
-  MVT::ValueType RegisterVT = TLI.getRegisterType(SrcVT);
-  unsigned NumRegs = TLI.getNumRegisters(SrcVT);
-  SmallVector<SDOperand, 8> Regs(NumRegs);
-  SmallVector<SDOperand, 8> Chains(NumRegs);
 
-  // Copy the value by legal parts into sequential virtual registers.
-  getCopyToParts(DAG, Op, &Regs[0], NumRegs, RegisterVT);
-  for (unsigned i = 0; i != NumRegs; ++i)
-    Chains[i] = DAG.getCopyToReg(DAG.getEntryNode(), Reg + i, Regs[i]);
-  SDOperand Ch = DAG.getNode(ISD::TokenFactor, MVT::Other, &Chains[0], NumRegs);
-  PendingExports.push_back(Ch);
+  RegsForValue RFV(TLI, Reg, V->getType());
+  SDOperand Chain = DAG.getEntryNode();
+  RFV.getCopyToRegs(Op, DAG, Chain, 0);
+  PendingExports.push_back(Chain);
 }
 
 void SelectionDAGISel::
@@ -4457,14 +4915,17 @@
   Function &F = *LLVMBB->getParent();
   FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
   SDOperand OldRoot = SDL.DAG.getRoot();
-  std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);
+  SmallVector<SDOperand, 16> Args;
+  TLI.LowerArguments(F, SDL.DAG, Args);
 
   unsigned a = 0;
   for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
-       AI != E; ++AI, ++a)
+       AI != E; ++AI) {
+    SmallVector<MVT, 4> ValueVTs;
+    ComputeValueVTs(TLI, AI->getType(), ValueVTs);
+    unsigned NumValues = ValueVTs.size();
     if (!AI->use_empty()) {
-      SDL.setValue(AI, Args[a]);
-
+      SDL.setValue(AI, SDL.DAG.getMergeValues(&Args[a], NumValues));
       // If this argument is live outside of the entry block, insert a copy from
       // wherever we got it to the vreg that other BB's will reference it as.
       DenseMap<const Value*, unsigned>::iterator VMI=FuncInfo.ValueMap.find(AI);
@@ -4472,6 +4933,8 @@
         SDL.CopyValueToVirtualRegister(AI, VMI->second);
       }
     }
+    a += NumValues;
+  }
 
   // Finally, if the target has anything special to do, allow it to do so.
   // FIXME: this should insert code into the DAG!
@@ -4491,6 +4954,40 @@
     }
 }
 
+/// IsFixedFrameObjectWithPosOffset - Check whether the object is a fixed
+/// frame object with a non-negative offset.
+static bool
+IsFixedFrameObjectWithPosOffset(MachineFrameInfo * MFI, SDOperand Op) {
+  if (!isa<FrameIndexSDNode>(Op)) return false;
+
+  FrameIndexSDNode * FrameIdxNode = dyn_cast<FrameIndexSDNode>(Op);
+  int FrameIdx =  FrameIdxNode->getIndex();
+  return MFI->isFixedObjectIndex(FrameIdx) &&
+    MFI->getObjectOffset(FrameIdx) >= 0;
+}
+
+/// IsPossiblyOverwrittenArgumentOfTailCall - Check if the operand could
+/// possibly be overwritten when lowering the outgoing arguments in a tail
+/// call. Currently the implementation of this call is very conservative and
+/// assumes all arguments sourcing from FORMAL_ARGUMENTS or a CopyFromReg with
+/// virtual registers would be overwritten by direct lowering.
+static bool IsPossiblyOverwrittenArgumentOfTailCall(SDOperand Op,
+                                                    MachineFrameInfo * MFI) {
+  RegisterSDNode * OpReg = NULL;
+  if (Op.getOpcode() == ISD::FORMAL_ARGUMENTS ||
+      (Op.getOpcode()== ISD::CopyFromReg &&
+       (OpReg = dyn_cast<RegisterSDNode>(Op.getOperand(1))) &&
+       (OpReg->getReg() >= TargetRegisterInfo::FirstVirtualRegister)) ||
+      (Op.getOpcode() == ISD::LOAD &&
+       IsFixedFrameObjectWithPosOffset(MFI, Op.getOperand(1))) ||
+      (Op.getOpcode() == ISD::MERGE_VALUES &&
+       Op.getOperand(Op.ResNo).getOpcode() == ISD::LOAD &&
+       IsFixedFrameObjectWithPosOffset(MFI, Op.getOperand(Op.ResNo).
+                                       getOperand(1))))
+    return true;
+  return false;
+}
+
 /// CheckDAGForTailCallsAndFixThem - This Function looks for CALL nodes in the
 /// DAG and fixes their tailcall attribute operand.
 static void CheckDAGForTailCallsAndFixThem(SelectionDAG &DAG, 
@@ -4515,19 +5012,51 @@
       // eligible (no RET or the target rejects) the attribute is fixed to
       // false. The TargetLowering::IsEligibleForTailCallOptimization function
       // must correctly identify tail call optimizable calls.
-      if (isMarkedTailCall && 
-          (Ret==NULL || 
-           !TLI.IsEligibleForTailCallOptimization(OpCall, OpRet, DAG))) {
+      if (!isMarkedTailCall) continue;
+      if (Ret==NULL ||
+          !TLI.IsEligibleForTailCallOptimization(OpCall, OpRet, DAG)) {
+        // Not eligible. Mark CALL node as non tail call.
         SmallVector<SDOperand, 32> Ops;
         unsigned idx=0;
-        for(SDNode::op_iterator I =OpCall.Val->op_begin(), 
-              E=OpCall.Val->op_end(); I!=E; I++, idx++) {
+        for(SDNode::op_iterator I =OpCall.Val->op_begin(),
+              E = OpCall.Val->op_end(); I != E; I++, idx++) {
           if (idx!=3)
             Ops.push_back(*I);
-          else 
+          else
             Ops.push_back(DAG.getConstant(false, TLI.getPointerTy()));
         }
         DAG.UpdateNodeOperands(OpCall, Ops.begin(), Ops.size());
+      } else {
+        // Look for tail call clobbered arguments. Emit a series of
+        // copyto/copyfrom virtual register nodes to protect them.
+        SmallVector<SDOperand, 32> Ops;
+        SDOperand Chain = OpCall.getOperand(0), InFlag;
+        unsigned idx=0;
+        for(SDNode::op_iterator I = OpCall.Val->op_begin(),
+              E = OpCall.Val->op_end(); I != E; I++, idx++) {
+          SDOperand Arg = *I;
+          if (idx > 4 && (idx % 2)) {
+            bool isByVal = cast<ARG_FLAGSSDNode>(OpCall.getOperand(idx+1))->
+              getArgFlags().isByVal();
+            MachineFunction &MF = DAG.getMachineFunction();
+            MachineFrameInfo *MFI = MF.getFrameInfo();
+            if (!isByVal &&
+                IsPossiblyOverwrittenArgumentOfTailCall(Arg, MFI)) {
+              MVT VT = Arg.getValueType();
+              unsigned VReg = MF.getRegInfo().
+                createVirtualRegister(TLI.getRegClassFor(VT));
+              Chain = DAG.getCopyToReg(Chain, VReg, Arg, InFlag);
+              InFlag = Chain.getValue(1);
+              Arg = DAG.getCopyFromReg(Chain, VReg, VT, InFlag);
+              Chain = Arg.getValue(1);
+              InFlag = Arg.getValue(2);
+            }
+          }
+          Ops.push_back(Arg);
+        }
+        // Link in chain of CopyTo/CopyFromReg.
+        Ops[0] = Chain;
+        DAG.UpdateNodeOperands(OpCall, Ops.begin(), Ops.size());
       }
     }
   }
@@ -4551,9 +5080,7 @@
     // Add a label to mark the beginning of the landing pad.  Deletion of the
     // landing pad can thus be detected via the MachineModuleInfo.
     unsigned LabelID = MMI->addLandingPad(BB);
-    DAG.setRoot(DAG.getNode(ISD::LABEL, MVT::Other, DAG.getEntryNode(),
-                            DAG.getConstant(LabelID, MVT::i32),
-                            DAG.getConstant(1, MVT::i32)));
+    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, DAG.getEntryNode(), LabelID));
 
     // Mark exception register as live in.
     unsigned Reg = TLI.getExceptionAddressRegister();
@@ -4666,7 +5193,7 @@
 
       // Remember that this register needs to added to the machine PHI node as
       // the input for this MBB.
-      MVT::ValueType VT = TLI.getValueType(PN->getType());
+      MVT VT = TLI.getValueType(PN->getType());
       unsigned NumRegisters = TLI.getNumRegisters(VT);
       for (unsigned i = 0, e = NumRegisters; i != e; ++i)
         PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
@@ -4696,12 +5223,72 @@
   CheckDAGForTailCallsAndFixThem(DAG, TLI);
 }
 
+void SelectionDAGISel::ComputeLiveOutVRegInfo(SelectionDAG &DAG) {
+  SmallPtrSet<SDNode*, 128> VisitedNodes;
+  SmallVector<SDNode*, 128> Worklist;
+  
+  Worklist.push_back(DAG.getRoot().Val);
+  
+  APInt Mask;
+  APInt KnownZero;
+  APInt KnownOne;
+  
+  while (!Worklist.empty()) {
+    SDNode *N = Worklist.back();
+    Worklist.pop_back();
+    
+    // If we've already seen this node, ignore it.
+    if (!VisitedNodes.insert(N))
+      continue;
+    
+    // Otherwise, add all chain operands to the worklist.
+    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
+      if (N->getOperand(i).getValueType() == MVT::Other)
+        Worklist.push_back(N->getOperand(i).Val);
+    
+    // If this is a CopyToReg with a vreg dest, process it.
+    if (N->getOpcode() != ISD::CopyToReg)
+      continue;
+    
+    unsigned DestReg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
+    if (!TargetRegisterInfo::isVirtualRegister(DestReg))
+      continue;
+    
+    // Ignore non-scalar or non-integer values.
+    SDOperand Src = N->getOperand(2);
+    MVT SrcVT = Src.getValueType();
+    if (!SrcVT.isInteger() || SrcVT.isVector())
+      continue;
+    
+    unsigned NumSignBits = DAG.ComputeNumSignBits(Src);
+    Mask = APInt::getAllOnesValue(SrcVT.getSizeInBits());
+    DAG.ComputeMaskedBits(Src, Mask, KnownZero, KnownOne);
+    
+    // Only install this information if it tells us something.
+    if (NumSignBits != 1 || KnownZero != 0 || KnownOne != 0) {
+      DestReg -= TargetRegisterInfo::FirstVirtualRegister;
+      FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
+      if (DestReg >= FLI.LiveOutRegInfo.size())
+        FLI.LiveOutRegInfo.resize(DestReg+1);
+      FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[DestReg];
+      LOI.NumSignBits = NumSignBits;
+      LOI.KnownOne = KnownOne;
+      LOI.KnownZero = KnownZero;
+    }
+  }
+}
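
ComputeLiveOutVRegInfo walks backwards from the root along chain operands
with an explicit worklist and a visited set, and records sign-bit/known-bit
facts for every virtual register it sees written by a CopyToReg. The
traversal itself is the standard worklist pattern; a toy standalone version
over a hypothetical node type, not the SelectionDAG classes:

#include <cstdio>
#include <unordered_set>
#include <vector>

// Toy node: an id plus the ids of its "chain" predecessors.
struct Node { int Id; std::vector<int> ChainOps; };

// Visit every node reachable from Root along chain operands exactly once --
// the same worklist-plus-visited-set shape ComputeLiveOutVRegInfo uses.
static void visitChain(const std::vector<Node> &Nodes, int Root) {
  std::unordered_set<int> Visited;
  std::vector<int> Worklist{Root};
  while (!Worklist.empty()) {
    int Id = Worklist.back();
    Worklist.pop_back();
    if (!Visited.insert(Id).second)
      continue;                       // already processed
    std::printf("visiting node %d\n", Id);
    for (int Op : Nodes[Id].ChainOps)
      Worklist.push_back(Op);
  }
}

int main() {
  std::vector<Node> Nodes = {{0, {}}, {1, {0}}, {2, {0, 1}}};
  visitChain(Nodes, 2);   // visits 2, 1, 0 once each
  return 0;
}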
+
 void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) {
   DOUT << "Lowered selection DAG:\n";
   DEBUG(DAG.dump());
 
   // Run the DAG combiner in pre-legalize mode.
-  DAG.Combine(false, *AA);
+  if (TimePassesIsEnabled) {
+    NamedRegionTimer T("DAG Combining 1");
+    DAG.Combine(false, *AA);
+  } else {
+    DAG.Combine(false, *AA);
+  }
   
   DOUT << "Optimized lowered selection DAG:\n";
   DEBUG(DAG.dump());
@@ -4712,22 +5299,57 @@
   DAG.LegalizeTypes();
   // Someday even later, enable a dag combine pass here.
 #endif
-  DAG.Legalize();
+  if (TimePassesIsEnabled) {
+    NamedRegionTimer T("DAG Legalization");
+    DAG.Legalize();
+  } else {
+    DAG.Legalize();
+  }
   
   DOUT << "Legalized selection DAG:\n";
   DEBUG(DAG.dump());
   
   // Run the DAG combiner in post-legalize mode.
-  DAG.Combine(true, *AA);
+  if (TimePassesIsEnabled) {
+    NamedRegionTimer T("DAG Combining 2");
+    DAG.Combine(true, *AA);
+  } else {
+    DAG.Combine(true, *AA);
+  }
   
   DOUT << "Optimized legalized selection DAG:\n";
   DEBUG(DAG.dump());
 
   if (ViewISelDAGs) DAG.viewGraph();
+  
+  if (!FastISel && EnableValueProp)
+    ComputeLiveOutVRegInfo(DAG);
 
   // Third, instruction select all of the operations to machine code, adding the
   // code to the MachineBasicBlock.
-  InstructionSelectBasicBlock(DAG);
+  if (TimePassesIsEnabled) {
+    NamedRegionTimer T("Instruction Selection");
+    InstructionSelect(DAG);
+  } else {
+    InstructionSelect(DAG);
+  }
+
+  // Emit machine code to BB.  This can change 'BB' to the last block being 
+  // inserted into.
+  if (TimePassesIsEnabled) {
+    NamedRegionTimer T("Instruction Scheduling");
+    ScheduleAndEmitDAG(DAG);
+  } else {
+    ScheduleAndEmitDAG(DAG);
+  }
+
+  // Perform target specific isel post processing.
+  if (TimePassesIsEnabled) {
+    NamedRegionTimer T("Instruction Selection Post Processing");
+    InstructionSelectPostProcessing(DAG);
+  } else {
+    InstructionSelectPostProcessing(DAG);
+  }
   
   DOUT << "Selected machine code:\n";
   DEBUG(BB->dump());
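
Each compilation phase above is wrapped in the same if (TimePassesIsEnabled)
shape so a NamedRegionTimer only exists when timing is requested; its
destructor stops the clock when the scope ends. A generic standalone sketch
of that RAII timing idiom (hypothetical RegionTimer, not the LLVM class):

#include <chrono>
#include <cstdio>

// Minimal RAII region timer in the spirit of NamedRegionTimer: the
// constructor starts the clock, the destructor reports elapsed time.
class RegionTimer {
  const char *Name;
  std::chrono::steady_clock::time_point Start;
public:
  explicit RegionTimer(const char *N)
      : Name(N), Start(std::chrono::steady_clock::now()) {}
  ~RegionTimer() {
    auto Ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                  std::chrono::steady_clock::now() - Start).count();
    std::printf("%s: %lld ms\n", Name, static_cast<long long>(Ms));
  }
};

static bool TimePassesEnabled = true;   // stand-in for -time-passes

static void runPhase() { /* the work being timed */ }

int main() {
  if (TimePassesEnabled) {
    RegionTimer T("DAG Combining 1");   // scoped: stops when the block ends
    runPhase();
  } else {
    runPhase();
  }
  return 0;
}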
@@ -4737,7 +5359,8 @@
                                         FunctionLoweringInfo &FuncInfo) {
   std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
   {
-    SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
+    SelectionDAG DAG(TLI, MF, FuncInfo, 
+                     getAnalysisToUpdate<MachineModuleInfo>());
     CurDAG = &DAG;
   
     // First step, lower LLVM code to some DAG.  This DAG may use operations and
@@ -4771,7 +5394,8 @@
   for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i) {
     // Lower header first, if it wasn't already lowered
     if (!BitTestCases[i].Emitted) {
-      SelectionDAG HSDAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
+      SelectionDAG HSDAG(TLI, MF, FuncInfo, 
+                         getAnalysisToUpdate<MachineModuleInfo>());
       CurDAG = &HSDAG;
       SelectionDAGLowering HSDL(HSDAG, TLI, *AA, FuncInfo, GCI);
       // Set the current basic block to the mbb we wish to insert the code into
@@ -4784,7 +5408,8 @@
     }    
 
     for (unsigned j = 0, ej = BitTestCases[i].Cases.size(); j != ej; ++j) {
-      SelectionDAG BSDAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
+      SelectionDAG BSDAG(TLI, MF, FuncInfo, 
+                         getAnalysisToUpdate<MachineModuleInfo>());
       CurDAG = &BSDAG;
       SelectionDAGLowering BSDL(BSDAG, TLI, *AA, FuncInfo, GCI);
       // Set the current basic block to the mbb we wish to insert the code into
@@ -4841,7 +5466,8 @@
   for (unsigned i = 0, e = JTCases.size(); i != e; ++i) {
     // Lower header first, if it wasn't already lowered
     if (!JTCases[i].first.Emitted) {
-      SelectionDAG HSDAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
+      SelectionDAG HSDAG(TLI, MF, FuncInfo, 
+                         getAnalysisToUpdate<MachineModuleInfo>());
       CurDAG = &HSDAG;
       SelectionDAGLowering HSDL(HSDAG, TLI, *AA, FuncInfo, GCI);
       // Set the current basic block to the mbb we wish to insert the code into
@@ -4853,7 +5479,8 @@
       CodeGenAndEmitDAG(HSDAG);
     }
     
-    SelectionDAG JSDAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
+    SelectionDAG JSDAG(TLI, MF, FuncInfo, 
+                       getAnalysisToUpdate<MachineModuleInfo>());
     CurDAG = &JSDAG;
     SelectionDAGLowering JSDL(JSDAG, TLI, *AA, FuncInfo, GCI);
     // Set the current basic block to the mbb we wish to insert the code into
@@ -4901,7 +5528,8 @@
   // If we generated any switch lowering information, build and codegen any
   // additional DAGs necessary.
   for (unsigned i = 0, e = SwitchCases.size(); i != e; ++i) {
-    SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineModuleInfo>());
+    SelectionDAG SDAG(TLI, MF, FuncInfo, 
+                      getAnalysisToUpdate<MachineModuleInfo>());
     CurDAG = &SDAG;
     SelectionDAGLowering SDL(SDAG, TLI, *AA, FuncInfo, GCI);
     
@@ -4959,7 +5587,7 @@
     RegisterScheduler::setDefault(Ctor);
   }
   
-  ScheduleDAG *SL = Ctor(this, &DAG, BB);
+  ScheduleDAG *SL = Ctor(this, &DAG, BB, FastISel);
   BB = SL->Run();
 
   if (ViewSUnitDAGs) SL->viewGraph();
@@ -5071,7 +5699,7 @@
       }
       
       // Add this to the output node.
-      MVT::ValueType IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
+      MVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
       Ops.push_back(DAG.getTargetConstant(4/*MEM*/ | (SelOps.size() << 3),
                                           IntPtrTy));
       Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());

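A note on the last hunk above: the inline-asm memory operand is encoded as a
single flag word, with the operand kind in the low three bits (4 = MEM) and
the number of selected operands in the bits above them. The following
stand-alone sketch (illustrative only, not part of this commit; the helper
names are made up) shows that packing and the matching decode:

  #include <cassert>
  #include <cstdio>

  // Hypothetical helpers mirroring the "4/*MEM*/ | (SelOps.size() << 3)"
  // expression in the hunk above: kind in bits [2:0], count in bits [31:3].
  static unsigned encodeMemOperandFlags(unsigned NumOps) {
    return 4u /*MEM*/ | (NumOps << 3);
  }
  static unsigned operandKind(unsigned Flags)  { return Flags & 7u; }
  static unsigned operandCount(unsigned Flags) { return Flags >> 3; }

  int main() {
    unsigned Flags = encodeMemOperandFlags(2);
    assert(operandKind(Flags) == 4 && operandCount(Flags) == 2);
    std::printf("flags=0x%x kind=%u ops=%u\n",
                Flags, operandKind(Flags), operandCount(Flags));
    return 0;
  }
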
Modified: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp Sun Jul  6 15:45:41 2008
@@ -18,6 +18,7 @@
 #include "llvm/CodeGen/ScheduleDAG.h"
 #include "llvm/CodeGen/MachineConstantPool.h"
 #include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Support/GraphWriter.h"
@@ -48,7 +49,7 @@
     template<typename EdgeIter>
     static std::string getEdgeAttributes(const void *Node, EdgeIter EI) {
       SDOperand Op = EI.getNode()->getOperand(EI.getOperand());
-      MVT::ValueType VT = Op.getValueType();
+      MVT VT = Op.getValueType();
       if (VT == MVT::Flag)
         return "color=red,style=bold";
       else if (VT == MVT::Other)
@@ -90,7 +91,7 @@
     if (Node->getValueType(i) == MVT::Other)
       Op += ":ch";
     else
-      Op = Op + ":" + MVT::getValueTypeString(Node->getValueType(i));
+      Op = Op + ":" + Node->getValueType(i).getMVTString();
     
   if (const ConstantSDNode *CSDN = dyn_cast<ConstantSDNode>(Node)) {
     Op += ": " + utostr(CSDN->getValue());
@@ -138,6 +139,13 @@
     } else {
       Op += " #" + utostr(R->getReg());
     }
+  } else if (const DbgStopPointSDNode *D = dyn_cast<DbgStopPointSDNode>(Node)) {
+    Op += ": " + D->getCompileUnit()->getFileName();
+    Op += ":" + utostr(D->getLine());
+    if (D->getColumn() != 0)
+      Op += ":" + utostr(D->getColumn());
+  } else if (const LabelSDNode *L = dyn_cast<LabelSDNode>(Node)) {
+    Op += ": LabelID=" + utostr(L->getLabelID());
   } else if (const ExternalSymbolSDNode *ES =
              dyn_cast<ExternalSymbolSDNode>(Node)) {
     Op += "'" + std::string(ES->getSymbol()) + "'";
@@ -154,9 +162,7 @@
   } else if (const ARG_FLAGSSDNode *N = dyn_cast<ARG_FLAGSSDNode>(Node)) {
     Op = Op + " AF=" + N->getArgFlags().getArgFlagsString();
   } else if (const VTSDNode *N = dyn_cast<VTSDNode>(Node)) {
-    Op = Op + " VT=" + MVT::getValueTypeString(N->getVT());
-  } else if (const StringSDNode *N = dyn_cast<StringSDNode>(Node)) {
-    Op = Op + "\"" + N->getValue() + "\"";
+    Op = Op + " VT=" + N->getVT().getMVTString();
   } else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(Node)) {
     bool doExt = true;
     switch (LD->getExtensionType()) {
@@ -172,7 +178,7 @@
       break;
     }
     if (doExt)
-      Op += MVT::getValueTypeString(LD->getMemoryVT()) + ">";
+      Op += LD->getMemoryVT().getMVTString() + ">";
     if (LD->isVolatile())
       Op += "<V>";
     Op += LD->getIndexedModeName(LD->getAddressingMode());
@@ -180,7 +186,7 @@
       Op += " A=" + utostr(LD->getAlignment());
   } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(Node)) {
     if (ST->isTruncatingStore())
-      Op += "<trunc " + MVT::getValueTypeString(ST->getMemoryVT()) + ">";
+      Op += "<trunc " + ST->getMemoryVT().getMVTString() + ">";
     if (ST->isVolatile())
       Op += "<V>";
     Op += ST->getIndexedModeName(ST->getAddressingMode());
@@ -301,8 +307,9 @@
     static void addCustomGraphFeatures(ScheduleDAG *G,
                                        GraphWriter<ScheduleDAG*> &GW) {
       GW.emitSimpleNode(0, "plaintext=circle", "GraphRoot");
-      if (G->DAG.getRoot().Val)
-        GW.emitEdge(0, -1, G->SUnitMap[G->DAG.getRoot().Val].front(), -1, "");
+      const SDNode *N = G->DAG.getRoot().Val;
+      if (N && N->getNodeId() != -1)
+        GW.emitEdge(0, -1, &G->SUnits[N->getNodeId()], -1, "");
     }
   };
 }

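The final hunk above stops going through SUnitMap and instead indexes the
SUnits array directly by the root node's id, guarding against nodes whose id
was never assigned (-1). A minimal sketch of that lookup pattern, with
hypothetical Node/Unit types standing in for SDNode/SUnit (illustrative only,
not part of this commit):

  #include <cassert>
  #include <vector>

  // Hypothetical stand-ins for SDNode/SUnit; only the id-based indexing
  // pattern from the hunk above is illustrated here.
  struct Node { int Id; };
  struct Unit { const Node *N; };

  int main() {
    std::vector<Node> Nodes(3);
    std::vector<Unit> Units(3);
    for (int i = 0; i != 3; ++i) { Nodes[i].Id = i; Units[i].N = &Nodes[i]; }

    const Node *Root = &Nodes[2];
    // The "-1" check mirrors the patch: skip nodes that never got an id.
    if (Root && Root->Id != -1) {
      const Unit &U = Units[Root->Id];  // direct index instead of a map lookup
      assert(U.N == Root);
    }
    return 0;
  }
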
Modified: llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp Sun Jul  6 15:45:41 2008
@@ -19,6 +19,7 @@
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/GlobalVariable.h"
 #include "llvm/DerivedTypes.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/SelectionDAG.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/STLExtras.h"
@@ -96,6 +97,7 @@
   Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
   Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
   Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
+  Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi";
   Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
   Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
   Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
@@ -107,6 +109,7 @@
   Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
   Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
   Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti";
+  Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi";
   Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
   Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti";
   Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
@@ -165,7 +168,7 @@
 
 TargetLowering::TargetLowering(TargetMachine &tm)
   : TM(tm), TD(TM.getTargetData()) {
-  assert(ISD::BUILTIN_OP_END <= 156 &&
+  assert(ISD::BUILTIN_OP_END <= OpActionsCapacity &&
          "Fixed size array in TargetLowering is not large enough!");
   // All operations default to being supported.
   memset(OpActions, 0, sizeof(OpActions));
@@ -179,12 +182,12 @@
     // Default all indexed load / store to expand.
     for (unsigned IM = (unsigned)ISD::PRE_INC;
          IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
-      setIndexedLoadAction(IM, (MVT::ValueType)VT, Expand);
-      setIndexedStoreAction(IM, (MVT::ValueType)VT, Expand);
+      setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand);
+      setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand);
     }
     
     // These operations default to expand.
-    setOperationAction(ISD::FGETSIGN, (MVT::ValueType)VT, Expand);
+    setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand);
   }
 
   // Most targets ignore the @llvm.prefetch intrinsic.
@@ -243,36 +246,40 @@
   // Everything defaults to needing one register.
   for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
     NumRegistersForVT[i] = 1;
-    RegisterTypeForVT[i] = TransformToType[i] = i;
+    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
   }
   // ...except isVoid, which doesn't need any registers.
   NumRegistersForVT[MVT::isVoid] = 0;
 
   // Find the largest integer register class.
-  unsigned LargestIntReg = MVT::i128;
+  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
   for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
     assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
 
   // Every integer value type larger than this largest register takes twice as
   // many registers to represent as the previous ValueType.
-  for (MVT::ValueType ExpandedReg = LargestIntReg + 1;
-       MVT::isInteger(ExpandedReg); ++ExpandedReg) {
+  for (unsigned ExpandedReg = LargestIntReg + 1; ; ++ExpandedReg) {
+    MVT EVT = (MVT::SimpleValueType)ExpandedReg;
+    if (!EVT.isInteger())
+      break;
     NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
-    RegisterTypeForVT[ExpandedReg] = LargestIntReg;
-    TransformToType[ExpandedReg] = ExpandedReg - 1;
-    ValueTypeActions.setTypeAction(ExpandedReg, Expand);
+    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
+    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
+    ValueTypeActions.setTypeAction(EVT, Expand);
   }
 
   // Inspect all of the ValueType's smaller than the largest integer
   // register to see which ones need promotion.
-  MVT::ValueType LegalIntReg = LargestIntReg;
-  for (MVT::ValueType IntReg = LargestIntReg - 1;
-       IntReg >= MVT::i1; --IntReg) {
-    if (isTypeLegal(IntReg)) {
+  unsigned LegalIntReg = LargestIntReg;
+  for (unsigned IntReg = LargestIntReg - 1;
+       IntReg >= (unsigned)MVT::i1; --IntReg) {
+    MVT IVT = (MVT::SimpleValueType)IntReg;
+    if (isTypeLegal(IVT)) {
       LegalIntReg = IntReg;
     } else {
-      RegisterTypeForVT[IntReg] = TransformToType[IntReg] = LegalIntReg;
-      ValueTypeActions.setTypeAction(IntReg, Promote);
+      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
+        (MVT::SimpleValueType)LegalIntReg;
+      ValueTypeActions.setTypeAction(IVT, Promote);
     }
   }
 
@@ -310,18 +317,19 @@
   }
   
   // Loop over all of the vector value types to see which need transformations.
-  for (MVT::ValueType i = MVT::FIRST_VECTOR_VALUETYPE;
-       i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
-    if (!isTypeLegal(i)) {
-      MVT::ValueType IntermediateVT, RegisterVT;
+  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
+       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
+    MVT VT = (MVT::SimpleValueType)i;
+    if (!isTypeLegal(VT)) {
+      MVT IntermediateVT, RegisterVT;
       unsigned NumIntermediates;
       NumRegistersForVT[i] =
-        getVectorTypeBreakdown(i,
+        getVectorTypeBreakdown(VT,
                                IntermediateVT, NumIntermediates,
                                RegisterVT);
       RegisterTypeForVT[i] = RegisterVT;
       TransformToType[i] = MVT::Other; // this isn't actually used
-      ValueTypeActions.setTypeAction(i, Expand);
+      ValueTypeActions.setTypeAction(VT, Expand);
     }
   }
 }
@@ -331,8 +339,7 @@
 }
 
 
-MVT::ValueType
-TargetLowering::getSetCCResultType(const SDOperand &) const {
+MVT TargetLowering::getSetCCResultType(const SDOperand &) const {
   return getValueType(TD->getIntPtrType());
 }
 
@@ -346,13 +353,13 @@
 /// register.  It also returns the VT and quantity of the intermediate values
 /// before they are promoted/expanded.
 ///
-unsigned TargetLowering::getVectorTypeBreakdown(MVT::ValueType VT, 
-                                                MVT::ValueType &IntermediateVT,
+unsigned TargetLowering::getVectorTypeBreakdown(MVT VT,
+                                                MVT &IntermediateVT,
                                                 unsigned &NumIntermediates,
-                                      MVT::ValueType &RegisterVT) const {
+                                      MVT &RegisterVT) const {
   // Figure out the right, legal destination reg to copy into.
-  unsigned NumElts = MVT::getVectorNumElements(VT);
-  MVT::ValueType EltTy = MVT::getVectorElementType(VT);
+  unsigned NumElts = VT.getVectorNumElements();
+  MVT EltTy = VT.getVectorElementType();
   
   unsigned NumVectorRegs = 1;
   
@@ -365,24 +372,23 @@
   
   // Divide the input until we get to a supported size.  This will always
   // end with a scalar if the target doesn't support vectors.
-  while (NumElts > 1 &&
-         !isTypeLegal(MVT::getVectorType(EltTy, NumElts))) {
+  while (NumElts > 1 && !isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
     NumElts >>= 1;
     NumVectorRegs <<= 1;
   }
 
   NumIntermediates = NumVectorRegs;
   
-  MVT::ValueType NewVT = MVT::getVectorType(EltTy, NumElts);
+  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
   if (!isTypeLegal(NewVT))
     NewVT = EltTy;
   IntermediateVT = NewVT;
 
-  MVT::ValueType DestVT = getTypeToTransformTo(NewVT);
+  MVT DestVT = getTypeToTransformTo(NewVT);
   RegisterVT = DestVT;
-  if (DestVT < NewVT) {
+  if (DestVT.bitsLT(NewVT)) {
     // Value is expanded, e.g. i64 -> i16.
-    return NumVectorRegs*(MVT::getSizeInBits(NewVT)/MVT::getSizeInBits(DestVT));
+    return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits());
   } else {
     // Otherwise, promotion or legal types use the same number of registers as
     // the vector decimated to the appropriate level.
@@ -424,7 +430,7 @@
   case ISD::XOR:
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
       if (C->getAPIntValue().intersects(~Demanded)) {
-        MVT::ValueType VT = Op.getValueType();
+        MVT VT = Op.getValueType();
         SDOperand New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
                                     DAG.getConstant(Demanded &
                                                       C->getAPIntValue(), 
@@ -596,7 +602,7 @@
     //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
     if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known
       if ((KnownOne & KnownOne2) == KnownOne) {
-        MVT::ValueType VT = Op.getValueType();
+        MVT VT = Op.getValueType();
         SDOperand ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT);
         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, VT, Op.getOperand(0),
                                                  ANDC));
@@ -611,7 +617,7 @@
       // if we can expand it to have all bits set, do it
       if (Expanded.isAllOnesValue()) {
         if (Expanded != C->getAPIntValue()) {
-          MVT::ValueType VT = Op.getValueType();
+          MVT VT = Op.getValueType();
           SDOperand New = TLO.DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
                                           TLO.DAG.getConstant(Expanded, VT));
           return TLO.CombineTo(Op, New);
@@ -687,7 +693,7 @@
           
           SDOperand NewSA = 
             TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
-          MVT::ValueType VT = Op.getValueType();
+          MVT VT = Op.getValueType();
           return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT,
                                                    InOp.getOperand(0), NewSA));
         }
@@ -704,9 +710,9 @@
     break;
   case ISD::SRL:
     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
-      MVT::ValueType VT = Op.getValueType();
+      MVT VT = Op.getValueType();
       unsigned ShAmt = SA->getValue();
-      unsigned VTSize = MVT::getSizeInBits(VT);
+      unsigned VTSize = VT.getSizeInBits();
       SDOperand InOp = Op.getOperand(0);
       
       // If the shift count is an invalid immediate, don't do anything.
@@ -748,7 +754,7 @@
     break;
   case ISD::SRA:
     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
-      MVT::ValueType VT = Op.getValueType();
+      MVT VT = Op.getValueType();
       unsigned ShAmt = SA->getValue();
       
       // If the shift count is an invalid immediate, don't do anything.
@@ -761,7 +767,7 @@
       // demand the input sign bit.
       APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
       if (HighBits.intersects(NewMask))
-        InDemandedMask |= APInt::getSignBit(MVT::getSizeInBits(VT));
+        InDemandedMask |= APInt::getSignBit(VT.getSizeInBits());
       
       if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
                                KnownZero, KnownOne, TLO, Depth+1))
@@ -784,22 +790,22 @@
     }
     break;
   case ISD::SIGN_EXTEND_INREG: {
-    MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+    MVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
 
     // Sign extension.  Compute the demanded bits in the result that are not 
     // present in the input.
     APInt NewBits = APInt::getHighBitsSet(BitWidth,
-                                          BitWidth - MVT::getSizeInBits(EVT)) &
+                                          BitWidth - EVT.getSizeInBits()) &
                     NewMask;
     
     // If none of the extended bits are demanded, eliminate the sextinreg.
     if (NewBits == 0)
       return TLO.CombineTo(Op, Op.getOperand(0));
 
-    APInt InSignBit = APInt::getSignBit(MVT::getSizeInBits(EVT));
+    APInt InSignBit = APInt::getSignBit(EVT.getSizeInBits());
     InSignBit.zext(BitWidth);
     APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth,
-                                                   MVT::getSizeInBits(EVT)) &
+                                                   EVT.getSizeInBits()) &
                               NewMask;
     
     // Since the sign extended bits are demanded, we know that the sign
@@ -851,8 +857,8 @@
     break;
   }
   case ISD::SIGN_EXTEND: {
-    MVT::ValueType InVT = Op.getOperand(0).getValueType();
-    unsigned InBits = MVT::getSizeInBits(InVT);
+    MVT InVT = Op.getOperand(0).getValueType();
+    unsigned InBits = InVT.getSizeInBits();
     APInt InMask    = APInt::getLowBitsSet(BitWidth, InBits);
     APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits);
     APInt NewBits   = ~InMask & NewMask;
@@ -947,9 +953,9 @@
     break;
   }
   case ISD::AssertZext: {
-    MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+    MVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
     APInt InMask = APInt::getLowBitsSet(BitWidth,
-                                        MVT::getSizeInBits(VT));
+                                        VT.getSizeInBits());
     if (SimplifyDemandedBits(Op.getOperand(0), InMask & NewMask,
                              KnownZero, KnownOne, TLO, Depth+1))
       return true;
@@ -961,7 +967,7 @@
 #if 0
     // If this is an FP->Int bitcast and if the sign bit is the only thing that
     // is demanded, turn this into a FGETSIGN.
-    if (NewMask == MVT::getIntVTSignBit(Op.getValueType()) &&
+    if (NewMask == MVT::getIntegerVTSignBit(Op.getValueType()) &&
         MVT::isFloatingPoint(Op.getOperand(0).getValueType()) &&
         !MVT::isVector(Op.getOperand(0).getValueType())) {
       // Only do this xform if FGETSIGN is valid or if before legalize.
@@ -971,7 +977,7 @@
         // place.  We expect the SHL to be eliminated by other optimizations.
         SDOperand Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(), 
                                          Op.getOperand(0));
-        unsigned ShVal = MVT::getSizeInBits(Op.getValueType())-1;
+        unsigned ShVal = Op.getValueType().getSizeInBits()-1;
         SDOperand ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy());
         return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, Op.getValueType(),
                                                  Sign, ShAmt));
@@ -979,17 +985,7 @@
     }
 #endif
     break;
-  case ISD::ADD:
-  case ISD::SUB:
-  case ISD::INTRINSIC_WO_CHAIN:
-  case ISD::INTRINSIC_W_CHAIN:
-  case ISD::INTRINSIC_VOID:
-  case ISD::CTTZ:
-  case ISD::CTLZ:
-  case ISD::CTPOP:
-  case ISD::LOAD:
-  case ISD::SETCC:
-  case ISD::FGETSIGN:
+  default:
     // Just use ComputeMaskedBits to compute output bits.
     TLO.DAG.ComputeMaskedBits(Op, NewMask, KnownZero, KnownOne, Depth);
     break;
@@ -1039,7 +1035,7 @@
 /// SimplifySetCC - Try to simplify a setcc built with the specified operands 
 /// and cc. If it is unable to simplify it, return a null SDOperand.
 SDOperand
-TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1,
+TargetLowering::SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1,
                               ISD::CondCode Cond, bool foldBooleans,
                               DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -1066,7 +1062,7 @@
           N0.getOperand(1).getOpcode() == ISD::Constant) {
         unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
         if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
-            ShAmt == Log2_32(MVT::getSizeInBits(N0.getValueType()))) {
+            ShAmt == Log2_32(N0.getValueType().getSizeInBits())) {
           if ((C1 == 0) == (Cond == ISD::SETEQ)) {
             // (srl (ctlz x), 5) == 0  -> X != 0
             // (srl (ctlz x), 5) != 1  -> X != 0
@@ -1084,7 +1080,7 @@
       
       // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
       if (N0.getOpcode() == ISD::ZERO_EXTEND) {
-        unsigned InSize = MVT::getSizeInBits(N0.getOperand(0).getValueType());
+        unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits();
 
         // If the comparison constant has bits in the upper part, the
         // zero-extended value could never match.
@@ -1127,10 +1123,10 @@
         }
       } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
                  (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
-        MVT::ValueType ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
-        unsigned ExtSrcTyBits = MVT::getSizeInBits(ExtSrcTy);
-        MVT::ValueType ExtDstTy = N0.getValueType();
-        unsigned ExtDstTyBits = MVT::getSizeInBits(ExtDstTy);
+        MVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
+        unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
+        MVT ExtDstTy = N0.getValueType();
+        unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
 
         // If the extended part has any inconsistent bits, it cannot ever
         // compare equal.  In other words, they have to be all ones or all
@@ -1141,7 +1137,7 @@
           return DAG.getConstant(Cond == ISD::SETNE, VT);
         
         SDOperand ZextOp;
-        MVT::ValueType Op0Ty = N0.getOperand(0).getValueType();
+        MVT Op0Ty = N0.getOperand(0).getValueType();
         if (Op0Ty == ExtSrcTy) {
           ZextOp = N0.getOperand(0);
         } else {
@@ -1170,7 +1166,7 @@
           // Invert the condition.
           ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
           CC = ISD::getSetCCInverse(CC, 
-                               MVT::isInteger(N0.getOperand(0).getValueType()));
+                                   N0.getOperand(0).getValueType().isInteger());
           return DAG.getSetCC(VT, N0.getOperand(0), N0.getOperand(1), CC);
         }
         
@@ -1205,7 +1201,7 @@
       }
       
       APInt MinVal, MaxVal;
-      unsigned OperandBitSize = MVT::getSizeInBits(N1C->getValueType(0));
+      unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits();
       if (ISD::isSignedIntSetCC(Cond)) {
         MinVal = APInt::getSignedMinValue(OperandBitSize);
         MaxVal = APInt::getSignedMaxValue(OperandBitSize);
@@ -1322,7 +1318,7 @@
 
   if (N0 == N1) {
     // We can always fold X == X for integer setcc's.
-    if (MVT::isInteger(N0.getValueType()))
+    if (N0.getValueType().isInteger())
       return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
     unsigned UOF = ISD::getUnorderedFlavor(Cond);
     if (UOF == 2)   // FP operators that are undefined on NaNs.
@@ -1337,7 +1333,7 @@
   }
 
   if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
-      MVT::isInteger(N0.getValueType())) {
+      N0.getValueType().isInteger()) {
     if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
         N0.getOpcode() == ISD::XOR) {
       // Simplify (X+Y) == (X+Z) -->  Y == Z
@@ -1488,6 +1484,75 @@
   return SDOperand();
 }
 
+/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
+/// node is a GlobalAddress + offset.
+bool TargetLowering::isGAPlusOffset(SDNode *N, GlobalValue* &GA,
+                                    int64_t &Offset) const {
+  if (isa<GlobalAddressSDNode>(N)) {
+    GlobalAddressSDNode *GASD = cast<GlobalAddressSDNode>(N);
+    GA = GASD->getGlobal();
+    Offset += GASD->getOffset();
+    return true;
+  }
+
+  if (N->getOpcode() == ISD::ADD) {
+    SDOperand N1 = N->getOperand(0);
+    SDOperand N2 = N->getOperand(1);
+    if (isGAPlusOffset(N1.Val, GA, Offset)) {
+      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
+      if (V) {
+        Offset += V->getSignExtended();
+        return true;
+      }
+    } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
+      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
+      if (V) {
+        Offset += V->getSignExtended();
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+
+/// isConsecutiveLoad - Return true if LD (which must be a LoadSDNode) is
+/// loading 'Bytes' bytes from a location that is 'Dist' units away from the
+/// location that the 'Base' load is loading from.
+bool TargetLowering::isConsecutiveLoad(SDNode *LD, SDNode *Base,
+                                       unsigned Bytes, int Dist,
+                                       const MachineFrameInfo *MFI) const {
+  if (LD->getOperand(0).Val != Base->getOperand(0).Val)
+    return false;
+  MVT VT = LD->getValueType(0);
+  if (VT.getSizeInBits() / 8 != Bytes)
+    return false;
+
+  SDOperand Loc = LD->getOperand(1);
+  SDOperand BaseLoc = Base->getOperand(1);
+  if (Loc.getOpcode() == ISD::FrameIndex) {
+    if (BaseLoc.getOpcode() != ISD::FrameIndex)
+      return false;
+    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
+    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
+    int FS  = MFI->getObjectSize(FI);
+    int BFS = MFI->getObjectSize(BFI);
+    if (FS != BFS || FS != (int)Bytes) return false;
+    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
+  }
+
+  GlobalValue *GV1 = NULL;
+  GlobalValue *GV2 = NULL;
+  int64_t Offset1 = 0;
+  int64_t Offset2 = 0;
+  bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
+  bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
+  if (isGA1 && isGA2 && GV1 == GV2)
+    return Offset1 == (Offset2 + Dist*Bytes);
+  return false;
+}
+
+
 SDOperand TargetLowering::
 PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
   // Default implementation: no optimization.
@@ -1498,6 +1563,7 @@
 //  Inline Assembler Implementation Methods
 //===----------------------------------------------------------------------===//
 
+
 TargetLowering::ConstraintType
 TargetLowering::getConstraintType(const std::string &Constraint) const {
   // FIXME: lots more standard ones to handle.
@@ -1534,14 +1600,12 @@
 /// LowerXConstraint - try to replace an X constraint, which matches anything,
 /// with another that has more specific requirements based on the type of the
 /// corresponding operand.
-void TargetLowering::lowerXConstraint(MVT::ValueType ConstraintVT, 
-                                      std::string& s) const {
-  if (MVT::isInteger(ConstraintVT))
-    s = "r";
-  else if (MVT::isFloatingPoint(ConstraintVT))
-    s = "f";      // works for many targets
-  else 
-    s = "";
+const char *TargetLowering::LowerXConstraint(MVT ConstraintVT) const{
+  if (ConstraintVT.isInteger())
+    return "r";
+  if (ConstraintVT.isFloatingPoint())
+    return "f";      // works for many targets
+  return 0;
 }
 
 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
@@ -1549,7 +1613,7 @@
 void TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
                                                   char ConstraintLetter,
                                                   std::vector<SDOperand> &Ops,
-                                                  SelectionDAG &DAG) {
+                                                  SelectionDAG &DAG) const {
   switch (ConstraintLetter) {
   default: break;
   case 'X':     // Allows any operand; labels (basic block) use this.
@@ -1604,14 +1668,14 @@
 
 std::vector<unsigned> TargetLowering::
 getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                  MVT::ValueType VT) const {
+                                  MVT VT) const {
   return std::vector<unsigned>();
 }
 
 
 std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
 getRegForInlineAsmConstraint(const std::string &Constraint,
-                             MVT::ValueType VT) const {
+                             MVT VT) const {
   if (Constraint[0] != '{')
     return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
   assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");
@@ -1649,6 +1713,122 @@
 }
 
 //===----------------------------------------------------------------------===//
+// Constraint Selection.
+
+/// getConstraintGenerality - Return an integer indicating how general CT
+/// is.
+static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
+  switch (CT) {
+  default: assert(0 && "Unknown constraint type!");
+  case TargetLowering::C_Other:
+  case TargetLowering::C_Unknown:
+    return 0;
+  case TargetLowering::C_Register:
+    return 1;
+  case TargetLowering::C_RegisterClass:
+    return 2;
+  case TargetLowering::C_Memory:
+    return 3;
+  }
+}
+
+/// ChooseConstraint - If there are multiple different constraints that we
+/// could pick for this operand (e.g. "imr") try to pick the 'best' one.
+/// This is somewhat tricky: constraints fall into four classes:
+///    Other         -> immediates and magic values
+///    Register      -> one specific register
+///    RegisterClass -> a group of regs
+///    Memory        -> memory
+/// Ideally, we would pick the most specific constraint possible: if we have
+/// something that fits into a register, we would pick it.  The problem here
+/// is that if we have something that could either be in a register or in
+/// memory, that use of the register could cause selection of *other*
+/// operands to fail: they might only succeed if we pick memory.  Because of
+/// this, the heuristic we use is:
+///
+///  1) If there is an 'other' constraint, and if the operand is valid for
+///     that constraint, use it.  This makes us take advantage of 'i'
+///     constraints when available.
+///  2) Otherwise, pick the most general constraint present.  This prefers
+///     'm' over 'r', for example.
+///
+static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
+                             const TargetLowering &TLI,
+                             SDOperand Op, SelectionDAG *DAG) {
+  assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
+  unsigned BestIdx = 0;
+  TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
+  int BestGenerality = -1;
+  
+  // Loop over the options, keeping track of the most general one.
+  for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
+    TargetLowering::ConstraintType CType =
+      TLI.getConstraintType(OpInfo.Codes[i]);
+    
+    // If this is an 'other' constraint, see if the operand is valid for it.
+    // For example, on X86 we might have an 'rI' constraint.  If the operand
+    // is an integer in the range [0..31] we want to use I (saving a load
+    // of a register), otherwise we must use 'r'.
+    if (CType == TargetLowering::C_Other && Op.Val) {
+      assert(OpInfo.Codes[i].size() == 1 &&
+             "Unhandled multi-letter 'other' constraint");
+      std::vector<SDOperand> ResultOps;
+      TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0],
+                                       ResultOps, *DAG);
+      if (!ResultOps.empty()) {
+        BestType = CType;
+        BestIdx = i;
+        break;
+      }
+    }
+    
+    // This constraint letter is more general than the previous one, use it.
+    int Generality = getConstraintGenerality(CType);
+    if (Generality > BestGenerality) {
+      BestType = CType;
+      BestIdx = i;
+      BestGenerality = Generality;
+    }
+  }
+  
+  OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
+  OpInfo.ConstraintType = BestType;
+}
+
+/// ComputeConstraintToUse - Determines the constraint code and constraint
+/// type to use for the specific AsmOperandInfo, setting
+/// OpInfo.ConstraintCode and OpInfo.ConstraintType.
+void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
+                                            SDOperand Op, 
+                                            SelectionDAG *DAG) const {
+  assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
+  
+  // Single-letter constraints ('r') are very common.
+  if (OpInfo.Codes.size() == 1) {
+    OpInfo.ConstraintCode = OpInfo.Codes[0];
+    OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
+  } else {
+    ChooseConstraint(OpInfo, *this, Op, DAG);
+  }
+  
+  // 'X' matches anything.
+  if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
+    // Labels and constants are handled elsewhere ('X' is the only thing
+    // that matches labels).
+    if (isa<BasicBlock>(OpInfo.CallOperandVal) ||
+        isa<ConstantInt>(OpInfo.CallOperandVal))
+      return;
+    
+    // Otherwise, try to resolve it to something we know about by looking at
+    // the actual operand type.
+    if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
+      OpInfo.ConstraintCode = Repl;
+      OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
+    }
+  }
+}
+
+//===----------------------------------------------------------------------===//
 //  Loop Strength Reduction hooks
 //===----------------------------------------------------------------------===//
 
@@ -1866,7 +2046,7 @@
 /// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
 SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, 
                                     std::vector<SDNode*>* Created) const {
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // Check to see if we can do this.
   if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
@@ -1907,7 +2087,7 @@
   }
   // Extract the sign bit and add it to the quotient
   SDOperand T =
-    DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(MVT::getSizeInBits(VT)-1,
+    DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(VT.getSizeInBits()-1,
                                                  getShiftAmountTy()));
   if (Created)
     Created->push_back(T.Val);
@@ -1920,7 +2100,7 @@
 /// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
 SDOperand TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
                                     std::vector<SDNode*>* Created) const {
-  MVT::ValueType VT = N->getValueType(0);
+  MVT VT = N->getValueType(0);
   
   // Check to see if we can do this.
   if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))

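As background for the getVectorTypeBreakdown changes above: the routine
repeatedly halves the element count until it reaches a vector type the target
supports (or a scalar), doubling the register count on each split. A
self-contained sketch of that loop, using a made-up isLegalVector predicate
in place of TargetLowering::isTypeLegal (illustrative only, not part of this
commit):

  #include <cstdio>

  // Made-up legality rule for illustration: pretend only 4-element vectors
  // (and scalars) are legal on this target.
  static bool isLegalVector(unsigned NumElts) {
    return NumElts == 4 || NumElts == 1;
  }

  int main() {
    unsigned NumElts = 16, NumVectorRegs = 1;
    // Mirrors the halving loop in getVectorTypeBreakdown: split the vector
    // in two until the piece is a supported size (this ends at a scalar if
    // the target has no vector support at all).
    while (NumElts > 1 && !isLegalVector(NumElts)) {
      NumElts >>= 1;
      NumVectorRegs <<= 1;
    }
    std::printf("%u intermediate pieces of %u elements each\n",
                NumVectorRegs, NumElts);  // a 16-element vector splits into 4 pieces of 4
    return 0;
  }
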
Modified: llvm/branches/non-call-eh/lib/CodeGen/ShadowStackCollector.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/ShadowStackCollector.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/ShadowStackCollector.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/ShadowStackCollector.cpp Sun Jul  6 15:45:41 2008
@@ -66,11 +66,14 @@
     static GetElementPtrInst *CreateGEP(IRBuilder &B, Value *BasePtr,
                                         int Idx1, int Idx2, const char *Name);
   };
+
+}
   
-  CollectorRegistry::Add<ShadowStackCollector>
-  Y("shadow-stack",
-    "Very portable collector for uncooperative code generators");
+static CollectorRegistry::Add<ShadowStackCollector>
+Y("shadow-stack",
+  "Very portable collector for uncooperative code generators");
   
+namespace {
   /// EscapeEnumerator - This is a little algorithm to find all escape points
   /// from a function so that "finally"-style code can be inserted. In addition
   /// to finding the existing return and unwind instructions, it also (if
@@ -325,8 +328,7 @@
         if (Function *F = CI->getCalledFunction())
           if (F->getIntrinsicID() == Intrinsic::gcroot) {
             std::pair<CallInst*,AllocaInst*> Pair = std::make_pair(
-              CI, cast<AllocaInst>(
-                    IntrinsicInst::StripPointerCasts(CI->getOperand(1))));
+              CI, cast<AllocaInst>(CI->getOperand(1)->stripPointerCasts()));
             if (IsNullValue(CI->getOperand(2)))
               Roots.push_back(Pair);
             else

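For context on the first hunk above: an object in an anonymous namespace and
a file-scope static are both private to the translation unit, so moving the
registration object out of the anonymous namespace does not change where it
is visible; it just reserves the anonymous namespace for types. A tiny sketch
of the pattern (Registrar is a made-up stand-in for CollectorRegistry::Add,
not the real API):

  #include <cstdio>

  // Made-up stand-in for a registration helper such as CollectorRegistry::Add.
  struct Registrar {
    explicit Registrar(const char *Name) { std::printf("registered %s\n", Name); }
  };

  namespace {
    // The collector type itself stays inside the anonymous namespace...
    struct DemoCollector {};
  }

  // ...while the registration object becomes a file-scope static, matching
  // the style the patch switches to.
  static Registrar Y("shadow-stack-demo");

  int main() { (void)sizeof(DemoCollector); return 0; }
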
Modified: llvm/branches/non-call-eh/lib/CodeGen/SimpleRegisterCoalescing.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SimpleRegisterCoalescing.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SimpleRegisterCoalescing.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SimpleRegisterCoalescing.cpp Sun Jul  6 15:45:41 2008
@@ -17,7 +17,6 @@
 #include "VirtRegMap.h"
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/Value.h"
-#include "llvm/CodeGen/LiveVariables.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
@@ -36,31 +35,35 @@
 using namespace llvm;
 
 STATISTIC(numJoins    , "Number of interval joins performed");
+STATISTIC(numSubJoins , "Number of subclass joins performed");
 STATISTIC(numCommutes , "Number of instruction commuting performed");
 STATISTIC(numExtends  , "Number of copies extended");
 STATISTIC(numPeep     , "Number of identity moves eliminated after coalescing");
 STATISTIC(numAborts   , "Number of times interval joining aborted");
 
 char SimpleRegisterCoalescing::ID = 0;
-namespace {
-  static cl::opt<bool>
-  EnableJoining("join-liveintervals",
-                cl::desc("Coalesce copies (default=true)"),
-                cl::init(true));
-
-  static cl::opt<bool>
-  NewHeuristic("new-coalescer-heuristic",
-                cl::desc("Use new coalescer heuristic"),
-                cl::init(false));
+static cl::opt<bool>
+EnableJoining("join-liveintervals",
+              cl::desc("Coalesce copies (default=true)"),
+              cl::init(true));
+
+static cl::opt<bool>
+NewHeuristic("new-coalescer-heuristic",
+             cl::desc("Use new coalescer heuristic"),
+             cl::init(false), cl::Hidden);
+
+static cl::opt<bool>
+CrossClassJoin("join-subclass-copies",
+               cl::desc("Coalesce copies to sub-register class"),
+               cl::init(false), cl::Hidden);
 
-  RegisterPass<SimpleRegisterCoalescing> 
-  X("simple-register-coalescing", "Simple Register Coalescing");
+static RegisterPass<SimpleRegisterCoalescing> 
+X("simple-register-coalescing", "Simple Register Coalescing");
 
-  // Declare that we implement the RegisterCoalescer interface
-  RegisterAnalysisGroup<RegisterCoalescer, true/*The Default*/> V(X);
-}
+// Declare that we implement the RegisterCoalescer interface
+static RegisterAnalysisGroup<RegisterCoalescer, true/*The Default*/> V(X);
 
-const PassInfo *llvm::SimpleRegisterCoalescingID = X.getPassInfo();
+const PassInfo *const llvm::SimpleRegisterCoalescingID = &X;
 
 void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.addPreserved<LiveIntervals>();
@@ -68,7 +71,6 @@
   AU.addPreservedID(MachineDominatorsID);
   AU.addPreservedID(PHIEliminationID);
   AU.addPreservedID(TwoAddressInstructionPassID);
-  AU.addRequired<LiveVariables>();
   AU.addRequired<LiveIntervals>();
   AU.addRequired<MachineLoopInfo>();
   MachineFunctionPass::getAnalysisUsage(AU);
@@ -385,10 +387,21 @@
   // simply extend BLR if CopyMI doesn't end the range.
   DOUT << "\nExtending: "; IntB.print(DOUT, tri_);
 
-  IntB.removeValNo(BValNo);
+  // Remove val#'s defined by copies that will be coalesced away.
   for (unsigned i = 0, e = BDeadValNos.size(); i != e; ++i)
     IntB.removeValNo(BDeadValNos[i]);
-  VNInfo *ValNo = IntB.getNextValue(AValNo->def, 0, li_->getVNInfoAllocator());
+
+  // Extend BValNo by merging in IntA live ranges of AValNo. Val# definition
+  // is updated. Kills are also updated.
+  VNInfo *ValNo = BValNo;
+  ValNo->def = AValNo->def;
+  ValNo->copy = NULL;
+  for (unsigned j = 0, ee = ValNo->kills.size(); j != ee; ++j) {
+    unsigned Kill = ValNo->kills[j];
+    if (Kill != BLR->end)
+      BKills.push_back(Kill);
+  }
+  ValNo->kills.clear();
   for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
        AI != AE; ++AI) {
     if (AI->valno != AValNo) continue;
@@ -430,7 +443,7 @@
     LI.FindLiveRangeContaining(li_->getDefIndex(DefIdx));
   if (DstLR == LI.end())
     return false;
-  unsigned KillIdx = li_->getInstructionIndex(&MBB->back()) + InstrSlots::NUM;
+  unsigned KillIdx = li_->getMBBEndIdx(MBB) + 1;
   if (DstLR->valno->kills.size() == 1 &&
       DstLR->valno->kills[0] == KillIdx && DstLR->valno->hasPHIKill)
     return true;
@@ -639,7 +652,8 @@
     // first instruction index starts at > 0 value.
     assert(TargetRegisterInfo::isPhysicalRegister(li.reg));
     // Live-in to the function but dead. Remove it from entry live-in set.
-    mf_->begin()->removeLiveIn(li.reg);
+    if (mf_->begin()->isLiveIn(li.reg))
+      mf_->begin()->removeLiveIn(li.reg);
     const LiveRange *LR = li.getLiveRangeContaining(CopyIdx);
     removeRange(li, LR->start, LR->end, li_, tri_);
     return removeIntervalIfEmpty(li, li_, tri_);
@@ -752,7 +766,7 @@
 /// identity copies so they will be removed.
 void SimpleRegisterCoalescing::RemoveCopiesFromValNo(LiveInterval &li,
                                                      VNInfo *VNI) {
-  MachineInstr *ImpDef = NULL;
+  SmallVector<MachineInstr*, 4> ImpDefs;
   MachineOperand *LastUse = NULL;
   unsigned LastUseIdx = li_->getUseIndex(VNI->def);
   for (MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg),
@@ -762,8 +776,7 @@
     ++RI;
     if (MO->isDef()) {
       if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
-        assert(!ImpDef && "Multiple implicit_def defining same register?");
-        ImpDef = MI;
+        ImpDefs.push_back(MI);
       }
       continue;
     }
@@ -791,9 +804,13 @@
   if (LastUse)
     LastUse->setIsKill();
   else {
-    // Remove dead implicit_def.
-    li_->RemoveMachineInstrFromMaps(ImpDef);
-    ImpDef->eraseFromParent();
+    // Remove dead implicit_def's.
+    while (!ImpDefs.empty()) {
+      MachineInstr *ImpDef = ImpDefs.back();
+      ImpDefs.pop_back();
+      li_->RemoveMachineInstrFromMaps(ImpDef);
+      ImpDef->eraseFromParent();
+    }
   }
 }
 
@@ -807,6 +824,41 @@
   return 0;
 }
 
+/// isProfitableToCoalesceToSubRC - Given that register class of DstReg is
+/// a subset of the register class of SrcReg, return true if it's profitable
+/// to coalesce the two registers.
+bool
+SimpleRegisterCoalescing::isProfitableToCoalesceToSubRC(unsigned SrcReg,
+                                                        unsigned DstReg,
+                                                        MachineBasicBlock *MBB){
+  if (!CrossClassJoin)
+    return false;
+
+  // First let's make sure all uses are in the same MBB.
+  for (MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(SrcReg),
+         RE = mri_->reg_end(); RI != RE; ++RI) {
+    MachineInstr &MI = *RI;
+    if (MI.getParent() != MBB)
+      return false;
+  }
+  for (MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(DstReg),
+         RE = mri_->reg_end(); RI != RE; ++RI) {
+    MachineInstr &MI = *RI;
+    if (MI.getParent() != MBB)
+      return false;
+  }
+
+  // Then make sure the intervals are *short*.
+  LiveInterval &SrcInt = li_->getInterval(SrcReg);
+  LiveInterval &DstInt = li_->getInterval(DstReg);
+  unsigned SrcSize = SrcInt.getSize() / InstrSlots::NUM;
+  unsigned DstSize = DstInt.getSize() / InstrSlots::NUM;
+  const TargetRegisterClass *RC = mri_->getRegClass(DstReg);
+  unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
+  return (SrcSize + DstSize) <= Threshold;
+}
+
+
 /// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
 /// which are the src/dst of the copy instruction CopyMI.  This returns true
 /// if the copy was successfully coalesced away. If it is not currently
@@ -867,6 +919,9 @@
     return false;  // Not coalescable.
   }
 
+  // Should be non-null only when coalescing to a sub-register class.
+  const TargetRegisterClass *SubRC = NULL;
+  MachineBasicBlock *CopyMBB = CopyMI->getParent();
   unsigned RealDstReg = 0;
   unsigned RealSrcReg = 0;
   if (isExtSubReg || isInsSubReg) {
@@ -941,9 +996,12 @@
       unsigned OldSubIdx = isExtSubReg ? CopyMI->getOperand(0).getSubReg()
         : CopyMI->getOperand(2).getSubReg();
       if (OldSubIdx) {
-        if (OldSubIdx == SubIdx)
+        if (OldSubIdx == SubIdx &&
+            !differingRegisterClasses(SrcReg, DstReg, SubRC))
           // r1024<2> = EXTRACT_SUBREG r1025, 2. Then r1024 has already been
           // coalesced to a larger register so the subreg indices cancel out.
+          // Also check if the other larger register is of the same register
+          // class as the would-be resulting register.
           SubIdx = 0;
         else {
           DOUT << "\t Sub-register indices mismatch.\n";
@@ -963,17 +1021,17 @@
         // if this will cause a high use density interval to target a smaller
         // set of registers.
         if (SmallRegSize > Threshold || LargeRegSize > Threshold) {
-          LiveVariables::VarInfo &svi = lv_->getVarInfo(LargeReg);
-          LiveVariables::VarInfo &dvi = lv_->getVarInfo(SmallReg);
-          if ((float)dvi.NumUses / SmallRegSize <
-              (float)svi.NumUses / LargeRegSize) {
+          if ((float)std::distance(mri_->use_begin(SmallReg),
+                                   mri_->use_end()) / SmallRegSize <
+              (float)std::distance(mri_->use_begin(LargeReg),
+                                   mri_->use_end()) / LargeRegSize) {
             Again = true;  // May be possible to coalesce later.
             return false;
           }
         }
       }
     }
-  } else if (differingRegisterClasses(SrcReg, DstReg)) {
+  } else if (differingRegisterClasses(SrcReg, DstReg, SubRC)) {
     // FIXME: What if the result of an EXTRACT_SUBREG is then coalesced
     // with another? If it's the resulting destination register, then
     // the subidx must be propagated to uses (but only those defined
@@ -981,14 +1039,16 @@
     // register, it should be safe because register is assumed to have
     // the register class of the super-register.
 
-    // If they are not of the same register class, we cannot join them.
-    DOUT << "\tSrc/Dest are different register classes.\n";
-    // Allow the coalescer to try again in case either side gets coalesced to
-    // a physical register that's compatible with the other side. e.g.
-    // r1024 = MOV32to32_ r1025
-    // but later r1024 is assigned EAX then r1025 may be coalesced with EAX.
-    Again = true;  // May be possible to coalesce later.
-    return false;
+    if (!SubRC || !isProfitableToCoalesceToSubRC(SrcReg, DstReg, CopyMBB)) {
+      // If they are not of the same register class, we cannot join them.
+      DOUT << "\tSrc/Dest are different register classes.\n";
+      // Allow the coalescer to try again in case either side gets coalesced to
+      // a physical register that's compatible with the other side. e.g.
+      // r1024 = MOV32to32_ r1025
+      // but later r1024 is assigned EAX then r1025 may be coalesced with EAX.
+      Again = true;  // May be possible to coalesce later.
+      return false;
+    }
   }
   
   LiveInterval &SrcInt = li_->getInterval(SrcReg);
@@ -1022,9 +1082,9 @@
       // do not join them, instead mark the physical register as its allocation
       // preference.
       unsigned Length = JoinVInt.getSize() / InstrSlots::NUM;
-      LiveVariables::VarInfo &vi = lv_->getVarInfo(JoinVReg);
       if (Length > Threshold &&
-          (((float)vi.NumUses / Length) < (1.0 / Threshold))) {
+          (((float)std::distance(mri_->use_begin(JoinVReg),
+                              mri_->use_end()) / Length) < (1.0 / Threshold))) {
         JoinVInt.preference = JoinPReg;
         ++numAborts;
         DOUT << "\tMay tie down a physical register, abort!\n";
@@ -1107,11 +1167,6 @@
     for (const unsigned *AS = tri_->getSubRegisters(DstReg); *AS; ++AS)
       li_->getOrCreateInterval(*AS).MergeInClobberRanges(*ResSrcInt,
                                                  li_->getVNInfoAllocator());
-  } else {
-    // Merge use info if the destination is a virtual register.
-    LiveVariables::VarInfo& dVI = lv_->getVarInfo(DstReg);
-    LiveVariables::VarInfo& sVI = lv_->getVarInfo(SrcReg);
-    dVI.NumUses += sVI.NumUses;
   }
 
   // If this is a EXTRACT_SUBREG, make sure the result of coalescing is the
@@ -1124,6 +1179,13 @@
     }
   }
 
+  // Coalescing to a virtual register that is of a sub-register class of the
+  // other. Make sure the resulting register is set to the right register class.
+  if (SubRC) {
+    mri_->setRegClass(DstReg, SubRC);
+    ++numSubJoins;
+  }
+
   if (NewHeuristic) {
     // Add all copies that define val# in the source interval into the queue.
     for (LiveInterval::const_vni_iterator i = ResSrcInt->vni_begin(),
@@ -1136,7 +1198,7 @@
       if (CopyMI &&
           JoinedCopies.count(CopyMI) == 0 &&
           tii_->isMoveInstr(*CopyMI, NewSrcReg, NewDstReg)) {
-        unsigned LoopDepth = loopInfo->getLoopDepth(CopyMI->getParent());
+        unsigned LoopDepth = loopInfo->getLoopDepth(CopyMBB);
         JoinQueue->push(CopyRec(CopyMI, LoopDepth,
                                 isBackEdgeCopy(CopyMI, DstReg)));
       }
@@ -1321,7 +1383,20 @@
         // Copy from the RHS?
         if (!RangeIsDefinedByCopyFromReg(LHS, LHSIt, RHS.reg))
           return false;    // Nope, bail out.
-        
+
+        if (LHSIt->contains(RHSIt->valno->def))
+          // Here is an interesting situation:
+          // BB1:
+          //   vr1025 = copy vr1024
+          //   ..
+          // BB2:
+          //   vr1024 = op 
+          //          = vr1025
+          // Even though vr1025 is copied from vr1024, it's not safe to
+          // coalesce them since the live range of vr1025 intersects the
+          // def of vr1024. This happens because vr1025 is assigned the
+          // value of the previous iteration of vr1024.
+          return false;
         EliminatedLHSVals.push_back(LHSIt->valno);
       }
       
@@ -1347,6 +1422,19 @@
           // Otherwise, if this is a copy from the RHS, mark it as being merged
           // in.
           if (RangeIsDefinedByCopyFromReg(LHS, LHSIt, RHS.reg)) {
+            if (LHSIt->contains(RHSIt->valno->def))
+              // Here is an interesting situation:
+              // BB1:
+              //   vr1025 = copy vr1024
+              //   ..
+              // BB2:
+              //   vr1024 = op 
+              //          = vr1025
+              // Even though vr1025 is copied from vr1024, it's not safe to
+              // coalesce them since the live range of vr1025 intersects the
+              // def of vr1024. This happens because vr1025 is assigned the
+              // value of the previous iteration of vr1024.
+              return false;
             EliminatedLHSVals.push_back(LHSIt->valno);
 
             // We know this entire LHS live range is okay, so skip it now.
@@ -1841,9 +1929,13 @@
 }
 
 /// Return true if the two specified registers belong to different register
-/// classes.  The registers may be either phys or virt regs.
-bool SimpleRegisterCoalescing::differingRegisterClasses(unsigned RegA,
-                                                        unsigned RegB) const {
+/// classes.  The registers may be either phys or virt regs. In the
+/// case where both registers are virtual registers, it also returns
+/// RegB's register class by reference in SubRC if it is a subset of
+/// RegA's register class.
+bool
+SimpleRegisterCoalescing::differingRegisterClasses(unsigned RegA, unsigned RegB,
+                                      const TargetRegisterClass *&SubRC) const {
 
   // Get the register classes for the first reg.
   if (TargetRegisterInfo::isPhysicalRegister(RegA)) {
@@ -1853,11 +1945,15 @@
   }
 
   // Compare against the regclass for the second reg.
-  const TargetRegisterClass *RegClass = mri_->getRegClass(RegA);
-  if (TargetRegisterInfo::isVirtualRegister(RegB))
-    return RegClass != mri_->getRegClass(RegB);
-  else
-    return !RegClass->contains(RegB);
+  const TargetRegisterClass *RegClassA = mri_->getRegClass(RegA);
+  if (TargetRegisterInfo::isVirtualRegister(RegB)) {
+    const TargetRegisterClass *RegClassB = mri_->getRegClass(RegB);
+    if (RegClassA == RegClassB)
+      return false;
+    SubRC = (RegClassA->hasSubClass(RegClassB)) ? RegClassB : NULL;
+    return true;
+  }
+  return !RegClassA->contains(RegB);
 }
 
 /// lastRegisterUse - Returns the last use of the specific register between
@@ -1981,7 +2077,6 @@
   tri_ = tm_->getRegisterInfo();
   tii_ = tm_->getInstrInfo();
   li_ = &getAnalysis<LiveIntervals>();
-  lv_ = &getAnalysis<LiveVariables>();
   loopInfo = &getAnalysis<MachineLoopInfo>();
 
   DOUT << "********** SIMPLE REGISTER COALESCING **********\n"
@@ -2002,27 +2097,6 @@
       I->second.print(DOUT, tri_);
       DOUT << "\n";
     }
-
-    // Delete all coalesced copies.
-    for (SmallPtrSet<MachineInstr*,32>::iterator I = JoinedCopies.begin(),
-           E = JoinedCopies.end(); I != E; ++I) {
-      MachineInstr *CopyMI = *I;
-      unsigned SrcReg, DstReg;
-      if (!tii_->isMoveInstr(*CopyMI, SrcReg, DstReg)) {
-        assert((CopyMI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
-                CopyMI->getOpcode() == TargetInstrInfo::INSERT_SUBREG) &&
-               "Unrecognized copy instruction");
-        DstReg = CopyMI->getOperand(0).getReg();
-      }
-      if (CopyMI->registerDefIsDead(DstReg)) {
-        LiveInterval &li = li_->getInterval(DstReg);
-        if (!ShortenDeadCopySrcLiveRange(li, CopyMI))
-          ShortenDeadCopyLiveRange(li, CopyMI);
-      }
-      li_->RemoveMachineInstrFromMaps(*I);
-      (*I)->eraseFromParent();
-      ++numPeep;
-    }
   }
 
   // Perform a final pass over the instructions and compute spill weights
@@ -2034,15 +2108,35 @@
 
     for (MachineBasicBlock::iterator mii = mbb->begin(), mie = mbb->end();
          mii != mie; ) {
-      // if the move will be an identity move delete it
-      unsigned srcReg, dstReg;
-      bool isMove = tii_->isMoveInstr(*mii, srcReg, dstReg);
-      if (isMove && srcReg == dstReg) {
-        if (li_->hasInterval(srcReg)) {
-          LiveInterval &RegInt = li_->getInterval(srcReg);
+      MachineInstr *MI = mii;
+      unsigned SrcReg, DstReg;
+      if (JoinedCopies.count(MI)) {
+        // Delete all coalesced copies.
+        if (!tii_->isMoveInstr(*MI, SrcReg, DstReg)) {
+          assert((MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
+                  MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG) &&
+                 "Unrecognized copy instruction");
+          DstReg = MI->getOperand(0).getReg();
+        }
+        if (MI->registerDefIsDead(DstReg)) {
+          LiveInterval &li = li_->getInterval(DstReg);
+          if (!ShortenDeadCopySrcLiveRange(li, MI))
+            ShortenDeadCopyLiveRange(li, MI);
+        }
+        li_->RemoveMachineInstrFromMaps(MI);
+        mii = mbbi->erase(mii);
+        ++numPeep;
+        continue;
+      }
+
+      // If the move will be an identity move delete it
+      bool isMove = tii_->isMoveInstr(*mii, SrcReg, DstReg);
+      if (isMove && SrcReg == DstReg) {
+        if (li_->hasInterval(SrcReg)) {
+          LiveInterval &RegInt = li_->getInterval(SrcReg);
           // If def of this move instruction is dead, remove its live range
           // from the destination register's live interval.
-          if (mii->registerDefIsDead(dstReg)) {
+          if (mii->registerDefIsDead(DstReg)) {
             if (!ShortenDeadCopySrcLiveRange(RegInt, mii))
               ShortenDeadCopyLiveRange(RegInt, mii);
           }
@@ -2050,7 +2144,7 @@
         li_->RemoveMachineInstrFromMaps(mii);
         mii = mbbi->erase(mii);
         ++numPeep;
-      } else if (!isMove || !TurnCopyIntoImpDef(mii, mbb, dstReg, srcReg)) {
+      } else if (!isMove || !TurnCopyIntoImpDef(mii, mbb, DstReg, SrcReg)) {
         SmallSet<unsigned, 4> UniqueUses;
         for (unsigned i = 0, e = mii->getNumOperands(); i != e; ++i) {
           const MachineOperand &mop = mii->getOperand(i);

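On the new sub-register-class coalescing above: isProfitableToCoalesceToSubRC
only allows the cross-class join when every use of both registers sits in the
same basic block and the combined live-interval length stays at or below
twice the number of allocatable registers in the destination class. The size
test, as a stand-alone sketch (illustrative only; shortEnoughToJoin is a
made-up name, not part of this commit):

  #include <cstdio>

  // Illustrative only: the size check from isProfitableToCoalesceToSubRC.
  // SrcSize/DstSize stand for the interval sizes divided by InstrSlots::NUM;
  // the threshold is twice the number of allocatable registers in the
  // destination's (sub-)register class.
  static bool shortEnoughToJoin(unsigned SrcSize, unsigned DstSize,
                                unsigned AllocatableRegsInClass) {
    unsigned Threshold = AllocatableRegsInClass * 2;
    return (SrcSize + DstSize) <= Threshold;
  }

  int main() {
    // e.g. a class with 8 allocatable registers gives a threshold of 16.
    std::printf("%d\n", shortEnoughToJoin(5, 9, 8));   // 1: short enough to join
    std::printf("%d\n", shortEnoughToJoin(12, 9, 8));  // 0: would tie down the class
    return 0;
  }
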
Modified: llvm/branches/non-call-eh/lib/CodeGen/SimpleRegisterCoalescing.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/SimpleRegisterCoalescing.h?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/SimpleRegisterCoalescing.h (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/SimpleRegisterCoalescing.h Sun Jul  6 15:45:41 2008
@@ -83,7 +83,6 @@
     const TargetRegisterInfo* tri_;
     const TargetInstrInfo* tii_;
     LiveIntervals *li_;
-    LiveVariables *lv_;
     const MachineLoopInfo* loopInfo;
     
     BitVector allocatableRegs_;
@@ -166,9 +165,13 @@
     /// joins them and returns true.
     bool SimpleJoin(LiveInterval &LHS, LiveInterval &RHS);
     
-    /// Return true if the two specified registers belong to different
-    /// register classes.  The registers may be either phys or virt regs.
-    bool differingRegisterClasses(unsigned RegA, unsigned RegB) const;
+    /// Return true if the two specified registers belong to different register
+    /// classes.  The registers may be either phys or virt regs.  If both
+    /// registers are virtual registers, it also returns RegB's register class
+    /// by reference in SubRC when that class is a subset of RegA's register
+    /// class.
+    bool differingRegisterClasses(unsigned RegA, unsigned RegB,
+                                  const TargetRegisterClass *&SubRC) const;
 
 
     /// AdjustCopiesBackFrom - We found a non-trivially-coalescable copy. If
@@ -206,6 +209,12 @@
     /// identity copies so they will be removed.
     void RemoveCopiesFromValNo(LiveInterval &li, VNInfo *VNI);
 
+    /// isProfitableToCoalesceToSubRC - Given that the register class of DstReg is
+    /// a subset of the register class of SrcReg, return true if it's profitable
+    /// to coalesce the two registers.
+    bool isProfitableToCoalesceToSubRC(unsigned SrcReg, unsigned DstReg,
+                                       MachineBasicBlock *MBB);
+
     /// RangeIsDefinedByCopyFromReg - Return true if the specified live range of
     /// the specified live interval is defined by a copy from the specified
     /// register.

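The new differingRegisterClasses signature also reports, via SubRC, when one virtual register's class is a subset of the other's, so that isProfitableToCoalesceToSubRC can decide whether constraining the coalesced register to the smaller class is worthwhile. A toy illustration of the subset test using plain std::set register classes (hypothetical names, not the TargetRegisterClass API):

    #include <algorithm>
    #include <set>
    #include <cstdio>

    typedef std::set<unsigned> ToyRegClass;

    // Return true if the classes differ; when B is a subset of A, also report
    // it through SubRC (mirroring the intent of the new interface).
    static bool differingClasses(const ToyRegClass &A, const ToyRegClass &B,
                                 const ToyRegClass *&SubRC) {
      SubRC = 0;
      if (A == B)
        return false;
      if (std::includes(A.begin(), A.end(), B.begin(), B.end()))
        SubRC = &B;
      return true;
    }

    int main() {
      unsigned Full[] = {1, 2, 3, 4};   // e.g. a full class
      unsigned Sub[]  = {1, 2};         // e.g. a sub-class of it
      ToyRegClass Big(Full, Full + 4), Small(Sub, Sub + 2);
      const ToyRegClass *SubRC = 0;
      if (differingClasses(Big, Small, SubRC) && SubRC)
        std::printf("classes differ; the second is a subset of the first\n");
      return 0;
    }
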
Added: llvm/branches/non-call-eh/lib/CodeGen/StackSlotColoring.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/StackSlotColoring.cpp?rev=53163&view=auto

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/StackSlotColoring.cpp (added)
+++ llvm/branches/non-call-eh/lib/CodeGen/StackSlotColoring.cpp Sun Jul  6 15:45:41 2008
@@ -0,0 +1,265 @@
+//===-- StackSlotColoring.cpp - Stack slot coloring pass. -----------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the stack slot coloring pass.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "stackcoloring"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/LiveStackAnalysis.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include <vector>
+using namespace llvm;
+
+static cl::opt<bool>
+DisableSharing("no-stack-slot-sharing",
+             cl::init(false), cl::Hidden,
+             cl::desc("Surpress slot sharing during stack coloring"));
+
+STATISTIC(NumEliminated,   "Number of stack slots eliminated due to coloring");
+
+namespace {
+  class VISIBILITY_HIDDEN StackSlotColoring : public MachineFunctionPass {
+    LiveStacks* LS;
+    MachineFrameInfo *MFI;
+
+    // SSIntervals - Spill slot intervals.
+    std::vector<LiveInterval*> SSIntervals;
+
+    // OrigAlignments - Alignments of stack objects before coloring.
+    SmallVector<unsigned, 16> OrigAlignments;
+
+    // OrigSizes - Sizes of stack objects before coloring.
+    SmallVector<unsigned, 16> OrigSizes;
+
+    // AllColors - If index is set, it's a spill slot, i.e. color.
+    // FIXME: This assumes PEI locates spill slots with smaller indices
+    // closest to stack pointer / frame pointer. Therefore, smaller
+    // index == better color.
+    BitVector AllColors;
+
+    // NextColor - Next "color" that's not yet used.
+    int NextColor;
+
+    // UsedColors - "Colors" that have been assigned.
+    BitVector UsedColors;
+
+    // Assignments - Color to intervals mapping.
+    SmallVector<SmallVector<LiveInterval*,4>,16> Assignments;
+
+  public:
+    static char ID; // Pass identification
+    StackSlotColoring() : MachineFunctionPass((intptr_t)&ID), NextColor(-1) {}
+    
+    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+      AU.addRequired<LiveStacks>();
+      MachineFunctionPass::getAnalysisUsage(AU);
+    }
+
+    virtual bool runOnMachineFunction(MachineFunction &MF);
+    virtual const char* getPassName() const {
+      return "Stack Slot Coloring";
+    }
+
+  private:
+    bool InitializeSlots();
+    bool OverlapWithAssignments(LiveInterval *li, int Color) const;
+    int ColorSlot(LiveInterval *li);
+    bool ColorSlots(MachineFunction &MF);
+  };
+} // end anonymous namespace
+
+char StackSlotColoring::ID = 0;
+
+static RegisterPass<StackSlotColoring>
+X("stack-slot-coloring", "Stack Slot Coloring");
+
+FunctionPass *llvm::createStackSlotColoringPass() {
+  return new StackSlotColoring();
+}
+
+namespace {
+  // IntervalSorter - Comparison predicate that sorts live intervals by
+  // their weight.
+  struct IntervalSorter {
+    bool operator()(LiveInterval* LHS, LiveInterval* RHS) const {
+      return LHS->weight > RHS->weight;
+    }
+  };
+}
+
+/// InitializeSlots - Process all spill stack slot live intervals and add them
+/// to a sorted (by weight) list.
+bool StackSlotColoring::InitializeSlots() {
+  if (LS->getNumIntervals() < 2)
+    return false;
+
+  int LastFI = MFI->getObjectIndexEnd();
+  OrigAlignments.resize(LastFI);
+  OrigSizes.resize(LastFI);
+  AllColors.resize(LastFI);
+  UsedColors.resize(LastFI);
+  Assignments.resize(LastFI);
+
+  // Gather all spill slots into a list.
+  for (LiveStacks::iterator i = LS->begin(), e = LS->end(); i != e; ++i) {
+    LiveInterval &li = i->second;
+    int FI = li.getStackSlotIndex();
+    if (MFI->isDeadObjectIndex(FI))
+      continue;
+    SSIntervals.push_back(&li);
+    OrigAlignments[FI] = MFI->getObjectAlignment(FI);
+    OrigSizes[FI]      = MFI->getObjectSize(FI);
+    AllColors.set(FI);
+  }
+
+  // Sort them by weight.
+  std::stable_sort(SSIntervals.begin(), SSIntervals.end(), IntervalSorter());
+
+  // Get first "color".
+  NextColor = AllColors.find_first();
+  return true;
+}
+
+/// OverlapWithAssignments - Return true if LiveInterval overlaps with any
+/// LiveIntervals that have already been assigned to the specified color.
+bool
+StackSlotColoring::OverlapWithAssignments(LiveInterval *li, int Color) const {
+  const SmallVector<LiveInterval*,4> &OtherLIs = Assignments[Color];
+  for (unsigned i = 0, e = OtherLIs.size(); i != e; ++i) {
+    LiveInterval *OtherLI = OtherLIs[i];
+    if (OtherLI->overlaps(*li))
+      return true;
+  }
+  return false;
+}
+
+/// ColorSlot - Assign a "color" (stack slot) to the specified stack slot interval.
+///
+int StackSlotColoring::ColorSlot(LiveInterval *li) {
+  int Color = -1;
+  bool Share = false;
+  if (!DisableSharing) {
+    // Check if it's possible to reuse any of the used colors.
+    Color = UsedColors.find_first();
+    while (Color != -1) {
+      if (!OverlapWithAssignments(li, Color)) {
+        Share = true;
+        ++NumEliminated;
+        break;
+      }
+      Color = UsedColors.find_next(Color);
+    }
+  }
+
+  // Assign it to the first available color (assumed to be the best) if it's
+  // not possible to share a used color with other objects.
+  if (!Share) {
+    assert(NextColor != -1 && "No more spill slots?");
+    Color = NextColor;
+    UsedColors.set(Color);
+    NextColor = AllColors.find_next(NextColor);
+  }
+
+  // Record the assignment.
+  Assignments[Color].push_back(li);
+  int FI = li->getStackSlotIndex();
+  DOUT << "Assigning fi #" << FI << " to fi #" << Color << "\n";
+
+  // Change size and alignment of the allocated slot. If there are multiple
+  // objects sharing the same slot, then make sure the size and alignment
+  // are large enough for all.
+  unsigned Align = OrigAlignments[FI];
+  if (!Share || Align > MFI->getObjectAlignment(Color))
+    MFI->setObjectAlignment(Color, Align);
+  int64_t Size = OrigSizes[FI];
+  if (!Share || Size > MFI->getObjectSize(Color))
+    MFI->setObjectSize(Color, Size);
+  return Color;
+}
+
+/// ColorSlots - Color all spill stack slots and rewrite all frameindex machine
+/// operands in the function.
+bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
+  unsigned NumObjs = MFI->getObjectIndexEnd();
+  std::vector<int> SlotMapping(NumObjs, -1);
+
+  bool Changed = false;
+  for (unsigned i = 0, e = SSIntervals.size(); i != e; ++i) {
+    LiveInterval *li = SSIntervals[i];
+    int SS = li->getStackSlotIndex();
+    int NewSS = ColorSlot(li);
+    SlotMapping[SS] = NewSS;
+    Changed |= (SS != NewSS);
+  }
+
+  if (!Changed)
+    return false;
+
+  // Rewrite all MO_FrameIndex operands.
+  // FIXME: Need the equivalent of MachineRegisterInfo for frameindex operands.
+  for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
+       MBB != E; ++MBB) {
+    for (MachineBasicBlock::iterator MII = MBB->begin(), EE = MBB->end();
+         MII != EE; ++MII) {
+      MachineInstr &MI = *MII;
+      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+        MachineOperand &MO = MI.getOperand(i);
+        if (!MO.isFrameIndex())
+          continue;
+        int FI = MO.getIndex();
+        if (FI < 0)
+          continue;
+        FI = SlotMapping[FI];
+        if (FI == -1)
+          continue;
+        MO.setIndex(FI);
+      }
+    }
+  }
+
+  // Delete unused stack slots.
+  while (NextColor != -1) {
+    DOUT << "Removing unused stack object fi #" << NextColor << "\n";
+    MFI->RemoveStackObject(NextColor);
+    NextColor = AllColors.find_next(NextColor);
+  }
+
+  return true;
+}
+
+bool StackSlotColoring::runOnMachineFunction(MachineFunction &MF) {
+  DOUT << "******** Stack Slot Coloring ********\n";
+
+  MFI = MF.getFrameInfo();
+  LS = &getAnalysis<LiveStacks>();
+
+  bool Changed = false;
+  if (InitializeSlots())
+    Changed = ColorSlots(MF);
+
+  NextColor = -1;
+  SSIntervals.clear();
+  OrigAlignments.clear();
+  OrigSizes.clear();
+  AllColors.clear();
+  UsedColors.clear();
+  for (unsigned i = 0, e = Assignments.size(); i != e; ++i)
+    Assignments[i].clear();
+  Assignments.clear();
+
+  return Changed;
+}

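ColorSlot above implements a simple greedy sharing scheme: try each already-used color and reuse the first one whose assigned intervals do not overlap the new interval; otherwise take the next fresh color. A small self-contained sketch of that policy over half-open [Start, End) toy intervals (not the LiveInterval class):

    #include <vector>
    #include <cstdio>

    struct ToyInterval { unsigned Start, End; };  // half-open [Start, End)

    static bool overlaps(const ToyInterval &A, const ToyInterval &B) {
      return A.Start < B.End && B.Start < A.End;
    }

    // Assign each interval the lowest color whose intervals it does not overlap.
    static std::vector<int> colorSlots(const std::vector<ToyInterval> &Ints) {
      std::vector<std::vector<ToyInterval> > Assignments;
      std::vector<int> Colors(Ints.size(), -1);
      for (unsigned i = 0, e = Ints.size(); i != e; ++i) {
        unsigned C = 0;
        for (; C != Assignments.size(); ++C) {
          bool Clash = false;
          for (unsigned j = 0, je = Assignments[C].size(); j != je; ++j)
            if (overlaps(Assignments[C][j], Ints[i])) { Clash = true; break; }
          if (!Clash) break;
        }
        if (C == Assignments.size())
          Assignments.push_back(std::vector<ToyInterval>()); // open a new color
        Assignments[C].push_back(Ints[i]);
        Colors[i] = (int)C;
      }
      return Colors;
    }

    int main() {
      ToyInterval A = {0, 10}, B = {12, 20}, C = {5, 15};
      std::vector<ToyInterval> Ints;
      Ints.push_back(A); Ints.push_back(B); Ints.push_back(C);
      std::vector<int> Colors = colorSlots(Ints);
      // A and B can share color 0; C overlaps both and gets color 1.
      std::printf("%d %d %d\n", Colors[0], Colors[1], Colors[2]);
      return 0;
    }
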
Modified: llvm/branches/non-call-eh/lib/CodeGen/StrongPHIElimination.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/StrongPHIElimination.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/StrongPHIElimination.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/StrongPHIElimination.cpp Sun Jul  6 15:45:41 2008
@@ -27,6 +27,7 @@
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterCoalescer.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/ADT/DepthFirstIterator.h"
@@ -34,7 +35,6 @@
 #include "llvm/Support/Compiler.h"
 using namespace llvm;
 
-
 namespace {
   struct VISIBILITY_HIDDEN StrongPHIElimination : public MachineFunctionPass {
     static char ID; // Pass identification, replacement for typeid
@@ -52,8 +52,9 @@
     // used as operands to another another PHI node
     std::set<unsigned> UsedByAnother;
     
-    // RenameSets are the sets of operands (and their VNInfo IDs) to a PHI
-    // (the defining instruction of the key) that can be renamed without copies.
+    // RenameSets is a map from a PHI-defined register to the input
+    // registers to be coalesced, along with the indices of those input
+    // registers.
     std::map<unsigned, std::map<unsigned, unsigned> > RenameSets;
     
     // PhiValueNumber holds the ID numbers of the VNs for each phi that we're
@@ -74,6 +75,7 @@
       
       // TODO: Actually make this true.
       AU.addPreserved<LiveIntervals>();
+      AU.addPreserved<RegisterCoalescer>();
       MachineFunctionPass::getAnalysisUsage(AU);
     }
     
@@ -140,13 +142,14 @@
                       SmallPtrSet<MachineBasicBlock*, 16>& v);
     void mergeLiveIntervals(unsigned primary, unsigned secondary, unsigned VN);
   };
-
-  char StrongPHIElimination::ID = 0;
-  RegisterPass<StrongPHIElimination> X("strong-phi-node-elimination",
-                  "Eliminate PHI nodes for register allocation, intelligently");
 }
 
-const PassInfo *llvm::StrongPHIEliminationID = X.getPassInfo();
+char StrongPHIElimination::ID = 0;
+static RegisterPass<StrongPHIElimination>
+X("strong-phi-node-elimination",
+  "Eliminate PHI nodes for register allocation, intelligently");
+
+const PassInfo *const llvm::StrongPHIEliminationID = &X;
 
 /// computeDFS - Computes the DFS-in and DFS-out numbers of the dominator tree
 /// of the given MachineFunction.  These numbers are then used in other parts
@@ -192,6 +195,8 @@
   }
 }
 
+namespace {
+
 /// PreorderSorter - a helper class that is used to sort registers
 /// according to the preorder number of their defining blocks
 class PreorderSorter {
@@ -219,6 +224,8 @@
   }
 };
 
+}
+
 /// computeDomForest - compute the subforest of the DomTree corresponding
 /// to the defining blocks of the registers in question
 std::vector<StrongPHIElimination::DomForestNode*>
@@ -460,13 +467,11 @@
         UsedByAnother.insert(SrcReg);
       } else {
         // Otherwise, add it to the renaming set
-        LiveInterval& I = LI.getOrCreateInterval(SrcReg);
-        unsigned idx = LI.getMBBEndIdx(P->getOperand(i).getMBB());
-        VNInfo* VN = I.getLiveRangeContaining(idx)->valno;
-        
-        assert(VN && "No VNInfo for register?");
+        // We need to subtract one from the index because live ranges are open
+        // at the end.
+        unsigned idx = LI.getMBBEndIdx(P->getOperand(i).getMBB()) - 1;
         
-        PHIUnion.insert(std::make_pair(SrcReg, VN->id));
+        PHIUnion.insert(std::make_pair(SrcReg, idx));
         UnionedBlocks.insert(MRI.getVRegDef(SrcReg)->getParent());
       }
     }
@@ -626,7 +631,7 @@
 /// of Static Single Assignment Form" by Briggs, et al.
 void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
                                           std::set<unsigned>& pushed) {
-  // FIXME: This function needs to update LiveVariables
+  // FIXME: This function needs to update LiveIntervals
   std::map<unsigned, unsigned>& copy_set= Waiting[MBB];
   
   std::map<unsigned, unsigned> worklist;
@@ -655,6 +660,8 @@
   MachineRegisterInfo& MRI = MF->getRegInfo();
   const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
   
+  SmallVector<std::pair<unsigned, MachineInstr*>, 4> InsertedPHIDests;
+  
   // Iterate over the worklist, inserting copies
   while (!worklist.empty() || !copy_set.empty()) {
     while (!worklist.empty()) {
@@ -685,6 +692,11 @@
                         map[curr.first], RC, RC);
       map[curr.first] = curr.second;
       
+      // Push this copy onto InsertedPHIDests so we can
+      // update LiveIntervals with it.
+      MachineBasicBlock::iterator MI = MBB->getFirstTerminator();
+      InsertedPHIDests.push_back(std::make_pair(curr.second, --MI));
+      
       // If curr.first is a destination in copy_set...
       for (std::map<unsigned, unsigned>::iterator I = copy_set.begin(),
            E = copy_set.end(); I != E; )
@@ -717,6 +729,21 @@
       worklist.insert(curr);
     }
   }
+  
+  // Renumber the instructions so that we can perform the index computations
+  // needed to create new live intervals.
+  LI.computeNumbering();
+  
+  // For copies that we inserted at the ends of predecessors, we construct
+  // live intervals.  This is pretty easy, since we know that the destination
+  // register cannot have been live at that point previously.  We just have
+  // to make sure that, for registers that serve as inputs to more than one
+  // PHI, we don't create multiple overlapping live intervals.
+  std::set<unsigned> RegHandled;
+  for (SmallVector<std::pair<unsigned, MachineInstr*>, 4>::iterator I =
+       InsertedPHIDests.begin(), E = InsertedPHIDests.end(); I != E; ++I)
+    if (!RegHandled.count(I->first))
+      LI.addLiveRangeToEndOfBlock(I->first, I->second);
 }
 
 /// InsertCopies - insert copies into MBB and all of its successors
@@ -751,106 +778,23 @@
     Stacks[*I].pop_back();
 }
 
-/// ComputeUltimateVN - Assuming we are going to join two live intervals,
-/// compute what the resultant value numbers for each value in the input two
-/// ranges will be.  This is complicated by copies between the two which can
-/// and will commonly cause multiple value numbers to be merged into one.
-///
-/// VN is the value number that we're trying to resolve.  InstDefiningValue
-/// keeps track of the new InstDefiningValue assignment for the result
-/// LiveInterval.  ThisFromOther/OtherFromThis are sets that keep track of
-/// whether a value in this or other is a copy from the opposite set.
-/// ThisValNoAssignments/OtherValNoAssignments keep track of value #'s that have
-/// already been assigned.
-///
-/// ThisFromOther[x] - If x is defined as a copy from the other interval, this
-/// contains the value number the copy is from.
-///
-static unsigned ComputeUltimateVN(VNInfo *VNI,
-                                  SmallVector<VNInfo*, 16> &NewVNInfo,
-                                  DenseMap<VNInfo*, VNInfo*> &ThisFromOther,
-                                  DenseMap<VNInfo*, VNInfo*> &OtherFromThis,
-                                  SmallVector<int, 16> &ThisValNoAssignments,
-                                  SmallVector<int, 16> &OtherValNoAssignments) {
-  unsigned VN = VNI->id;
-
-  // If the VN has already been computed, just return it.
-  if (ThisValNoAssignments[VN] >= 0)
-    return ThisValNoAssignments[VN];
-//  assert(ThisValNoAssignments[VN] != -2 && "Cyclic case?");
-
-  // If this val is not a copy from the other val, then it must be a new value
-  // number in the destination.
-  DenseMap<VNInfo*, VNInfo*>::iterator I = ThisFromOther.find(VNI);
-  if (I == ThisFromOther.end()) {
-    NewVNInfo.push_back(VNI);
-    return ThisValNoAssignments[VN] = NewVNInfo.size()-1;
-  }
-  VNInfo *OtherValNo = I->second;
-
-  // Otherwise, this *is* a copy from the RHS.  If the other side has already
-  // been computed, return it.
-  if (OtherValNoAssignments[OtherValNo->id] >= 0)
-    return ThisValNoAssignments[VN] = OtherValNoAssignments[OtherValNo->id];
-  
-  // Mark this value number as currently being computed, then ask what the
-  // ultimate value # of the other value is.
-  ThisValNoAssignments[VN] = -2;
-  unsigned UltimateVN =
-    ComputeUltimateVN(OtherValNo, NewVNInfo, OtherFromThis, ThisFromOther,
-                      OtherValNoAssignments, ThisValNoAssignments);
-  return ThisValNoAssignments[VN] = UltimateVN;
-}
-
 void StrongPHIElimination::mergeLiveIntervals(unsigned primary,
                                               unsigned secondary,
-                                              unsigned secondaryVN) {
+                                              unsigned secondaryIdx) {
   
   LiveIntervals& LI = getAnalysis<LiveIntervals>();
   LiveInterval& LHS = LI.getOrCreateInterval(primary);
   LiveInterval& RHS = LI.getOrCreateInterval(secondary);
   
-  // Compute the final value assignment, assuming that the live ranges can be
-  // coalesced.
-  SmallVector<int, 16> LHSValNoAssignments;
-  SmallVector<int, 16> RHSValNoAssignments;
-  SmallVector<VNInfo*, 16> NewVNInfo;
-  
-  LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
-  RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
-  NewVNInfo.reserve(LHS.getNumValNums() + RHS.getNumValNums());
-  
-  for (LiveInterval::vni_iterator I = LHS.vni_begin(), E = LHS.vni_end();
-       I != E; ++I) {
-    VNInfo *VNI = *I;
-    unsigned VN = VNI->id;
-    if (LHSValNoAssignments[VN] >= 0 || VNI->def == ~1U) 
-      continue;
-    
-    NewVNInfo.push_back(VNI);
-    LHSValNoAssignments[VN] = NewVNInfo.size()-1;
-  }
-  
-  for (LiveInterval::vni_iterator I = RHS.vni_begin(), E = RHS.vni_end();
-       I != E; ++I) {
-    VNInfo *VNI = *I;
-    unsigned VN = VNI->id;
-    if (RHSValNoAssignments[VN] >= 0 || VNI->def == ~1U)
-      continue;
-      
-    NewVNInfo.push_back(VNI);
-    RHSValNoAssignments[VN] = NewVNInfo.size()-1;
-  }
-
-  // If we get here, we know that we can coalesce the live ranges.  Ask the
-  // intervals to coalesce themselves now.
-
-  LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo);
-  LI.removeInterval(secondary);
+  LI.computeNumbering();
   
-  // The valno that was previously the input to the PHI node
-  // now has a PHIKill.
-  LHS.getValNumInfo(RHSValNoAssignments[secondaryVN])->hasPHIKill = true;
+  const LiveRange* RangeMergingIn = RHS.getLiveRangeContaining(secondaryIdx);
+  VNInfo* NewVN = LHS.getNextValue(secondaryIdx, RangeMergingIn->valno->copy,
+                  LI.getVNInfoAllocator());
+  NewVN->hasPHIKill = true;
+  LiveRange NewRange(RangeMergingIn->start, RangeMergingIn->end, NewVN);
+  LHS.addRange(NewRange);
+  RHS.removeRange(RangeMergingIn->start, RangeMergingIn->end, true);
 }
 
 bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
@@ -866,7 +810,7 @@
       processBlock(I);
   
   // Insert copies
-  // FIXME: This process should probably preserve LiveVariables
+  // FIXME: This process should probably preserve LiveIntervals
   SmallPtrSet<MachineBasicBlock*, 16> visited;
   InsertCopies(Fn.begin(), visited);
   
@@ -916,5 +860,7 @@
     PInstr->eraseFromParent();
   }
   
+  LI.computeNumbering();
+  
   return true;
 }

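mergeLiveIntervals no longer performs a full interval join; it looks up the single live range of the secondary interval that contains the PHI-input index, re-creates it in the primary interval under a fresh value number, and removes it from the secondary. A toy version of moving one range this way, with plain range vectors instead of LiveInterval/VNInfo:

    #include <vector>
    #include <cstdio>

    struct ToyRange { unsigned Start, End; };  // half-open [Start, End)

    // Move the range containing Idx from Src to Dst; return true on success.
    static bool moveRangeContaining(std::vector<ToyRange> &Src,
                                    std::vector<ToyRange> &Dst, unsigned Idx) {
      for (unsigned i = 0, e = Src.size(); i != e; ++i)
        if (Src[i].Start <= Idx && Idx < Src[i].End) {
          Dst.push_back(Src[i]);
          Src.erase(Src.begin() + i);
          return true;
        }
      return false;
    }

    int main() {
      std::vector<ToyRange> Primary, Secondary;
      ToyRange R = {8, 16};
      Secondary.push_back(R);
      if (moveRangeContaining(Secondary, Primary, 15))  // 15 lies in [8, 16)
        std::printf("moved; primary now has %u range(s)\n",
                    (unsigned)Primary.size());
      return 0;
    }
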
Modified: llvm/branches/non-call-eh/lib/CodeGen/TargetInstrInfoImpl.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/TargetInstrInfoImpl.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/TargetInstrInfoImpl.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/TargetInstrInfoImpl.cpp Sun Jul  6 15:45:41 2008
@@ -14,24 +14,39 @@
 
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
 using namespace llvm;
 
 // commuteInstruction - The default implementation of this method just exchanges
 // operand 1 and 2.
-MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI) const {
+MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
+                                                      bool NewMI) const {
   assert(MI->getOperand(1).isRegister() && MI->getOperand(2).isRegister() &&
          "This only knows how to commute register operands so far");
   unsigned Reg1 = MI->getOperand(1).getReg();
   unsigned Reg2 = MI->getOperand(2).getReg();
   bool Reg1IsKill = MI->getOperand(1).isKill();
   bool Reg2IsKill = MI->getOperand(2).isKill();
+  bool ChangeReg0 = false;
   if (MI->getOperand(0).getReg() == Reg1) {
     // Must be two address instruction!
     assert(MI->getDesc().getOperandConstraint(0, TOI::TIED_TO) &&
            "Expecting a two-address instruction!");
     Reg2IsKill = false;
-    MI->getOperand(0).setReg(Reg2);
+    ChangeReg0 = true;
+  }
+
+  if (NewMI) {
+    // Create a new instruction.
+    unsigned Reg0 = ChangeReg0 ? Reg2 : MI->getOperand(0).getReg();
+    bool Reg0IsDead = MI->getOperand(0).isDead();
+    return BuildMI(MI->getDesc()).addReg(Reg0, true, false, false, Reg0IsDead)
+      .addReg(Reg2, false, false, Reg2IsKill)
+      .addReg(Reg1, false, false, Reg1IsKill);
   }
+
+  if (ChangeReg0)
+    MI->getOperand(0).setReg(Reg2);
   MI->getOperand(2).setReg(Reg1);
   MI->getOperand(1).setReg(Reg2);
   MI->getOperand(2).setIsKill(Reg1IsKill);

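The default commuteInstruction now either mutates the instruction in place or, when NewMI is true, builds a fresh instruction with the operands already swapped, taking care to rewrite operand 0 when it is tied to operand 1. A tiny standalone sketch of that operand swap on a toy three-operand instruction (plain structs, not MachineInstr or BuildMI):

    #include <cstdio>

    struct ToyInst { unsigned Op[3]; bool Kill1, Kill2; };

    // Swap operands 1 and 2; if operand 0 was tied to operand 1 (two-address
    // form), rewrite it as well so the destination follows the commuted source.
    static ToyInst commute(ToyInst MI, bool TwoAddress) {
      if (TwoAddress && MI.Op[0] == MI.Op[1]) {
        MI.Op[0] = MI.Op[2];
        MI.Kill2 = false;     // the new tied operand must not be marked killed
      }
      unsigned TmpR = MI.Op[1];
      MI.Op[1] = MI.Op[2];
      MI.Op[2] = TmpR;
      bool TmpK = MI.Kill1; MI.Kill1 = MI.Kill2; MI.Kill2 = TmpK;
      return MI;
    }

    int main() {
      ToyInst MI = {{1, 1, 2}, false, true};   // r1 = op r1, r2<kill>
      ToyInst New = commute(MI, true);         // r2 = op r2, r1
      std::printf("%u = op %u, %u\n", New.Op[0], New.Op[1], New.Op[2]);
      return 0;
    }
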
Modified: llvm/branches/non-call-eh/lib/CodeGen/TwoAddressInstructionPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/TwoAddressInstructionPass.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/TwoAddressInstructionPass.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/TwoAddressInstructionPass.cpp Sun Jul  6 15:45:41 2008
@@ -39,6 +39,9 @@
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/STLExtras.h"
 using namespace llvm;
@@ -47,50 +50,52 @@
 STATISTIC(NumCommuted        , "Number of instructions commuted to coalesce");
 STATISTIC(NumConvertedTo3Addr, "Number of instructions promoted to 3-address");
 STATISTIC(Num3AddrSunk,        "Number of 3-address instructions sunk");
+STATISTIC(NumReMats,           "Number of instructions re-materialized");
 
 namespace {
-  struct VISIBILITY_HIDDEN TwoAddressInstructionPass
-   : public MachineFunctionPass {
+  class VISIBILITY_HIDDEN TwoAddressInstructionPass
+    : public MachineFunctionPass {
     const TargetInstrInfo *TII;
     const TargetRegisterInfo *TRI;
     MachineRegisterInfo *MRI;
     LiveVariables *LV;
 
+    bool Sink3AddrInstruction(MachineBasicBlock *MBB, MachineInstr *MI,
+                              unsigned Reg,
+                              MachineBasicBlock::iterator OldPos);
+
+    bool isSafeToReMat(unsigned DstReg, MachineInstr *MI);
+    bool isProfitableToReMat(unsigned Reg, const TargetRegisterClass *RC,
+                             MachineInstr *MI, MachineInstr *DefMI,
+                             MachineBasicBlock *MBB, unsigned Loc,
+                             DenseMap<MachineInstr*, unsigned> &DistanceMap);
   public:
     static char ID; // Pass identification, replacement for typeid
     TwoAddressInstructionPass() : MachineFunctionPass((intptr_t)&ID) {}
 
-    virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+      AU.addPreserved<LiveVariables>();
+      AU.addPreservedID(MachineLoopInfoID);
+      AU.addPreservedID(MachineDominatorsID);
+      AU.addPreservedID(PHIEliminationID);
+      MachineFunctionPass::getAnalysisUsage(AU);
+    }
 
-    /// runOnMachineFunction - pass entry point
+    /// runOnMachineFunction - Pass entry point.
     bool runOnMachineFunction(MachineFunction&);
-
-  private:
-    bool Sink3AddrInstruction(MachineBasicBlock *MBB, MachineInstr *MI,
-                              unsigned Reg,
-                              MachineBasicBlock::iterator OldPos);
   };
-
-  char TwoAddressInstructionPass::ID = 0;
-  RegisterPass<TwoAddressInstructionPass>
-  X("twoaddressinstruction", "Two-Address instruction pass");
 }
 
-const PassInfo *llvm::TwoAddressInstructionPassID = X.getPassInfo();
+char TwoAddressInstructionPass::ID = 0;
+static RegisterPass<TwoAddressInstructionPass>
+X("twoaddressinstruction", "Two-Address instruction pass");
 
-void TwoAddressInstructionPass::getAnalysisUsage(AnalysisUsage &AU) const {
-  AU.addRequired<LiveVariables>();
-  AU.addPreserved<LiveVariables>();
-  AU.addPreservedID(MachineLoopInfoID);
-  AU.addPreservedID(MachineDominatorsID);
-  AU.addPreservedID(PHIEliminationID);
-  MachineFunctionPass::getAnalysisUsage(AU);
-}
+const PassInfo *const llvm::TwoAddressInstructionPassID = &X;
 
 /// Sink3AddrInstruction - A two-address instruction has been converted to a
 /// three-address instruction to avoid clobbering a register. Try to sink it
-/// past the instruction that would kill the above mentioned register to
-/// reduce register pressure.
+/// past the instruction that would kill the above mentioned register to reduce
+/// register pressure.
 bool TwoAddressInstructionPass::Sink3AddrInstruction(MachineBasicBlock *MBB,
                                            MachineInstr *MI, unsigned SavedReg,
                                            MachineBasicBlock::iterator OldPos) {
@@ -101,6 +106,7 @@
 
   unsigned DefReg = 0;
   SmallSet<unsigned, 4> UseRegs;
+
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = MI->getOperand(i);
     if (!MO.isRegister())
@@ -131,19 +137,26 @@
     KillMI = UseMO.getParent();
     break;
   }
+
   if (!KillMI || KillMI->getParent() != MBB)
     return false;
 
-  // If any of the definitions are used  by another instruction between
-  // the position and the kill use, then it's not safe to sink it.
-  // FIXME: This can be sped up if there is an easy way to query whether
-  // an instruction if before or after another instruction. Then we can
-  // use MachineRegisterInfo def / use instead.
+  // If any of the definitions are used by another instruction between the
+  // position and the kill use, then it's not safe to sink it.
+  // 
+  // FIXME: This can be sped up if there is an easy way to query whether an
+  // instruction is before or after another instruction. Then we can use
+  // MachineRegisterInfo def / use instead.
   MachineOperand *KillMO = NULL;
   MachineBasicBlock::iterator KillPos = KillMI;
   ++KillPos;
+
+  unsigned NumVisited = 0;
   for (MachineBasicBlock::iterator I = next(OldPos); I != KillPos; ++I) {
     MachineInstr *OtherMI = I;
+    if (NumVisited > 30)  // FIXME: Arbitrary limit to reduce compile time cost.
+      return false;
+    ++NumVisited;
     for (unsigned i = 0, e = OtherMI->getNumOperands(); i != e; ++i) {
       MachineOperand &MO = OtherMI->getOperand(i);
       if (!MO.isRegister())
@@ -153,10 +166,11 @@
         continue;
       if (DefReg == MOReg)
         return false;
+
       if (MO.isKill()) {
         if (OtherMI == KillMI && MOReg == SavedReg)
-          // Save the operand that kills the register. We want unset the kill
-          // marker is we can sink MI past it.
+          // Save the operand that kills the register. We want to unset the kill
+          // marker if we can sink MI past it.
           KillMO = &MO;
         else if (UseRegs.count(MOReg))
           // One of the uses is killed before the destination.
@@ -169,9 +183,9 @@
   KillMO->setIsKill(false);
   KillMO = MI->findRegisterUseOperand(SavedReg, false, TRI);
   KillMO->setIsKill(true);
-  LiveVariables::VarInfo& VarInfo = LV->getVarInfo(SavedReg);
-  VarInfo.removeKill(KillMI);
-  VarInfo.Kills.push_back(MI);
+  
+  if (LV)
+    LV->replaceKillInstruction(SavedReg, KillMI, MI);
 
   // Move instruction to its destination.
   MBB->remove(MI);
@@ -181,8 +195,86 @@
   return true;
 }
 
-/// runOnMachineFunction - Reduce two-address instructions to two
-/// operands.
+/// isSafeToReMat - Return true if it's safe to rematerialize the specified
+/// instruction which defined the specified register instead of copying it.
+bool
+TwoAddressInstructionPass::isSafeToReMat(unsigned DstReg, MachineInstr *MI) {
+  const TargetInstrDesc &TID = MI->getDesc();
+  if (!TID.isAsCheapAsAMove())
+    return false;
+  bool SawStore = false;
+  if (!MI->isSafeToMove(TII, SawStore))
+    return false;
+  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+    MachineOperand &MO = MI->getOperand(i);
+    if (!MO.isRegister())
+      continue;
+    // FIXME: For now, do not remat any instruction with register operands.
+    // Later on, we can loosen the restriction if the register operands have
+    // not been modified between the def and use. Note, this is different from
+    // MachineSink because the code is no longer in two-address form (at least
+    // partially).
+    if (MO.isUse())
+      return false;
+    else if (!MO.isDead() && MO.getReg() != DstReg)
+      return false;
+  }
+  return true;
+}
+
+/// isTwoAddrUse - Return true if the specified MI is using the specified
+/// register as a two-address operand.
+static bool isTwoAddrUse(MachineInstr *UseMI, unsigned Reg) {
+  const TargetInstrDesc &TID = UseMI->getDesc();
+  for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i) {
+    MachineOperand &MO = UseMI->getOperand(i);
+    if (MO.isRegister() && MO.getReg() == Reg &&
+        (MO.isDef() || TID.getOperandConstraint(i, TOI::TIED_TO) != -1))
+      // Earlier use is a two-address one.
+      return true;
+  }
+  return false;
+}
+
+/// isProfitableToReMat - Return true if the heuristic determines it is likely
+/// to be profitable to re-materialize the definition of Reg rather than copy
+/// the register.
+bool
+TwoAddressInstructionPass::isProfitableToReMat(unsigned Reg,
+                                const TargetRegisterClass *RC,
+                                MachineInstr *MI, MachineInstr *DefMI,
+                                MachineBasicBlock *MBB, unsigned Loc,
+                                DenseMap<MachineInstr*, unsigned> &DistanceMap){
+  bool OtherUse = false;
+  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
+         UE = MRI->use_end(); UI != UE; ++UI) {
+    MachineOperand &UseMO = UI.getOperand();
+    if (!UseMO.isUse())
+      continue;
+    MachineInstr *UseMI = UseMO.getParent();
+    MachineBasicBlock *UseMBB = UseMI->getParent();
+    if (UseMBB == MBB) {
+      DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UseMI);
+      if (DI != DistanceMap.end() && DI->second == Loc)
+        continue;  // Current use.
+      OtherUse = true;
+      // There is at least one other use in the MBB that will clobber the
+      // register. 
+      if (isTwoAddrUse(UseMI, Reg))
+        return true;
+    }
+  }
+
+  // If other uses in MBB are not two-address uses, then don't remat.
+  if (OtherUse)
+    return false;
+
+  // No other uses in the same block, remat if it's defined in the same
+  // block so it does not unnecessarily extend the live range.
+  return MBB == DefMI->getParent();
+}
+
+/// runOnMachineFunction - Reduce two-address instructions to two operands.
 ///
 bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
   DOUT << "Machine Function\n";
@@ -190,21 +282,32 @@
   MRI = &MF.getRegInfo();
   TII = TM.getInstrInfo();
   TRI = TM.getRegisterInfo();
-  LV = &getAnalysis<LiveVariables>();
+  LV = getAnalysisToUpdate<LiveVariables>();
 
   bool MadeChange = false;
 
   DOUT << "********** REWRITING TWO-ADDR INSTRS **********\n";
   DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
 
+  // ReMatRegs - Keep track of the registers whose def's are remat'ed.
+  BitVector ReMatRegs;
+  ReMatRegs.resize(MRI->getLastVirtReg()+1);
+
+  // DistanceMap - Keep track of the distance of an MI from the start of the
+  // current basic block.
+  DenseMap<MachineInstr*, unsigned> DistanceMap;
+
   for (MachineFunction::iterator mbbi = MF.begin(), mbbe = MF.end();
        mbbi != mbbe; ++mbbi) {
+    unsigned Dist = 0;
+    DistanceMap.clear();
     for (MachineBasicBlock::iterator mi = mbbi->begin(), me = mbbi->end();
          mi != me; ) {
       MachineBasicBlock::iterator nmi = next(mi);
       const TargetInstrDesc &TID = mi->getDesc();
-
       bool FirstTied = true;
+
+      DistanceMap.insert(std::make_pair(mi, ++Dist));
       for (unsigned si = 1, e = TID.getNumOperands(); si < e; ++si) {
         int ti = TID.getOperandConstraint(si, TOI::TIED_TO);
         if (ti == -1)
@@ -214,15 +317,16 @@
           ++NumTwoAddressInstrs;
           DOUT << '\t'; DEBUG(mi->print(*cerr.stream(), &TM));
         }
+
         FirstTied = false;
 
         assert(mi->getOperand(si).isRegister() && mi->getOperand(si).getReg() &&
                mi->getOperand(si).isUse() && "two address instruction invalid");
 
-        // if the two operands are the same we just remove the use
+        // If the two operands are the same we just remove the use
         // and mark the def as def&use, otherwise we have to insert a copy.
         if (mi->getOperand(ti).getReg() != mi->getOperand(si).getReg()) {
-          // rewrite:
+          // Rewrite:
           //     a = b op c
           // to:
           //     a = b
@@ -257,19 +361,25 @@
               assert(mi->getOperand(3-si).isRegister() &&
                      "Not a proper commutative instruction!");
               unsigned regC = mi->getOperand(3-si).getReg();
+
               if (mi->killsRegister(regC)) {
                 DOUT << "2addr: COMMUTING  : " << *mi;
                 MachineInstr *NewMI = TII->commuteInstruction(mi);
+
                 if (NewMI == 0) {
                   DOUT << "2addr: COMMUTING FAILED!\n";
                 } else {
                   DOUT << "2addr: COMMUTED TO: " << *NewMI;
                   // If the instruction changed to commute it, update livevar.
                   if (NewMI != mi) {
-                    LV->instructionChanged(mi, NewMI); // Update live variables
+                    if (LV)
+                      // Update live variables
+                      LV->replaceKillInstruction(regC, mi, NewMI);
+                    
                     mbbi->insert(mi, NewMI);           // Insert the new inst
                     mbbi->erase(mi);                   // Nuke the old inst.
                     mi = NewMI;
+                    DistanceMap.insert(std::make_pair(NewMI, Dist));
                   }
 
                   ++NumCommuted;
@@ -285,49 +395,70 @@
               // FIXME: This assumes there are no more operands which are tied
               // to another register.
 #ifndef NDEBUG
-              for (unsigned i = si+1, e = TID.getNumOperands(); i < e; ++i)
+              for (unsigned i = si + 1, e = TID.getNumOperands(); i < e; ++i)
                 assert(TID.getOperandConstraint(i, TOI::TIED_TO) == -1);
 #endif
 
-              if (MachineInstr *New=TII->convertToThreeAddress(mbbi, mi, *LV)) {
+              MachineInstr *NewMI = TII->convertToThreeAddress(mbbi, mi, LV);
+              if (NewMI) {
                 DOUT << "2addr: CONVERTING 2-ADDR: " << *mi;
-                DOUT << "2addr:         TO 3-ADDR: " << *New;
+                DOUT << "2addr:         TO 3-ADDR: " << *NewMI;
                 bool Sunk = false;
-                if (New->findRegisterUseOperand(regB, false, TRI))
+
+                if (NewMI->findRegisterUseOperand(regB, false, TRI))
                   // FIXME: Temporary workaround. If the new instruction doesn't
                   // uses regB, convertToThreeAddress must have created more
                   // then one instruction.
-                  Sunk = Sink3AddrInstruction(mbbi, New, regB, mi);
-                mbbi->erase(mi);                 // Nuke the old inst.
+                  Sunk = Sink3AddrInstruction(mbbi, NewMI, regB, mi);
+
+                mbbi->erase(mi); // Nuke the old inst.
+
                 if (!Sunk) {
-                  mi = New;
+                  DistanceMap.insert(std::make_pair(NewMI, Dist));
+                  mi = NewMI;
                   nmi = next(mi);
                 }
+
                 ++NumConvertedTo3Addr;
-                // Done with this instruction.
-                break;
+                break; // Done with this instruction.
               }
             }
           }
 
         InstructionRearranged:
-          const TargetRegisterClass* rc = MF.getRegInfo().getRegClass(regA);
-          TII->copyRegToReg(*mbbi, mi, regA, regB, rc, rc);
+          const TargetRegisterClass* rc = MRI->getRegClass(regA);
+          MachineInstr *DefMI = MRI->getVRegDef(regB);
+          // If it's safe and profitable, remat the definition instead of
+          // copying it.
+          if (DefMI &&
+              isSafeToReMat(regB, DefMI) &&
+              isProfitableToReMat(regB, rc, mi, DefMI, mbbi, Dist,DistanceMap)){
+            DEBUG(cerr << "2addr: REMATTING : " << *DefMI << "\n");
+            TII->reMaterialize(*mbbi, mi, regA, DefMI);
+            ReMatRegs.set(regB);
+            ++NumReMats;
+          } else {
+            TII->copyRegToReg(*mbbi, mi, regA, regB, rc, rc);
+          }
 
           MachineBasicBlock::iterator prevMi = prior(mi);
           DOUT << "\t\tprepend:\t"; DEBUG(prevMi->print(*cerr.stream(), &TM));
 
-          // update live variables for regB
-          LiveVariables::VarInfo& varInfoB = LV->getVarInfo(regB);
-          // regB is used in this BB.
-          varInfoB.UsedBlocks[mbbi->getNumber()] = true;
-          if (LV->removeVirtualRegisterKilled(regB, mbbi, mi))
-            LV->addVirtualRegisterKilled(regB, prevMi);
+          // Update live variables for regB.
+          if (LV) {
+            LiveVariables::VarInfo& varInfoB = LV->getVarInfo(regB);
+
+            // regB is used in this BB.
+            varInfoB.UsedBlocks[mbbi->getNumber()] = true;
 
-          if (LV->removeVirtualRegisterDead(regB, mbbi, mi))
-            LV->addVirtualRegisterDead(regB, prevMi);
+            if (LV->removeVirtualRegisterKilled(regB,  mi))
+              LV->addVirtualRegisterKilled(regB, prevMi);
 
-          // replace all occurences of regB with regA
+            if (LV->removeVirtualRegisterDead(regB, mi))
+              LV->addVirtualRegisterDead(regB, prevMi);
+          }
+          
+          // Replace all occurrences of regB with regA.
           for (unsigned i = 0, e = mi->getNumOperands(); i != e; ++i) {
             if (mi->getOperand(i).isRegister() &&
                 mi->getOperand(i).getReg() == regB)
@@ -341,9 +472,20 @@
 
         DOUT << "\t\trewrite to:\t"; DEBUG(mi->print(*cerr.stream(), &TM));
       }
+
       mi = nmi;
     }
   }
 
+  // Some remat'ed instructions are dead.
+  int VReg = ReMatRegs.find_first();
+  while (VReg != -1) {
+    if (MRI->use_empty(VReg)) {
+      MachineInstr *DefMI = MRI->getVRegDef(VReg);
+      DefMI->eraseFromParent();
+    }
+    VReg = ReMatRegs.find_next(VReg);
+  }
+
   return MadeChange;
 }

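The pass rewrites a three-operand form "a = b op c" into two-address form by first materializing "a" from "b" (by a copy, or by rematerializing b's def when the new heuristics say it is safe and profitable) and then rewriting the instruction to "a = a op c". A minimal sketch of that rewrite on strings, purely to show the shape of the transformation rather than the MachineInstr rewriting itself:

    #include <cstdio>
    #include <string>
    #include <vector>

    struct ToyInst { std::string A, B, C, Op; };

    // Rewrite "a = b op c" into "a = b" followed by "a = a op c".
    static std::vector<std::string> makeTwoAddress(const ToyInst &I) {
      std::vector<std::string> Out;
      if (I.A != I.B)                       // only needed when a and b differ
        Out.push_back(I.A + " = " + I.B);   // copy (or remat of b's def)
      Out.push_back(I.A + " = " + I.A + " " + I.Op + " " + I.C);
      return Out;
    }

    int main() {
      ToyInst I = {"a", "b", "c", "add"};
      std::vector<std::string> Seq = makeTwoAddress(I);
      for (unsigned i = 0, e = Seq.size(); i != e; ++i)
        std::printf("%s\n", Seq[i].c_str());
      // prints:
      //   a = b
      //   a = a add c
      return 0;
    }
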
Modified: llvm/branches/non-call-eh/lib/CodeGen/UnreachableBlockElim.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/UnreachableBlockElim.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/UnreachableBlockElim.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/UnreachableBlockElim.cpp Sun Jul  6 15:45:41 2008
@@ -38,10 +38,10 @@
     static char ID; // Pass identification, replacement for typeid
     UnreachableBlockElim() : FunctionPass((intptr_t)&ID) {}
   };
-  char UnreachableBlockElim::ID = 0;
-  RegisterPass<UnreachableBlockElim>
-  X("unreachableblockelim", "Remove unreachable blocks from the CFG");
 }
+char UnreachableBlockElim::ID = 0;
+static RegisterPass<UnreachableBlockElim>
+X("unreachableblockelim", "Remove unreachable blocks from the CFG");
 
 FunctionPass *llvm::createUnreachableBlockEliminationPass() {
   return new UnreachableBlockElim();

Modified: llvm/branches/non-call-eh/lib/CodeGen/VirtRegMap.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/VirtRegMap.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/VirtRegMap.cpp (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/VirtRegMap.cpp Sun Jul  6 15:45:41 2008
@@ -29,36 +29,38 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallSet.h"
 #include <algorithm>
 using namespace llvm;
 
-STATISTIC(NumSpills, "Number of register spills");
-STATISTIC(NumPSpills,"Number of physical register spills");
-STATISTIC(NumReMats, "Number of re-materialization");
-STATISTIC(NumDRM   , "Number of re-materializable defs elided");
-STATISTIC(NumStores, "Number of stores added");
-STATISTIC(NumLoads , "Number of loads added");
-STATISTIC(NumReused, "Number of values reused");
-STATISTIC(NumDSE   , "Number of dead stores elided");
-STATISTIC(NumDCE   , "Number of copies elided");
-STATISTIC(NumDSS   , "Number of dead spill slots removed");
+STATISTIC(NumSpills  , "Number of register spills");
+STATISTIC(NumPSpills , "Number of physical register spills");
+STATISTIC(NumReMats  , "Number of re-materialization");
+STATISTIC(NumDRM     , "Number of re-materializable defs elided");
+STATISTIC(NumStores  , "Number of stores added");
+STATISTIC(NumLoads   , "Number of loads added");
+STATISTIC(NumReused  , "Number of values reused");
+STATISTIC(NumDSE     , "Number of dead stores elided");
+STATISTIC(NumDCE     , "Number of copies elided");
+STATISTIC(NumDSS     , "Number of dead spill slots removed");
+STATISTIC(NumCommutes, "Number of instructions commuted");
 
 namespace {
   enum SpillerName { simple, local };
-
-  static cl::opt<SpillerName>
-  SpillerOpt("spiller",
-             cl::desc("Spiller to use: (default: local)"),
-             cl::Prefix,
-             cl::values(clEnumVal(simple, "  simple spiller"),
-                        clEnumVal(local,  "  local spiller"),
-                        clEnumValEnd),
-             cl::init(local));
 }
 
+static cl::opt<SpillerName>
+SpillerOpt("spiller",
+           cl::desc("Spiller to use: (default: local)"),
+           cl::Prefix,
+           cl::values(clEnumVal(simple, "  simple spiller"),
+                      clEnumVal(local,  "  local spiller"),
+                      clEnumValEnd),
+           cl::init(local));
+
 //===----------------------------------------------------------------------===//
 //  VirtRegMap implementation
 //===----------------------------------------------------------------------===//
@@ -147,8 +149,14 @@
 
 void VirtRegMap::addSpillSlotUse(int FI, MachineInstr *MI) {
   if (!MF.getFrameInfo()->isFixedObjectIndex(FI)) {
-    assert(FI >= 0 && "Spill slot index should not be negative!");
-    SpillSlotToUsesMap[FI-LowSpillSlot].insert(MI);
+    // If FI < LowSpillSlot, this stack reference was produced by
+    // instruction selection and is not a spill
+    if (FI >= LowSpillSlot) {
+      assert(FI >= 0 && "Spill slot index should not be negative!");
+      assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
+             && "Invalid spill slot");
+      SpillSlotToUsesMap[FI-LowSpillSlot].insert(MI);
+    }
   }
 }
 
@@ -179,6 +187,12 @@
     int FI = MO.getIndex();
     if (MF.getFrameInfo()->isFixedObjectIndex(FI))
       continue;
+    // This stack reference was produced by instruction selection and
+    // is not a spill
+    if (FI < LowSpillSlot)
+      continue;
+    assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
+           && "Invalid spill slot");
     SpillSlotToUsesMap[FI-LowSpillSlot].erase(MI);
   }
   MI2VirtMap.erase(MI);
@@ -302,6 +316,7 @@
     MachineRegisterInfo *RegInfo;
     const TargetRegisterInfo *TRI;
     const TargetInstrInfo *TII;
+    DenseMap<MachineInstr*, unsigned> DistanceMap;
   public:
     bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
       RegInfo = &MF.getRegInfo(); 
@@ -333,12 +348,22 @@
       return true;
     }
   private:
+    void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
+                          unsigned Reg, BitVector &RegKills,
+                          std::vector<MachineOperand*> &KillOps);
     bool PrepForUnfoldOpti(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator &MII,
                            std::vector<MachineInstr*> &MaybeDeadStores,
                            AvailableSpills &Spills, BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            VirtRegMap &VRM);
+    bool CommuteToFoldReload(MachineBasicBlock &MBB,
+                             MachineBasicBlock::iterator &MII,
+                             unsigned VirtReg, unsigned SrcReg, int SS,
+                             BitVector &RegKills,
+                             std::vector<MachineOperand*> &KillOps,
+                             const TargetRegisterInfo *TRI,
+                             VirtRegMap &VRM);
     void SpillRegToStackSlot(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator &MII,
                              int Idx, unsigned PhysReg, int StackSlot,
@@ -857,12 +882,12 @@
 /// This enables unfolding optimization for a subsequent instruction which will
 /// also eliminate the newly introduced store instruction.
 bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
-                                     MachineBasicBlock::iterator &MII,
+                                    MachineBasicBlock::iterator &MII,
                                     std::vector<MachineInstr*> &MaybeDeadStores,
-                                     AvailableSpills &Spills,
-                                     BitVector &RegKills,
-                                     std::vector<MachineOperand*> &KillOps,
-                                     VirtRegMap &VRM) {
+                                    AvailableSpills &Spills,
+                                    BitVector &RegKills,
+                                    std::vector<MachineOperand*> &KillOps,
+                                    VirtRegMap &VRM) {
   MachineFunction &MF = *MBB.getParent();
   MachineInstr &MI = *MII;
   unsigned UnfoldedOpc = 0;
@@ -943,6 +968,7 @@
           VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
         VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
         MII = MBB.insert(MII, FoldedMI);
+        InvalidateKills(MI, RegKills, KillOps);
         VRM.RemoveMachineInstrFromMaps(&MI);
         MBB.erase(&MI);
         return true;
@@ -953,6 +979,92 @@
   return false;
 }
 
+/// CommuteToFoldReload -
+/// Look for
+/// r1 = load fi#1
+/// r1 = op r1, r2<kill>
+/// store r1, fi#1
+///
+/// If op is commutable and r2 is killed, then we can xform these to
+/// r2 = op r2, fi#1
+/// store r2, fi#1
+bool LocalSpiller::CommuteToFoldReload(MachineBasicBlock &MBB,
+                                    MachineBasicBlock::iterator &MII,
+                                    unsigned VirtReg, unsigned SrcReg, int SS,
+                                    BitVector &RegKills,
+                                    std::vector<MachineOperand*> &KillOps,
+                                    const TargetRegisterInfo *TRI,
+                                    VirtRegMap &VRM) {
+  if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
+    return false;
+
+  MachineFunction &MF = *MBB.getParent();
+  MachineInstr &MI = *MII;
+  MachineBasicBlock::iterator DefMII = prior(MII);
+  MachineInstr *DefMI = DefMII;
+  const TargetInstrDesc &TID = DefMI->getDesc();
+  unsigned NewDstIdx;
+  if (DefMII != MBB.begin() &&
+      TID.isCommutable() &&
+      TII->CommuteChangesDestination(DefMI, NewDstIdx)) {
+    MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
+    unsigned NewReg = NewDstMO.getReg();
+    if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
+      return false;
+    MachineInstr *ReloadMI = prior(DefMII);
+    int FrameIdx;
+    unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
+    if (DestReg != SrcReg || FrameIdx != SS)
+      return false;
+    int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
+    if (UseIdx == -1)
+      return false;
+    int DefIdx = TID.getOperandConstraint(UseIdx, TOI::TIED_TO);
+    if (DefIdx == -1)
+      return false;
+    assert(DefMI->getOperand(DefIdx).isRegister() &&
+           DefMI->getOperand(DefIdx).getReg() == SrcReg);
+
+    // Now commute def instruction.
+    MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
+    if (!CommutedMI)
+      return false;
+    SmallVector<unsigned, 2> Ops;
+    Ops.push_back(NewDstIdx);
+    MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
+    delete CommutedMI;  // Not needed since foldMemoryOperand returns new MI.
+    if (!FoldedMI)
+      return false;
+
+    VRM.addSpillSlotUse(SS, FoldedMI);
+    VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
+    // Insert new def MI and spill MI.
+    const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
+    TII->storeRegToStackSlot(MBB, MI, NewReg, true, SS, RC);
+    MII = prior(MII);
+    MachineInstr *StoreMI = MII;
+    VRM.addSpillSlotUse(SS, StoreMI);
+    VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
+    MII = MBB.insert(MII, FoldedMI);  // Update MII to backtrack.
+
+    // Delete all 3 old instructions.
+    InvalidateKills(*ReloadMI, RegKills, KillOps);
+    VRM.RemoveMachineInstrFromMaps(ReloadMI);
+    MBB.erase(ReloadMI);
+    InvalidateKills(*DefMI, RegKills, KillOps);
+    VRM.RemoveMachineInstrFromMaps(DefMI);
+    MBB.erase(DefMI);
+    InvalidateKills(MI, RegKills, KillOps);
+    VRM.RemoveMachineInstrFromMaps(&MI);
+    MBB.erase(&MI);
+
+    ++NumCommutes;
+    return true;
+  }
+
+  return false;
+}
+
 /// findSuperReg - Find the SubReg's super-register of given register class
 /// where its SubIdx sub-register is SubReg.
 static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
@@ -1026,6 +1138,49 @@
   ++NumStores;
 }
 
+/// TransferDeadness - An identity copy definition is dead and it's being
+/// removed. Find the last def or use and mark it as dead / kill.
+void LocalSpiller::TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
+                                    unsigned Reg, BitVector &RegKills,
+                                    std::vector<MachineOperand*> &KillOps) {
+  int LastUDDist = -1;
+  MachineInstr *LastUDMI = NULL;
+  for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
+         RE = RegInfo->reg_end(); RI != RE; ++RI) {
+    MachineInstr *UDMI = &*RI;
+    if (UDMI->getParent() != MBB)
+      continue;
+    DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
+    if (DI == DistanceMap.end() || DI->second > CurDist)
+      continue;
+    if ((int)DI->second < LastUDDist)
+      continue;
+    LastUDDist = DI->second;
+    LastUDMI = UDMI;
+  }
+
+  if (LastUDMI) {
+    const TargetInstrDesc &TID = LastUDMI->getDesc();
+    MachineOperand *LastUD = NULL;
+    for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
+      MachineOperand &MO = LastUDMI->getOperand(i);
+      if (!MO.isRegister() || MO.getReg() != Reg)
+        continue;
+      if (!LastUD || (LastUD->isUse() && MO.isDef()))
+        LastUD = &MO;
+      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1)
+        return;
+    }
+    if (LastUD->isDef())
+      LastUD->setIsDead();
+    else {
+      LastUD->setIsKill();
+      RegKills.set(Reg);
+      KillOps[Reg] = LastUD;
+    }
+  }
+}
+
 /// rewriteMBB - Keep track of which spills are available even after the
 /// register allocator is done with them.  If possible, avid reloading vregs.
 void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
@@ -1054,6 +1209,8 @@
   std::vector<MachineOperand*>  KillOps;
   KillOps.resize(TRI->getNumRegs(), NULL);
 
+  unsigned Dist = 0;
+  DistanceMap.clear();
   for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
        MII != E; ) {
     MachineBasicBlock::iterator NextMII = MII; ++NextMII;
@@ -1431,6 +1588,7 @@
               InvalidateKill(InReg, RegKills, KillOps);
             }
 
+            InvalidateKills(MI, RegKills, KillOps);
             VRM.RemoveMachineInstrFromMaps(&MI);
             MBB.erase(&MI);
             Erased = true;
@@ -1442,6 +1600,7 @@
           if (PhysReg &&
               TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
             MBB.insert(MII, NewMIs[0]);
+            InvalidateKills(MI, RegKills, KillOps);
             VRM.RemoveMachineInstrFromMaps(&MI);
             MBB.erase(&MI);
             Erased = true;
@@ -1465,20 +1624,26 @@
           // the value and there isn't an earlier def that has already clobbered
           // the physreg.
           if (PhysReg &&
-              !TII->isStoreToStackSlot(&MI, SS) && // Not profitable!
-              DeadStore->killsRegister(PhysReg) &&
-              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
-            MBB.insert(MII, NewMIs[0]);
-            NewStore = NewMIs[1];
-            MBB.insert(MII, NewStore);
-            VRM.addSpillSlotUse(SS, NewStore);
-            VRM.RemoveMachineInstrFromMaps(&MI);
-            MBB.erase(&MI);
-            Erased = true;
-            --NextMII;
-            --NextMII;  // backtrack to the unfolded instruction.
-            BackTracked = true;
-            isDead = true;
+              !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
+            MachineOperand *KillOpnd =
+              DeadStore->findRegisterUseOperand(PhysReg, true);
+            // Note, if the store is storing a sub-register, it's possible the
+            // super-register is needed below.
+            if (KillOpnd && !KillOpnd->getSubReg() &&
+                TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
+              MBB.insert(MII, NewMIs[0]);
+              NewStore = NewMIs[1];
+              MBB.insert(MII, NewStore);
+              VRM.addSpillSlotUse(SS, NewStore);
+              InvalidateKills(MI, RegKills, KillOps);
+              VRM.RemoveMachineInstrFromMaps(&MI);
+              MBB.erase(&MI);
+              Erased = true;
+              --NextMII;
+              --NextMII;  // backtrack to the unfolded instruction.
+              BackTracked = true;
+              isDead = true;
+            }
           }
         }
 
@@ -1516,15 +1681,23 @@
           if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
             assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                    "Src hasn't been allocated yet?");
+
+            if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
+                                    RegKills, KillOps, TRI, VRM)) {
+              NextMII = next(MII);
+              BackTracked = true;
+              goto ProcessNextInst;
+            }
+
             // Okay, this is certainly a store of SrcReg to [StackSlot].  Mark
             // this as a potentially dead store in case there is a subsequent
             // store into the stack slot without a read from it.
             MaybeDeadStores[StackSlot] = &MI;
 
             // If the stack slot value was previously available in some other
-            // register, change it now.  Otherwise, make the register available,
-            // in PhysReg.
-            Spills.addAvailable(StackSlot, &MI, SrcReg, false/*don't clobber*/);
+            // register, change it now.  Otherwise, make the register
+            // available in PhysReg.
+            Spills.addAvailable(StackSlot, &MI, SrcReg, false/*!clobber*/);
           }
         }
       }
@@ -1544,6 +1717,13 @@
         if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
           ++NumDCE;
           DOUT << "Removing now-noop copy: " << MI;
+          SmallVector<unsigned, 2> KillRegs;
+          InvalidateKills(MI, RegKills, KillOps, &KillRegs);
+          if (MO.isDead() && !KillRegs.empty()) {
+            assert(KillRegs[0] == Dst);
+            // Last def is now dead.
+            TransferDeadness(&MBB, Dist, Src, RegKills, KillOps);
+          }
           VRM.RemoveMachineInstrFromMaps(&MI);
           MBB.erase(&MI);
           Erased = true;
@@ -1621,6 +1801,7 @@
           if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
             ++NumDCE;
             DOUT << "Removing now-noop copy: " << MI;
+            InvalidateKills(MI, RegKills, KillOps);
             VRM.RemoveMachineInstrFromMaps(&MI);
             MBB.erase(&MI);
             Erased = true;
@@ -1631,6 +1812,7 @@
       }    
     }
   ProcessNextInst:
+    DistanceMap.insert(std::make_pair(&MI, Dist++));
     if (!Erased && !BackTracked) {
       for (MachineBasicBlock::iterator II = MI; II != NextMII; ++II)
         UpdateKills(*II, RegKills, KillOps);

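As an aside for readers following the TransferDeadness / DistanceMap addition above: the technique is simply to number each instruction while the block is rewritten so that, when an identity copy is deleted, the latest surviving def or use of its source register can be found and re-marked as dead or killed. Below is a minimal, self-contained C++ sketch of that idea with illustrative names only; it is not the LocalSpiller code.

#include <map>
#include <vector>

struct Operand { unsigned Reg; bool IsDef; bool IsDead; bool IsKill; };
struct Instr   { std::vector<Operand> Ops; };

// Among the instructions that reference Reg and were numbered before CurDist,
// pick the latest one and mark its operand dead (if a def) or killed (if a use).
void transferDeadness(const std::vector<Instr*> &RefsOfReg,
                      const std::map<Instr*, unsigned> &DistanceMap,
                      unsigned CurDist, unsigned Reg) {
  Instr *Last = 0;
  int LastDist = -1;
  for (unsigned i = 0, e = RefsOfReg.size(); i != e; ++i) {
    std::map<Instr*, unsigned>::const_iterator DI =
      DistanceMap.find(RefsOfReg[i]);
    if (DI == DistanceMap.end() || DI->second > CurDist)
      continue;                        // not numbered yet, or after the copy
    if ((int)DI->second < LastDist)
      continue;                        // an earlier reference; keep looking
    LastDist = DI->second;
    Last = RefsOfReg[i];
  }
  if (!Last)
    return;
  for (unsigned i = 0, e = Last->Ops.size(); i != e; ++i) {
    Operand &O = Last->Ops[i];
    if (O.Reg != Reg)
      continue;
    if (O.IsDef)
      O.IsDead = true;                 // last def: the value is never read again
    else
      O.IsKill = true;                 // last use: the register dies here
    return;
  }
}
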
Modified: llvm/branches/non-call-eh/lib/CodeGen/VirtRegMap.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/CodeGen/VirtRegMap.h?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/CodeGen/VirtRegMap.h (original)
+++ llvm/branches/non-call-eh/lib/CodeGen/VirtRegMap.h Sun Jul  6 15:45:41 2008
@@ -19,7 +19,6 @@
 
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/IndexedMap.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/Support/Streams.h"

Modified: llvm/branches/non-call-eh/lib/Debugger/ProgramInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Debugger/ProgramInfo.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/Debugger/ProgramInfo.cpp (original)
+++ llvm/branches/non-call-eh/lib/Debugger/ProgramInfo.cpp Sun Jul  6 15:45:41 2008
@@ -14,6 +14,7 @@
 
 #include "llvm/Debugger/ProgramInfo.h"
 #include "llvm/Constants.h"
+#include "llvm/Analysis/ValueTracking.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/Intrinsics.h"
 #include "llvm/IntrinsicInst.h"
@@ -115,8 +116,10 @@
         if (ConstantInt *CUI = dyn_cast<ConstantInt>(CS->getOperand(1)))
           Version = CUI->getZExtValue();
 
-        BaseName  = CS->getOperand(3)->getStringValue();
-        Directory = CS->getOperand(4)->getStringValue();
+        if (!GetConstantStringInfo(CS->getOperand(3), BaseName))
+          BaseName = "";
+        if (!GetConstantStringInfo(CS->getOperand(4), Directory))
+          Directory = "";
       }
 }
 
@@ -156,7 +159,8 @@
           SourceFile = &PI.getSourceFile(GV);
 
         // Entry #2 is the function name.
-        Name = CS->getOperand(2)->getStringValue();
+        if (!GetConstantStringInfo(CS->getOperand(2), Name))
+          Name = "";
       }
 }
 

Modified: llvm/branches/non-call-eh/lib/Debugger/SourceLanguage-Unknown.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Debugger/SourceLanguage-Unknown.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/Debugger/SourceLanguage-Unknown.cpp (original)
+++ llvm/branches/non-call-eh/lib/Debugger/SourceLanguage-Unknown.cpp Sun Jul  6 15:45:41 2008
@@ -59,7 +59,7 @@
 //
 
 namespace {
-  struct SLU : public SourceLanguage {
+  static struct SLU : public SourceLanguage {
     //===------------------------------------------------------------------===//
     // Implement the miscellaneous methods...
     //

Modified: llvm/branches/non-call-eh/lib/ExecutionEngine/ExecutionEngine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/ExecutionEngine/ExecutionEngine.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/ExecutionEngine/ExecutionEngine.cpp (original)
+++ llvm/branches/non-call-eh/lib/ExecutionEngine/ExecutionEngine.cpp Sun Jul  6 15:45:41 2008
@@ -40,6 +40,7 @@
 
 ExecutionEngine::ExecutionEngine(ModuleProvider *P) : LazyFunctionCreator(0) {
   LazyCompilationDisabled = false;
+  SymbolSearchingDisabled = false;
   Modules.push_back(P);
   assert(P && "ModuleProvider is null?");
 }
@@ -59,6 +60,7 @@
     ModuleProvider *MP = *I;
     if (MP == P) {
       Modules.erase(I);
+      clearGlobalMappingsFromModule(MP->getModule());
       return MP->releaseModule(ErrInfo);
     }
   }
@@ -106,6 +108,22 @@
   state.getGlobalAddressReverseMap(locked).clear();
 }
 
+/// clearGlobalMappingsFromModule - Clear all global mappings that came from a
+/// particular module, because it has been removed from the JIT.
+void ExecutionEngine::clearGlobalMappingsFromModule(Module *M) {
+  MutexGuard locked(lock);
+  
+  for (Module::iterator FI = M->begin(), FE = M->end(); FI != FE; ++FI) {
+    state.getGlobalAddressMap(locked).erase(FI);
+    state.getGlobalAddressReverseMap(locked).erase(FI);
+  }
+  for (Module::global_iterator GI = M->global_begin(), GE = M->global_end(); 
+       GI != GE; ++GI) {
+    state.getGlobalAddressMap(locked).erase(GI);
+    state.getGlobalAddressReverseMap(locked).erase(GI);
+  }
+}
+
 /// updateGlobalMapping - Replace an existing mapping for GV with a new
 /// address.  This updates both maps as required.  If "Addr" is null, the
 /// entry for the global is removed from the mappings.
@@ -812,35 +830,26 @@
   } else if (isa<ConstantAggregateZero>(Init)) {
     memset(Addr, 0, (size_t)getTargetData()->getABITypeSize(Init->getType()));
     return;
-  } else if (Init->getType()->isFirstClassType()) {
-    GenericValue Val = getConstantValue(Init);
-    StoreValueToMemory(Val, (GenericValue*)Addr, Init->getType());
-    return;
-  }
-
-  switch (Init->getType()->getTypeID()) {
-  case Type::ArrayTyID: {
-    const ConstantArray *CPA = cast<ConstantArray>(Init);
+  } else if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) {
     unsigned ElementSize =
       getTargetData()->getABITypeSize(CPA->getType()->getElementType());
     for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
       InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize);
     return;
-  }
-
-  case Type::StructTyID: {
-    const ConstantStruct *CPS = cast<ConstantStruct>(Init);
+  } else if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(Init)) {
     const StructLayout *SL =
       getTargetData()->getStructLayout(cast<StructType>(CPS->getType()));
     for (unsigned i = 0, e = CPS->getNumOperands(); i != e; ++i)
       InitializeMemory(CPS->getOperand(i), (char*)Addr+SL->getElementOffset(i));
     return;
+  } else if (Init->getType()->isFirstClassType()) {
+    GenericValue Val = getConstantValue(Init);
+    StoreValueToMemory(Val, (GenericValue*)Addr, Init->getType());
+    return;
   }
 
-  default:
-    cerr << "Bad Type: " << *Init->getType() << "\n";
-    assert(0 && "Unknown constant type to initialize memory with!");
-  }
+  cerr << "Bad Type: " << *Init->getType() << "\n";
+  assert(0 && "Unknown constant type to initialize memory with!");
 }
 
 /// EmitGlobals - Emit all of the global variables to memory, storing their
@@ -884,7 +893,7 @@
           continue;
         
         // Otherwise, we know it's linkonce/weak, replace it if this is a strong
-        // symbol.
+        // symbol.  FIXME is this right for common?
         if (GV->hasExternalLinkage() || GVEntry->hasExternalWeakLinkage())
           GVEntry = GV;
       }

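One point worth spelling out: the updateGlobalMapping contract quoted above (a null address removes the entry) is the per-global analogue of what the new clearGlobalMappingsFromModule does in bulk when a module is removed. A hedged one-liner sketch, using only the method named in this file; the exact parameter types are assumed from the header.

#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/GlobalValue.h"
using namespace llvm;

// Drop a single global's address mapping so the next lookup re-resolves it;
// passing a null address removes the entry, per the comment above.
void forgetGlobal(ExecutionEngine *EE, const GlobalValue *GV) {
  EE->updateGlobalMapping(GV, 0);
}
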
Modified: llvm/branches/non-call-eh/lib/ExecutionEngine/ExecutionEngineBindings.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/ExecutionEngine/ExecutionEngineBindings.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/ExecutionEngine/ExecutionEngineBindings.cpp (original)
+++ llvm/branches/non-call-eh/lib/ExecutionEngine/ExecutionEngineBindings.cpp Sun Jul  6 15:45:41 2008
@@ -104,7 +104,7 @@
                           char **OutError) {
   std::string Error;
   if (ExecutionEngine *Interp =
-      ExecutionEngine::create(unwrap(MP), false, &Error)) {
+      ExecutionEngine::create(unwrap(MP), true, &Error)) {
     *OutInterp = wrap(Interp);
     return 0;
   }
@@ -192,3 +192,8 @@
 LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE) {
   return wrap(unwrap(EE)->getTargetData());
 }
+
+void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
+                          void* Addr) {
+  unwrap(EE)->addGlobalMapping(unwrap<GlobalValue>(Global), Addr);
+}

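The new C binding has no example in the patch, so here is a hedged usage sketch: map a declaration in a module onto a function living in the host process before executing JITed code. LLVMAddGlobalMapping itself is taken from the hunk above; the surrounding llvm-c calls (LLVMInt32Type, LLVMFunctionType, LLVMAddFunction) are assumed from the C headers of this vintage.

#include "llvm-c/Core.h"
#include "llvm-c/ExecutionEngine.h"
#include <stdint.h>

static int HostCallback(int X) { return X + 1; }

void installCallback(LLVMExecutionEngineRef EE, LLVMModuleRef M) {
  LLVMTypeRef Params[] = { LLVMInt32Type() };
  LLVMTypeRef FnTy = LLVMFunctionType(LLVMInt32Type(), Params, 1, 0);
  // Declare the external function that JITed code will call...
  LLVMValueRef Decl = LLVMAddFunction(M, "HostCallback", FnTy);
  // ...and resolve that declaration to the host-side implementation.
  LLVMAddGlobalMapping(EE, Decl, (void*)(intptr_t)&HostCallback);
}
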
Modified: llvm/branches/non-call-eh/lib/ExecutionEngine/Interpreter/Interpreter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/ExecutionEngine/Interpreter/Interpreter.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/ExecutionEngine/Interpreter/Interpreter.cpp (original)
+++ llvm/branches/non-call-eh/lib/ExecutionEngine/Interpreter/Interpreter.cpp Sun Jul  6 15:45:41 2008
@@ -21,10 +21,14 @@
 #include <cstring>
 using namespace llvm;
 
+namespace {
+
 static struct RegisterInterp {
   RegisterInterp() { Interpreter::Register(); }
 } InterpRegistrator;
 
+}
+
 namespace llvm {
   void LinkInInterpreter() {
   }

Modified: llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/Intercept.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/Intercept.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/Intercept.cpp (original)
+++ llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/Intercept.cpp Sun Jul  6 15:45:41 2008
@@ -16,6 +16,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "JIT.h"
+#include "llvm/Support/Streams.h"
 #include "llvm/System/DynamicLibrary.h"
 #include "llvm/Config/config.h"
 using namespace llvm;
@@ -90,44 +91,46 @@
 /// for resolving library symbols, not code generated symbols.
 ///
 void *JIT::getPointerToNamedFunction(const std::string &Name) {
-  // Check to see if this is one of the functions we want to intercept.  Note,
-  // we cast to intptr_t here to silence a -pedantic warning that complains
-  // about casting a function pointer to a normal pointer.
-  if (Name == "exit") return (void*)(intptr_t)&jit_exit;
-  if (Name == "atexit") return (void*)(intptr_t)&jit_atexit;
-
-  const char *NameStr = Name.c_str();
-  // If this is an asm specifier, skip the sentinal.
-  if (NameStr[0] == 1) ++NameStr;
-  
-  // If it's an external function, look it up in the process image...
-  void *Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
-  if (Ptr) return Ptr;
-  
-  // If it wasn't found and if it starts with an underscore ('_') character, and
-  // has an asm specifier, try again without the underscore.
-  if (Name[0] == 1 && NameStr[0] == '_') {
-    Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr+1);
+  if (!isSymbolSearchingDisabled()) {
+    // Check to see if this is one of the functions we want to intercept.  Note,
+    // we cast to intptr_t here to silence a -pedantic warning that complains
+    // about casting a function pointer to a normal pointer.
+    if (Name == "exit") return (void*)(intptr_t)&jit_exit;
+    if (Name == "atexit") return (void*)(intptr_t)&jit_atexit;
+
+    const char *NameStr = Name.c_str();
+    // If this is an asm specifier, skip the sentinel.
+    if (NameStr[0] == 1) ++NameStr;
+    
+    // If it's an external function, look it up in the process image...
+    void *Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
     if (Ptr) return Ptr;
-  }
-  
-  // darwin/ppc adds $LDBLStub suffixes to various symbols like printf.  These
-  // are references to hidden visibility symbols that dlsym cannot resolve.  If
-  // we have one of these, strip off $LDBLStub and try again.
+    
+    // If it wasn't found and if it starts with an underscore ('_') character,
+    // and has an asm specifier, try again without the underscore.
+    if (Name[0] == 1 && NameStr[0] == '_') {
+      Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr+1);
+      if (Ptr) return Ptr;
+    }
+    
+    // Darwin/PPC adds $LDBLStub suffixes to various symbols like printf.  These
+    // are references to hidden visibility symbols that dlsym cannot resolve.
+    // If we have one of these, strip off $LDBLStub and try again.
 #if defined(__APPLE__) && defined(__ppc__)
-  if (Name.size() > 9 && Name[Name.size()-9] == '$' &&
-      memcmp(&Name[Name.size()-8], "LDBLStub", 8) == 0) {
-    // First try turning $LDBLStub into $LDBL128.  If that fails, strip it off.
-    // This mirrors logic in libSystemStubs.a.
-    std::string Prefix = std::string(Name.begin(), Name.end()-9);
-    if (void *Ptr = getPointerToNamedFunction(Prefix+"$LDBL128"))
-      return Ptr;
-    if (void *Ptr = getPointerToNamedFunction(Prefix))
-      return Ptr;
-  }
+    if (Name.size() > 9 && Name[Name.size()-9] == '$' &&
+        memcmp(&Name[Name.size()-8], "LDBLStub", 8) == 0) {
+      // First try turning $LDBLStub into $LDBL128. If that fails, strip it off.
+      // This mirrors logic in libSystemStubs.a.
+      std::string Prefix = std::string(Name.begin(), Name.end()-9);
+      if (void *Ptr = getPointerToNamedFunction(Prefix+"$LDBL128"))
+        return Ptr;
+      if (void *Ptr = getPointerToNamedFunction(Prefix))
+        return Ptr;
+    }
 #endif
+  }
   
-  /// If a LazyFunctionCreator is installed, use it to get/create the function. 
+  /// If a LazyFunctionCreator is installed, use it to get/create the function.
   if (LazyFunctionCreator)
     if (void *RP = LazyFunctionCreator(Name))
       return RP;

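To make the behavioral change concrete: when symbol searching is disabled, getPointerToNamedFunction no longer consults the process image at all and goes straight to the LazyFunctionCreator. A hedged sketch follows; the setter name (DisableSymbolSearching) is assumed from the matching ExecutionEngine.h change, which is not shown in this mail.

#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include <string>

// Application-specific resolver; returning 0 signals failure.
static void *resolveSymbol(const std::string &Name) {
  return 0;
}

void sandboxSymbolResolution(llvm::ExecutionEngine *EE) {
  EE->DisableSymbolSearching();                 // assumed setter for the new flag
  EE->InstallLazyFunctionCreator(resolveSymbol);
}
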
Modified: llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JIT.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JIT.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JIT.cpp (original)
+++ llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JIT.cpp Sun Jul  6 15:45:41 2008
@@ -20,7 +20,6 @@
 #include "llvm/Instructions.h"
 #include "llvm/ModuleProvider.h"
 #include "llvm/CodeGen/MachineCodeEmitter.h"
-#include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/ExecutionEngine/GenericValue.h"
 #include "llvm/Support/MutexGuard.h"
 #include "llvm/System/DynamicLibrary.h"
@@ -52,10 +51,14 @@
 extern void *__dso_handle __attribute__ ((__visibility__ ("hidden")));
 #endif
 
+namespace {
+
 static struct RegisterJIT {
   RegisterJIT() { JIT::Register(); }
 } JITRegistrator;
 
+}
+
 namespace llvm {
   void LinkInJIT() {
   }
@@ -87,15 +90,17 @@
 
 JIT::JIT(ModuleProvider *MP, TargetMachine &tm, TargetJITInfo &tji,
          JITMemoryManager *JMM)
-  : ExecutionEngine(MP), TM(tm), TJI(tji), jitstate(MP) {
+  : ExecutionEngine(MP), TM(tm), TJI(tji) {
   setTargetData(TM.getTargetData());
 
+  jitstate = new JITState(MP);
+
   // Initialize MCE
   MCE = createEmitter(*this, JMM);
 
   // Add target data
   MutexGuard locked(lock);
-  FunctionPassManager &PM = jitstate.getPM(locked);
+  FunctionPassManager &PM = jitstate->getPM(locked);
   PM.add(new TargetData(*TM.getTargetData()));
 
   // Turn the machine code intermediate representation into bytes in memory that
@@ -110,10 +115,54 @@
 }
 
 JIT::~JIT() {
+  delete jitstate;
   delete MCE;
   delete &TM;
 }
 
+/// addModuleProvider - Add a new ModuleProvider to the JIT.  If we previously
+/// removed the last ModuleProvider, we need to re-initialize jitstate with a
+/// valid ModuleProvider.
+void JIT::addModuleProvider(ModuleProvider *MP) {
+  MutexGuard locked(lock);
+
+  if (Modules.empty()) {
+    assert(!jitstate && "jitstate should be NULL if Modules vector is empty!");
+
+    jitstate = new JITState(MP);
+
+    FunctionPassManager &PM = jitstate->getPM(locked);
+    PM.add(new TargetData(*TM.getTargetData()));
+
+    // Turn the machine code intermediate representation into bytes in memory
+    // that may be executed.
+    if (TM.addPassesToEmitMachineCode(PM, *MCE, false /*fast*/)) {
+      cerr << "Target does not support machine code emission!\n";
+      abort();
+    }
+    
+    // Initialize passes.
+    PM.doInitialization();
+  }
+  
+  ExecutionEngine::addModuleProvider(MP);
+}
+
+/// removeModuleProvider - If we are removing the last ModuleProvider, 
+/// invalidate the jitstate since the PassManager it contains references a
+/// released ModuleProvider.
+Module *JIT::removeModuleProvider(ModuleProvider *MP, std::string *E) {
+  Module *result = ExecutionEngine::removeModuleProvider(MP, E);
+  
+  MutexGuard locked(lock);
+  if (Modules.empty()) {
+    delete jitstate;
+    jitstate = 0;
+  }
+  
+  return result;
+}
+
 /// run - Start execution with the specified function and arguments.
 ///
 GenericValue JIT::runFunction(Function *F,
@@ -261,12 +310,13 @@
     Args.push_back(C);
   }
 
-  CallInst *TheCall = CallInst::Create(F, Args.begin(), Args.end(), "", StubBB);
+  CallInst *TheCall = CallInst::Create(F, Args.begin(), Args.end(),
+                                       "", StubBB);
   TheCall->setTailCall();
   if (TheCall->getType() != Type::VoidTy)
-    ReturnInst::Create(TheCall, StubBB);             // Return result of the call.
+    ReturnInst::Create(TheCall, StubBB);    // Return result of the call.
   else
-    ReturnInst::Create(StubBB);                      // Just return void.
+    ReturnInst::Create(StubBB);             // Just return void.
 
   // Finally, return the value returned by our nullary stub function.
   return runFunction(Stub, std::vector<GenericValue>());
@@ -284,15 +334,15 @@
 
   // JIT the function
   isAlreadyCodeGenerating = true;
-  jitstate.getPM(locked).run(*F);
+  jitstate->getPM(locked).run(*F);
   isAlreadyCodeGenerating = false;
 
   // If the function referred to a global variable that had not yet been
   // emitted, it allocates memory for the global, but doesn't emit it yet.  Emit
   // all of these globals now.
-  while (!jitstate.getPendingGlobals(locked).empty()) {
-    const GlobalVariable *GV = jitstate.getPendingGlobals(locked).back();
-    jitstate.getPendingGlobals(locked).pop_back();
+  while (!jitstate->getPendingGlobals(locked).empty()) {
+    const GlobalVariable *GV = jitstate->getPendingGlobals(locked).back();
+    jitstate->getPendingGlobals(locked).pop_back();
     EmitGlobalVariable(GV);
   }
 }
@@ -382,7 +432,7 @@
       unsigned MisAligned = ((intptr_t)Ptr & (A-1));
       Ptr = (char*)Ptr + (MisAligned ? (A-MisAligned) : 0);
     }
-    jitstate.getPendingGlobals(locked).push_back(GV);
+    jitstate->getPendingGlobals(locked).push_back(GV);
   }
   addGlobalMapping(GV, Ptr);
   return Ptr;

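Since JITState is now heap-allocated, the add/remove overrides above give the JIT a real lifecycle: removing the last ModuleProvider frees the internal pass-manager state (which would otherwise reference the released module), and adding a provider later rebuilds it. A small hedged sketch using only the two virtual methods shown in this hunk:

#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ModuleProvider.h"
#include <string>
using namespace llvm;

Module *swapProviders(ExecutionEngine *EE, ModuleProvider *Old,
                      ModuleProvider *New) {
  std::string Err;
  // If Old was the only provider, the JIT tears down jitstate here rather
  // than keeping a PassManager that points at the released module.
  Module *Released = EE->removeModuleProvider(Old, &Err);
  // Adding a provider to an empty JIT re-creates jitstate and its passes.
  EE->addModuleProvider(New);
  return Released;
}
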
Modified: llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JIT.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JIT.h?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JIT.h (original)
+++ llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JIT.h Sun Jul  6 15:45:41 2008
@@ -54,7 +54,7 @@
   TargetJITInfo &TJI;      // The JITInfo for the target we are compiling to
   MachineCodeEmitter *MCE; // MCE object
 
-  JITState jitstate;
+  JITState *jitstate;
 
   JIT(ModuleProvider *MP, TargetMachine &tm, TargetJITInfo &tji, 
       JITMemoryManager *JMM);
@@ -76,7 +76,11 @@
     return createJIT(MP, Err, 0);
   }
 
-  /// run - Start execution with the specified function and arguments.
+  virtual void addModuleProvider(ModuleProvider *MP);
+  virtual Module *removeModuleProvider(ModuleProvider *MP,
+                                       std::string *ErrInfo = 0);
+
+  /// runFunction - Start execution with the specified function and arguments.
   ///
   virtual GenericValue runFunction(Function *F,
                                    const std::vector<GenericValue> &ArgValues);

Modified: llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp (original)
+++ llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp Sun Jul  6 15:45:41 2008
@@ -172,6 +172,8 @@
   return LSize < RSize;
 }
 
+namespace {
+
 struct KeyInfo {
   static inline unsigned getEmptyKey() { return -1U; }
   static inline unsigned getTombstoneKey() { return -2U; }
@@ -205,6 +207,8 @@
   unsigned Action;
 };
 
+}
+
 unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
                                          unsigned char* StartFunction,
                                          unsigned char* EndFunction) const {
@@ -321,7 +325,7 @@
         I != E; ++I) {
     for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
           MI != E; ++MI) {
-      if (MI->getOpcode() != TargetInstrInfo::LABEL) {
+      if (!MI->isLabel()) {
         MayThrow |= MI->getDesc().isCall();
         continue;
       }
@@ -360,7 +364,7 @@
 
       // Try to merge with the previous call-site.
       if (CallSites.size()) {
-        CallSiteEntry &Prev = CallSites[CallSites.size()-1];
+        CallSiteEntry &Prev = CallSites.back();
         if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
           // Extend the range of the previous entry.
           Prev.EndLabel = Site.EndLabel;
@@ -936,7 +940,7 @@
         I != E; ++I) {
     for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
           MI != E; ++MI) {
-      if (MI->getOpcode() != TargetInstrInfo::LABEL) {
+      if (!MI->isLabel()) {
         MayThrow |= MI->getDesc().isCall();
         continue;
       }
@@ -975,7 +979,7 @@
 
       // Try to merge with the previous call-site.
       if (CallSites.size()) {
-        CallSiteEntry &Prev = CallSites[CallSites.size()-1];
+        CallSiteEntry &Prev = CallSites.back();
         if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
           // Extend the range of the previous entry.
           Prev.EndLabel = Site.EndLabel;

Modified: llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JITEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JITEmitter.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JITEmitter.cpp (original)
+++ llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JITEmitter.cpp Sun Jul  6 15:45:41 2008
@@ -32,6 +32,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/MutexGuard.h"
 #include "llvm/System/Disassembler.h"
+#include "llvm/System/Memory.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/ADT/Statistic.h"
 #include <algorithm>
@@ -145,20 +146,6 @@
 
 JITResolver *JITResolver::TheJITResolver = 0;
 
-#if (defined(__POWERPC__) || defined (__ppc__) || defined(_POWER)) && \
-    defined(__APPLE__)
-extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
-#endif
-
-/// synchronizeICache - On some targets, the JIT emitted code must be
-/// explicitly refetched to ensure correct execution.
-static void synchronizeICache(const void *Addr, size_t len) {
-#if (defined(__POWERPC__) || defined (__ppc__) || defined(_POWER)) && \
-    defined(__APPLE__)
-  sys_icache_invalidate(Addr, len);
-#endif
-}
-
 /// getFunctionStub - This returns a pointer to a function stub, creating
 /// one on demand as needed.
 void *JITResolver::getFunctionStub(Function *F) {
@@ -308,7 +295,7 @@
 // have useful answers.  However, we don't go crazy with atomic operations, we
 // just do a "reasonable effort".
 #ifdef __APPLE__ 
-#define ENABLE_JIT_SYMBOL_TABLE 1
+#define ENABLE_JIT_SYMBOL_TABLE 0
 #endif
 
 /// JitSymbolEntry - Each function that is JIT compiled results in one of these
@@ -421,7 +408,7 @@
   --SymTabPtr->NumSymbols;
 
   // Finally, if we deleted the final symbol, deallocate the table itself.
-  if (SymTabPtr->NumSymbols == 0) 
+  if (SymTabPtr->NumSymbols != 0) 
     return;
   
   *SymTabPtrPtr = 0;
@@ -564,6 +551,8 @@
     /// global immediately instead of queuing it for codegen later!
     return TheJIT->getOrEmitGlobalVariable(GV);
   }
+  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
+    return TheJIT->getPointerToGlobal(GA->resolveAliasedGlobal());
 
   // If we have already compiled the function, return a pointer to its body.
   Function *F = cast<Function>(V);
@@ -756,7 +745,7 @@
   }
 
   // Invalidate the icache if necessary.
-  synchronizeICache(FnStart, FnEnd-FnStart);
+  sys::Memory::InvalidateInstructionCache(FnStart, FnEnd-FnStart);
   
   // Add it to the JIT symbol table if the host wants it.
   AddFunctionToSymbolTable(F.getFunction()->getNameStart(),

Modified: llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JITMemoryManager.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JITMemoryManager.cpp (original)
+++ llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/JITMemoryManager.cpp Sun Jul  6 15:45:41 2008
@@ -369,7 +369,7 @@
   // Allocate a 16M block of memory for functions.
   sys::MemoryBlock MemBlock = getNewMemoryBlock(16 << 20);
 
-  unsigned char *MemBase = reinterpret_cast<unsigned char*>(MemBlock.base());
+  unsigned char *MemBase = static_cast<unsigned char*>(MemBlock.base());
 
   // Allocate stubs backwards from the base, allocate functions forward
   // from the base.

Modified: llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/TargetSelect.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/TargetSelect.cpp?rev=53163&r1=53162&r2=53163&view=diff

==============================================================================
--- llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/TargetSelect.cpp (original)
+++ llvm/branches/non-call-eh/lib/ExecutionEngine/JIT/TargetSelect.cpp Sun Jul  6 15:45:41 2008
@@ -15,6 +15,7 @@
 #include "JIT.h"
 #include "llvm/Module.h"
 #include "llvm/ModuleProvider.h"
+#include "llvm/Support/Streams.h"
 #include "llvm/Target/SubtargetFeature.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetMachineRegistry.h"




