[llvm-commits] [llvm] r172246 - in /llvm/trunk: include/llvm/CodeGen/Passes.h include/llvm/Target/TargetLowering.h lib/CodeGen/BasicTargetTransformInfo.cpp lib/CodeGen/CMakeLists.txt lib/CodeGen/DwarfEHPrepare.cpp lib/CodeGen/IfConversion.cpp lib/CodeGen/MachineBlockPlacement.cpp lib/CodeGen/MachineLICM.cpp lib/CodeGen/SelectionDAG/TargetLowering.cpp lib/CodeGen/SjLjEHPrepare.cpp lib/CodeGen/StackProtector.cpp lib/CodeGen/TargetLoweringBase.cpp

Chandler Carruth chandlerc at google.com
Fri Jan 11 12:56:42 PST 2013


On Fri, Jan 11, 2013 at 12:05 PM, Benjamin Kramer
<benny.kra at googlemail.com> wrote:

> Author: d0k
> Date: Fri Jan 11 14:05:37 2013
> New Revision: 172246
>
> URL: http://llvm.org/viewvc/llvm-project?rev=172246&view=rev
> Log:
> Split TargetLowering into a CodeGen and a SelectionDAG part.
>
> This fixes some of the cycles between libCodeGen and libSelectionDAG. It's
> still a complete mess, but as long as the edges consist of virtual calls it
> doesn't cause breakage. BasicTTI did static calls and thus broke some build
> configurations.
>

I have been working on fixing this for several days, and I don't think this
is the right fix. There shouldn't be any inheritance here, and I don't
think we want the Target ISel stuff to depend on SelectionDAG in all
cases....
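
To make that concrete, the shape this commit leaves us with is roughly the
following (a sketch, not the actual headers):

  class SDValue;      // SelectionDAG types, owned by libSelectionDAG
  class SelectionDAG;

  // libCodeGen: SelectionDAG-independent queries. This part is fine.
  class TargetLoweringBase {
  public:
    virtual bool isLegalICmpImmediate(int64_t Imm) const;
  };

  // Same header: every hook here still names SelectionDAG types, so any
  // client that sees TargetLowering still sees SelectionDAG. Virtual
  // dispatch only avoids the link-time edge, not the layering one.
  class TargetLowering : public TargetLoweringBase {
  public:
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
  };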

The fix I have almost working actually splits the interface between
TargetLowering and TargetSelectionDAGInfo, which seems to be the desired way
to partition between CodeGen and SelectionDAG.

But I don't know whether to even bother with it at this point. It's a very
substantial change, and now that this has been committed it will require a
massive rebasing that I just don't have time for....
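
For reference, the partition I was working toward looks roughly like this
(sketch only; TargetSelectionDAGInfo already exists in the tree, the method
placement here is illustrative):

  // libCodeGen: legality/configuration queries, no DAG types at all.
  class TargetLowering {
  public:
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
    // ... libcall names, type legality, alignment preferences ...
  };

  // libSelectionDAG: everything that traffics in SDValue/SelectionDAG,
  // e.g. custom lowering and DAG-combine hooks.
  class TargetSelectionDAGInfo {
  public:
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
  };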


>
> Added:
>     llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp
> Modified:
>     llvm/trunk/include/llvm/CodeGen/Passes.h
>     llvm/trunk/include/llvm/Target/TargetLowering.h
>     llvm/trunk/lib/CodeGen/BasicTargetTransformInfo.cpp
>     llvm/trunk/lib/CodeGen/CMakeLists.txt
>     llvm/trunk/lib/CodeGen/DwarfEHPrepare.cpp
>     llvm/trunk/lib/CodeGen/IfConversion.cpp
>     llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp
>     llvm/trunk/lib/CodeGen/MachineLICM.cpp
>     llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp
>     llvm/trunk/lib/CodeGen/SjLjEHPrepare.cpp
>     llvm/trunk/lib/CodeGen/StackProtector.cpp
>
> Modified: llvm/trunk/include/llvm/CodeGen/Passes.h
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/Passes.h?rev=172246&r1=172245&r2=172246&view=diff
>
> ==============================================================================
> --- llvm/trunk/include/llvm/CodeGen/Passes.h (original)
> +++ llvm/trunk/include/llvm/CodeGen/Passes.h Fri Jan 11 14:05:37 2013
> @@ -25,6 +25,7 @@
>    class MachineFunctionPass;
>    class PassInfo;
>    class PassManagerBase;
> +  class TargetLoweringBase;
>    class TargetLowering;
>    class TargetRegisterClass;
>    class raw_ostream;
> @@ -284,7 +285,8 @@
>    ///
>    /// This pass implements the target transform info analysis using the
> target
>    /// independent information available to the LLVM code generator.
> -  ImmutablePass *createBasicTargetTransformInfoPass(const TargetLowering
> *TLI);
> +  ImmutablePass *
> +  createBasicTargetTransformInfoPass(const TargetLoweringBase *TLI);
>
>    /// createUnreachableBlockEliminationPass - The LLVM code generator
> does not
>    /// work well with unreachable basic blocks (what live ranges make
> sense for a
> @@ -481,7 +483,7 @@
>
>    /// createStackProtectorPass - This pass adds stack protectors to
> functions.
>    ///
> -  FunctionPass *createStackProtectorPass(const TargetLowering *tli);
> +  FunctionPass *createStackProtectorPass(const TargetLoweringBase *tli);
>
>    /// createMachineVerifierPass - This pass verifies generated machine
> code
>    /// instructions for correctness.
> @@ -495,7 +497,7 @@
>    /// createSjLjEHPreparePass - This pass adapts exception handling code
> to use
>    /// the GCC-style builtin setjmp/longjmp (sjlj) to handling EH control
> flow.
>    ///
> -  FunctionPass *createSjLjEHPreparePass(const TargetLowering *tli);
> +  FunctionPass *createSjLjEHPreparePass(const TargetLoweringBase *tli);
>
>    /// LocalStackSlotAllocation - This pass assigns local frame indices to
> stack
>    /// slots relative to one another and allocates base registers to
> access them
>
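
(The client side of this is mechanical, since TargetLowering still is-a
TargetLoweringBase; an illustrative sketch, not from the patch:)

  // getTargetLowering() returns a const TargetLowering *, which converts
  // implicitly to the base class these factory functions now take.
  const TargetLoweringBase *TLI = TM.getTargetLowering();
  PM.add(createStackProtectorPass(TLI));
  PM.add(createSjLjEHPreparePass(TLI));
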
> Modified: llvm/trunk/include/llvm/Target/TargetLowering.h
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Target/TargetLowering.h?rev=172246&r1=172245&r2=172246&view=diff
>
> ==============================================================================
> --- llvm/trunk/include/llvm/Target/TargetLowering.h (original)
> +++ llvm/trunk/include/llvm/Target/TargetLowering.h Fri Jan 11 14:05:37
> 2013
> @@ -68,17 +68,12 @@
>      };
>    }
>
>
> -//===----------------------------------------------------------------------===//
> -/// TargetLowering - This class defines information used to lower LLVM
> code to
> -/// legal SelectionDAG operators that the target instruction selector can
> accept
> -/// natively.
> -///
> -/// This class also defines callbacks that targets must implement to lower
> -/// target-specific constructs to SelectionDAG operators.
> -///
> -class TargetLowering {
> -  TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
> -  void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;
> +/// TargetLoweringBase - This base class for TargetLowering contains the
> +/// SelectionDAG-independent parts that can be used from the rest of
> CodeGen.
> +class TargetLoweringBase {
> +  TargetLoweringBase(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
> +  void operator=(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
> +
>  public:
>    /// LegalizeAction - This enum indicates whether operations are valid
> for a
>    /// target, and if not, what action should be used to make them valid.
> @@ -136,9 +131,9 @@
>    }
>
>    /// NOTE: The constructor takes ownership of TLOF.
> -  explicit TargetLowering(const TargetMachine &TM,
> -                          const TargetLoweringObjectFile *TLOF);
> -  virtual ~TargetLowering();
> +  explicit TargetLoweringBase(const TargetMachine &TM,
> +                              const TargetLoweringObjectFile *TLOF);
> +  virtual ~TargetLoweringBase();
>
>    const TargetMachine &getTargetMachine() const { return TM; }
>    const DataLayout *getDataLayout() const { return TD; }
> @@ -829,55 +824,6 @@
>      return InsertFencesForAtomic;
>    }
>
> -  /// getPreIndexedAddressParts - returns true by value, base pointer and
> -  /// offset pointer and addressing mode by reference if the node's
> address
> -  /// can be legally represented as pre-indexed load / store address.
> -  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue
> &/*Base*/,
> -                                         SDValue &/*Offset*/,
> -                                         ISD::MemIndexedMode &/*AM*/,
> -                                         SelectionDAG &/*DAG*/) const {
> -    return false;
> -  }
> -
> -  /// getPostIndexedAddressParts - returns true by value, base pointer and
> -  /// offset pointer and addressing mode by reference if this node can be
> -  /// combined with a load / store to form a post-indexed load / store.
> -  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
> -                                          SDValue &/*Base*/, SDValue
> &/*Offset*/,
> -                                          ISD::MemIndexedMode &/*AM*/,
> -                                          SelectionDAG &/*DAG*/) const {
> -    return false;
> -  }
> -
> -  /// getJumpTableEncoding - Return the entry encoding for a jump table
> in the
> -  /// current function.  The returned value is a member of the
> -  /// MachineJumpTableInfo::JTEntryKind enum.
> -  virtual unsigned getJumpTableEncoding() const;
> -
> -  virtual const MCExpr *
> -  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
> -                            const MachineBasicBlock * /*MBB*/, unsigned
> /*uid*/,
> -                            MCContext &/*Ctx*/) const {
> -    llvm_unreachable("Need to implement this hook if target has custom
> JTIs");
> -  }
> -
> -  /// getPICJumpTableRelocBase - Returns relocation base for the given
> PIC
> -  /// jumptable.
> -  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
> -                                           SelectionDAG &DAG) const;
> -
> -  /// getPICJumpTableRelocBaseExpr - This returns the relocation base for
> the
> -  /// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
> -  /// MCExpr.
> -  virtual const MCExpr *
> -  getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
> -                               unsigned JTI, MCContext &Ctx) const;
> -
> -  /// isOffsetFoldingLegal - Return true if folding a constant offset
> -  /// with the given GlobalAddress is legal.  It is frequently not legal
> in
> -  /// PIC relocation models.
> -  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
> -
>    /// getStackCookieLocation - Return true if the target stores stack
>    /// protector cookies at a fixed offset in some non-standard address
>    /// space, and populates the address space and offset as
> @@ -906,152 +852,6 @@
>    /// @}
>
>
>  //===--------------------------------------------------------------------===//
> -  // TargetLowering Optimization Methods
> -  //
> -
> -  /// TargetLoweringOpt - A convenience struct that encapsulates a DAG,
> and two
> -  /// SDValues for returning information from TargetLowering to its
> clients
> -  /// that want to combine
> -  struct TargetLoweringOpt {
> -    SelectionDAG &DAG;
> -    bool LegalTys;
> -    bool LegalOps;
> -    SDValue Old;
> -    SDValue New;
> -
> -    explicit TargetLoweringOpt(SelectionDAG &InDAG,
> -                               bool LT, bool LO) :
> -      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
> -
> -    bool LegalTypes() const { return LegalTys; }
> -    bool LegalOperations() const { return LegalOps; }
> -
> -    bool CombineTo(SDValue O, SDValue N) {
> -      Old = O;
> -      New = N;
> -      return true;
> -    }
> -
> -    /// ShrinkDemandedConstant - Check to see if the specified operand of
> the
> -    /// specified instruction is a constant integer.  If so, check to see
> if
> -    /// there are any bits set in the constant that are not demanded.  If
> so,
> -    /// shrink the constant and return true.
> -    bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
> -
> -    /// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if
> the
> -    /// casts are free.  This uses isZExtFree and ZERO_EXTEND for the
> widening
> -    /// cast, but it could be generalized for targets with other types of
> -    /// implicit widening casts.
> -    bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt
> &Demanded,
> -                          DebugLoc dl);
> -  };
> -
> -  /// SimplifyDemandedBits - Look at Op.  At this point, we know that
> only the
> -  /// DemandedMask bits of the result of Op are ever used downstream.  If
> we can
> -  /// use this information to simplify Op, create a new simplified DAG
> node and
> -  /// return true, returning the original and new nodes in Old and New.
> -  /// Otherwise, analyze the expression and return a mask of KnownOne and
> -  /// KnownZero bits for the expression (used to simplify the caller).
> -  /// The KnownZero/One bits may only be accurate for those bits in the
> -  /// DemandedMask.
> -  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
> -                            APInt &KnownZero, APInt &KnownOne,
> -                            TargetLoweringOpt &TLO, unsigned Depth = 0)
> const;
> -
> -  /// computeMaskedBitsForTargetNode - Determine which of the bits
> specified in
> -  /// Mask are known to be either zero or one and return them in the
> -  /// KnownZero/KnownOne bitsets.
> -  virtual void computeMaskedBitsForTargetNode(const SDValue Op,
> -                                              APInt &KnownZero,
> -                                              APInt &KnownOne,
> -                                              const SelectionDAG &DAG,
> -                                              unsigned Depth = 0) const;
> -
> -  /// ComputeNumSignBitsForTargetNode - This method can be implemented by
> -  /// targets that want to expose additional information about sign bits
> to the
> -  /// DAG Combiner.
> -  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
> -                                                   unsigned Depth = 0)
> const;
> -
> -  struct DAGCombinerInfo {
> -    void *DC;  // The DAG Combiner object.
> -    CombineLevel Level;
> -    bool CalledByLegalizer;
> -  public:
> -    SelectionDAG &DAG;
> -
> -    DAGCombinerInfo(SelectionDAG &dag, CombineLevel level,  bool cl, void
> *dc)
> -      : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
> -
> -    bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
> -    bool isBeforeLegalizeOps() const { return Level <
> AfterLegalizeVectorOps; }
> -    bool isAfterLegalizeVectorOps() const {
> -      return Level == AfterLegalizeDAG;
> -    }
> -    CombineLevel getDAGCombineLevel() { return Level; }
> -    bool isCalledByLegalizer() const { return CalledByLegalizer; }
> -
> -    void AddToWorklist(SDNode *N);
> -    void RemoveFromWorklist(SDNode *N);
> -    SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
> -                      bool AddTo = true);
> -    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
> -    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo =
> true);
> -
> -    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
> -  };
> -
> -  /// SimplifySetCC - Try to simplify a setcc built with the specified
> operands
> -  /// and cc. If it is unable to simplify it, return a null SDValue.
> -  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
> -                          ISD::CondCode Cond, bool foldBooleans,
> -                          DAGCombinerInfo &DCI, DebugLoc dl) const;
> -
> -  /// isGAPlusOffset - Returns true (and the GlobalValue and the offset)
> if the
> -  /// node is a GlobalAddress + offset.
> -  virtual bool
> -  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset)
> const;
> -
> -  /// PerformDAGCombine - This method will be invoked for all target
> nodes and
> -  /// for any target-independent nodes that the target has registered with
> -  /// invoke it for.
> -  ///
> -  /// The semantics are as follows:
> -  /// Return Value:
> -  ///   SDValue.Val == 0   - No change was made
> -  ///   SDValue.Val == N   - N was replaced, is dead, and is already
> handled.
> -  ///   otherwise          - N should be replaced by the returned Operand.
> -  ///
> -  /// In addition, methods provided by DAGCombinerInfo may be used to
> perform
> -  /// more complex transformations.
> -  ///
> -  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
> const;
> -
> -  /// isTypeDesirableForOp - Return true if the target has native support
> for
> -  /// the specified value type and it is 'desirable' to use the type for
> the
> -  /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
> -  /// instruction encodings are longer and some i16 instructions are slow.
> -  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
> -    // By default, assume all legal types are desirable.
> -    return isTypeLegal(VT);
> -  }
> -
> -  /// isDesirableToTransformToIntegerOp - Return true if it is profitable
> for dag combiner
> -  /// to transform a floating point op of specified opcode to an
> equivalent op of
> -  /// an integer type. e.g. f32 load -> i32 load can be profitable on ARM.
> -  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
> -                                                 EVT /*VT*/) const {
> -    return false;
> -  }
> -
> -  /// IsDesirableToPromoteOp - This method queries the target whether it is
> -  /// beneficial for dag combiner to promote the specified node. If true,
> it
> -  /// should return the desired promotion type by reference.
> -  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const
> {
> -    return false;
> -  }
> -
> -
>  //===--------------------------------------------------------------------===//
>    // TargetLowering Configuration Methods - These methods should be
> invoked by
>    // the derived class constructor to configure this object for the
> target.
>    //
> @@ -1302,666 +1102,263 @@
>
>  public:
>
>  //===--------------------------------------------------------------------===//
> -  // Lowering methods - These methods must be implemented by targets so
> that
> -  // the SelectionDAGBuilder code knows how to lower these.
> +  // Addressing mode description hooks (used by LSR etc).
>    //
>
> -  /// LowerFormalArguments - This hook must be implemented to lower the
> -  /// incoming (formal) arguments, described by the Ins array, into the
> -  /// specified DAG. The implementation should fill in the InVals array
> -  /// with legal-type argument values, and return the resulting token
> -  /// chain value.
> -  ///
> -  virtual SDValue
> -    LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
> -                         bool /*isVarArg*/,
> -                         const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
> -                         DebugLoc /*dl*/, SelectionDAG &/*DAG*/,
> -                         SmallVectorImpl<SDValue> &/*InVals*/) const {
> -    llvm_unreachable("Not Implemented");
> +  /// GetAddrModeArguments - CodeGenPrepare sinks address calculations
> into the
> +  /// same BB as Load/Store instructions reading the address.  This
> allows as
> +  /// much computation as possible to be done in the address mode for that
> +  /// operand.  This hook lets targets also pass back when this should be
> done
> +  /// on intrinsics which load/store.
> +  virtual bool GetAddrModeArguments(IntrinsicInst *I,
> +                                    SmallVectorImpl<Value*> &Ops,
> +                                    Type *&AccessTy) const {
> +    return false;
>    }
>
> -  struct ArgListEntry {
> -    SDValue Node;
> -    Type* Ty;
> -    bool isSExt  : 1;
> -    bool isZExt  : 1;
> -    bool isInReg : 1;
> -    bool isSRet  : 1;
> -    bool isNest  : 1;
> -    bool isByVal : 1;
> -    uint16_t Alignment;
> -
> -    ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
> -      isSRet(false), isNest(false), isByVal(false), Alignment(0) { }
> -  };
> -  typedef std::vector<ArgListEntry> ArgListTy;
> -
> -  /// CallLoweringInfo - This structure contains all information that is
> -  /// necessary for lowering calls. It is passed to TLI::LowerCallTo when
> the
> -  /// SelectionDAG builder needs to lower a call, and targets will see
> this
> -  /// struct in their LowerCall implementation.
> -  struct CallLoweringInfo {
> -    SDValue Chain;
> -    Type *RetTy;
> -    bool RetSExt           : 1;
> -    bool RetZExt           : 1;
> -    bool IsVarArg          : 1;
> -    bool IsInReg           : 1;
> -    bool DoesNotReturn     : 1;
> -    bool IsReturnValueUsed : 1;
> -
> -    // IsTailCall should be modified by implementations of
> -    // TargetLowering::LowerCall that perform tail call conversions.
> -    bool IsTailCall;
> -
> -    unsigned NumFixedArgs;
> -    CallingConv::ID CallConv;
> -    SDValue Callee;
> -    ArgListTy &Args;
> -    SelectionDAG &DAG;
> -    DebugLoc DL;
> -    ImmutableCallSite *CS;
> -    SmallVector<ISD::OutputArg, 32> Outs;
> -    SmallVector<SDValue, 32> OutVals;
> -    SmallVector<ISD::InputArg, 32> Ins;
> -
> -
> -    /// CallLoweringInfo - Constructs a call lowering context based on the
> -    /// ImmutableCallSite \p cs.
> -    CallLoweringInfo(SDValue chain, Type *retTy,
> -                     FunctionType *FTy, bool isTailCall, SDValue callee,
> -                     ArgListTy &args, SelectionDAG &dag, DebugLoc dl,
> -                     ImmutableCallSite &cs)
> -    : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0,
> Attribute::SExt)),
> -      RetZExt(cs.paramHasAttr(0, Attribute::ZExt)),
> IsVarArg(FTy->isVarArg()),
> -      IsInReg(cs.paramHasAttr(0, Attribute::InReg)),
> -      DoesNotReturn(cs.doesNotReturn()),
> -      IsReturnValueUsed(!cs.getInstruction()->use_empty()),
> -      IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()),
> -      CallConv(cs.getCallingConv()), Callee(callee), Args(args), DAG(dag),
> -      DL(dl), CS(&cs) {}
> -
> -    /// CallLoweringInfo - Constructs a call lowering context based on the
> -    /// provided call information.
> -    CallLoweringInfo(SDValue chain, Type *retTy, bool retSExt, bool
> retZExt,
> -                     bool isVarArg, bool isInReg, unsigned numFixedArgs,
> -                     CallingConv::ID callConv, bool isTailCall,
> -                     bool doesNotReturn, bool isReturnValueUsed, SDValue
> callee,
> -                     ArgListTy &args, SelectionDAG &dag, DebugLoc dl)
> -    : Chain(chain), RetTy(retTy), RetSExt(retSExt), RetZExt(retZExt),
> -      IsVarArg(isVarArg), IsInReg(isInReg), DoesNotReturn(doesNotReturn),
> -      IsReturnValueUsed(isReturnValueUsed), IsTailCall(isTailCall),
> -      NumFixedArgs(numFixedArgs), CallConv(callConv), Callee(callee),
> -      Args(args), DAG(dag), DL(dl), CS(NULL) {}
> +  /// AddrMode - This represents an addressing mode of:
> +  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
> +  /// If BaseGV is null,  there is no BaseGV.
> +  /// If BaseOffs is zero, there is no base offset.
> +  /// If HasBaseReg is false, there is no base register.
> +  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg
> with
> +  /// no scale.
> +  ///
> +  struct AddrMode {
> +    GlobalValue *BaseGV;
> +    int64_t      BaseOffs;
> +    bool         HasBaseReg;
> +    int64_t      Scale;
> +    AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
>    };
>
> -  /// LowerCallTo - This function lowers an abstract call to a function
> into an
> -  /// actual call.  This returns a pair of operands.  The first element
> is the
> -  /// return value for the function (if RetTy is not VoidTy).  The second
> -  /// element is the outgoing token chain. It calls LowerCall to do the
> actual
> -  /// lowering.
> -  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
> +  /// isLegalAddressingMode - Return true if the addressing mode
> represented by
> +  /// AM is legal for this target, for a load/store of the specified type.
> +  /// The type may be VoidTy, in which case only return true if the
> addressing
> +  /// mode is legal for a load/store of any legal type.
> +  /// TODO: Handle pre/postinc as well.
> +  virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
>
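
(This is the piece LSR and CodeGenPrepare consume; a minimal sketch of a
query, with illustrative values and TLI/AccessTy assumed in scope:)

  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = 0;        // no global base
  AM.BaseOffs = 16;     // constant displacement
  AM.HasBaseReg = true; // plus a base register
  AM.Scale = 4;         // plus 4 * an index register
  // i.e. "is [reg + 4*reg + 16] legal for a load/store of AccessTy?"
  bool Legal = TLI->isLegalAddressingMode(AM, AccessTy);
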
> -  /// LowerCall - This hook must be implemented to lower calls into the
> -  /// the specified DAG. The outgoing arguments to the call are described
> -  /// by the Outs array, and the values to be returned by the call are
> -  /// described by the Ins array. The implementation should fill in the
> -  /// InVals array with legal-type return values from the call, and return
> -  /// the resulting token chain value.
> -  virtual SDValue
> -    LowerCall(CallLoweringInfo &/*CLI*/,
> -              SmallVectorImpl<SDValue> &/*InVals*/) const {
> -    llvm_unreachable("Not Implemented");
> +  /// isLegalICmpImmediate - Return true if the specified immediate is
> legal
> +  /// icmp immediate, that is the target has icmp instructions which can
> compare
> +  /// a register against the immediate without having to materialize the
> +  /// immediate into a register.
> +  virtual bool isLegalICmpImmediate(int64_t) const {
> +    return true;
>    }
>
> -  /// HandleByVal - Target-specific cleanup for formal ByVal parameters.
> -  virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
> -
> -  /// CanLowerReturn - This hook should be implemented to check whether
> the
> -  /// return values described by the Outs array can fit into the return
> -  /// registers.  If false is returned, an sret-demotion is performed.
> -  ///
> -  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
> -                              MachineFunction &/*MF*/, bool /*isVarArg*/,
> -               const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
> -               LLVMContext &/*Context*/) const
> -  {
> -    // Return true by default to get preexisting behavior.
> +  /// isLegalAddImmediate - Return true if the specified immediate is
> legal
> +  /// add immediate, that is the target has add instructions which can add
> +  /// a register with the immediate without having to materialize the
> +  /// immediate into a register.
> +  virtual bool isLegalAddImmediate(int64_t) const {
>      return true;
>    }
>
> -  /// LowerReturn - This hook must be implemented to lower outgoing
> -  /// return values, described by the Outs array, into the specified
> -  /// DAG. The implementation should return the resulting token chain
> -  /// value.
> -  ///
> -  virtual SDValue
> -    LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
> -                bool /*isVarArg*/,
> -                const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
> -                const SmallVectorImpl<SDValue> &/*OutVals*/,
> -                DebugLoc /*dl*/, SelectionDAG &/*DAG*/) const {
> -    llvm_unreachable("Not Implemented");
> +  /// isTruncateFree - Return true if it's free to truncate a value of
> +  /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value
> in
> +  /// register EAX to i16 by referencing its sub-register AX.
> +  virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
> +    return false;
>    }
>
> -  /// isUsedByReturnOnly - Return true if result of the specified node is
> used
> -  /// by a return node only. It also computes and returns the input chain
> for the
> -  /// tail call.
> -  /// This is used to determine whether it is possible
> -  /// to codegen a libcall as tail call at legalization time.
> -  virtual bool isUsedByReturnOnly(SDNode *, SDValue &Chain) const {
> +  virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
>      return false;
>    }
>
> -  /// mayBeEmittedAsTailCall - Return true if the target may be able to emit
> the
> -  /// call instruction as a tail call. This is used by optimization
> passes to
> -  /// determine if it's profitable to duplicate return instructions to
> enable
> -  /// tailcall optimization.
> -  virtual bool mayBeEmittedAsTailCall(CallInst *) const {
> +  /// isZExtFree - Return true if any actual instruction that defines a
> +  /// value of type Ty1 implicitly zero-extends the value to Ty2 in the
> result
> +  /// register. This does not necessarily include registers defined in
> +  /// unknown ways, such as incoming arguments, or copies from unknown
> +  /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
> +  /// does not necessarily apply to truncate instructions. e.g. on x86-64,
> +  /// all instructions that define 32-bit values implicitly zero-extend the
> +  /// result out to 64 bits.
> +  virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
>      return false;
>    }
>
> -  /// getTypeForExtArgOrReturn - Return the type that should be used to
> zero or
> -  /// sign extend a zeroext/signext integer argument or return value.
> -  /// FIXME: Most C calling conventions require the return type to be
> promoted,
> -  /// but this is not true all the time, e.g. i1 on x86-64. It is also not
> -  /// necessary for non-C calling conventions. The frontend should handle
> this
> -  /// and include all of the necessary information.
> -  virtual MVT getTypeForExtArgOrReturn(MVT VT,
> -                                       ISD::NodeType /*ExtendKind*/)
> const {
> -    MVT MinVT = getRegisterType(MVT::i32);
> -    return VT.bitsLT(MinVT) ? MinVT : VT;
> +  virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
> +    return false;
>    }
>
> -  /// LowerOperationWrapper - This callback is invoked by the type
> legalizer
> -  /// to legalize nodes with an illegal operand type but legal result
> types.
> -  /// It replaces the LowerOperation callback in the type Legalizer.
> -  /// The reason we can not do away with LowerOperation entirely is that
> -  /// LegalizeDAG isn't yet ready to use this callback.
> -  /// TODO: Consider merging with ReplaceNodeResults.
> -
> -  /// The target places new result values for the node in Results (their
> number
> -  /// and types must exactly match those of the original return values of
> -  /// the node), or leaves Results empty, which indicates that the node
> is not
> -  /// to be custom lowered after all.
> -  /// The default implementation calls LowerOperation.
> -  virtual void LowerOperationWrapper(SDNode *N,
> -                                     SmallVectorImpl<SDValue> &Results,
> -                                     SelectionDAG &DAG) const;
> +  /// isZExtFree - Return true if zero-extending the specific node Val to
> type
> +  /// VT2 is free (either because it's implicitly zero-extended such as
> ARM
> +  /// ldrb / ldrh or because it's folded such as X86 zero-extending
> loads).
> +  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
> +    return isZExtFree(Val.getValueType(), VT2);
> +  }
>
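
(These are the usual override points; what a hypothetical target might do,
sketched:)

  // Sketch only; MyTargetLowering and the chosen types are illustrative.
  bool MyTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
    // i64 -> i32 truncation is just a subregister read on this target.
    return VT1 == MVT::i64 && VT2 == MVT::i32;
  }
  bool MyTargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
    // 32-bit defs implicitly clear the upper half, as on x86-64.
    return VT1 == MVT::i32 && VT2 == MVT::i64;
  }
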
> -  /// LowerOperation - This callback is invoked for operations that are
> -  /// unsupported by the target, which are registered to use 'custom'
> lowering,
> -  /// and whose defined values are all legal.
> -  /// If the target has no operations that require custom lowering, it
> need not
> -  /// implement this.  The default implementation of this aborts.
> -  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
> +  /// isFNegFree - Return true if an fneg operation is free to the point
> where
> +  /// it is never worthwhile to replace it with a bitwise operation.
> +  virtual bool isFNegFree(EVT) const {
> +    return false;
> +  }
>
> -  /// ReplaceNodeResults - This callback is invoked when a node result
> type is
> -  /// illegal for the target, and the operation was registered to use
> 'custom'
> -  /// lowering for that result type.  The target places new result values
> for
> -  /// the node in Results (their number and types must exactly match
> those of
> -  /// the original return values of the node), or leaves Results empty,
> which
> -  /// indicates that the node is not to be custom lowered after all.
> -  ///
> -  /// If the target has no operations that require custom lowering, it
> need not
> -  /// implement this.  The default implementation aborts.
> -  virtual void ReplaceNodeResults(SDNode * /*N*/,
> -                                  SmallVectorImpl<SDValue> &/*Results*/,
> -                                  SelectionDAG &/*DAG*/) const {
> -    llvm_unreachable("ReplaceNodeResults not implemented for this
> target!");
> +  /// isFAbsFree - Return true if an fabs operation is free to the point
> where
> +  /// it is never worthwhile to replace it with a bitwise operation.
> +  virtual bool isFAbsFree(EVT) const {
> +    return false;
>    }
>
> -  /// getTargetNodeName() - This method returns the name of a target
> specific
> -  /// DAG node.
> -  virtual const char *getTargetNodeName(unsigned Opcode) const;
> +  /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is
> faster than
> +  /// a pair of mul and add instructions. fmuladd intrinsics will be
> expanded to
> +  /// FMAs when this method returns true (and FMAs are legal), otherwise
> fmuladd
> +  /// is expanded to mul + add.
> +  virtual bool isFMAFasterThanMulAndAdd(EVT) const {
> +    return false;
> +  }
>
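
(A target with a fast fused multiply-add unit would override this;
hypothetical sketch:)

  bool MyTargetLowering::isFMAFasterThanMulAndAdd(EVT VT) const {
    // Pretend scalar f32/f64 FMA beats separate mul+add here.
    return VT == MVT::f32 || VT == MVT::f64;
  }
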
> -  /// createFastISel - This method returns a target specific FastISel
> object,
> -  /// or null if the target does not support "fast" ISel.
> -  virtual FastISel *createFastISel(FunctionLoweringInfo &,
> -                                   const TargetLibraryInfo *) const {
> -    return 0;
> +  /// isNarrowingProfitable - Return true if it's profitable to narrow
> +  /// operations of type VT1 to VT2. e.g. on x86, it's profitable to
> narrow
> +  /// from i32 to i8 but not from i32 to i16.
> +  virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
> +    return false;
>    }
>
>
>  //===--------------------------------------------------------------------===//
> -  // Inline Asm Support hooks
> +  // Runtime Library hooks
>    //
>
> -  /// ExpandInlineAsm - This hook allows the target to expand an inline
> asm
> -  /// call to be explicit llvm code if it wants to.  This is useful for
> -  /// turning simple inline asms into LLVM intrinsics, which gives the
> -  /// compiler more information about the behavior of the code.
> -  virtual bool ExpandInlineAsm(CallInst *) const {
> -    return false;
> +  /// setLibcallName - Rename the default libcall routine name for the
> specified
> +  /// libcall.
> +  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
> +    LibcallRoutineNames[Call] = Name;
>    }
>
> -  enum ConstraintType {
> -    C_Register,            // Constraint represents specific register(s).
> -    C_RegisterClass,       // Constraint represents any of register(s) in
> class.
> -    C_Memory,              // Memory constraint.
> -    C_Other,               // Something else.
> -    C_Unknown              // Unsupported constraint.
> -  };
> +  /// getLibcallName - Get the libcall routine name for the specified
> libcall.
> +  ///
> +  const char *getLibcallName(RTLIB::Libcall Call) const {
> +    return LibcallRoutineNames[Call];
> +  }
>
> -  enum ConstraintWeight {
> -    // Generic weights.
> -    CW_Invalid  = -1,     // No match.
> -    CW_Okay     = 0,      // Acceptable.
> -    CW_Good     = 1,      // Good weight.
> -    CW_Better   = 2,      // Better weight.
> -    CW_Best     = 3,      // Best weight.
> -
> -    // Well-known weights.
> -    CW_SpecificReg  = CW_Okay,    // Specific register operands.
> -    CW_Register     = CW_Good,    // Register operands.
> -    CW_Memory       = CW_Better,  // Memory operands.
> -    CW_Constant     = CW_Best,    // Constant operand.
> -    CW_Default      = CW_Okay     // Default or don't know type.
> -  };
> +  /// setCmpLibcallCC - Override the default CondCode to be used to test
> the
> +  /// result of the comparison libcall against zero.
> +  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
> +    CmpLibcallCCs[Call] = CC;
> +  }
>
> -  /// AsmOperandInfo - This contains information for each constraint that
> we are
> -  /// lowering.
> -  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
> -    /// ConstraintCode - This contains the actual string for the code,
> like "m".
> -    /// TargetLowering picks the 'best' code from ConstraintInfo::Codes
> that
> -    /// most closely matches the operand.
> -    std::string ConstraintCode;
> +  /// getCmpLibcallCC - Get the CondCode that's to be used to test the
> result of
> +  /// the comparison libcall against zero.
> +  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
> +    return CmpLibcallCCs[Call];
> +  }
>
> -    /// ConstraintType - Information about the constraint code, e.g.
> Register,
> -    /// RegisterClass, Memory, Other, Unknown.
> -    TargetLowering::ConstraintType ConstraintType;
> +  /// setLibcallCallingConv - Set the CallingConv that should be used for
> the
> +  /// specified libcall.
> +  void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
> +    LibcallCallingConvs[Call] = CC;
> +  }
>
> -    /// CallOperandval - If this is the result output operand or a
> -    /// clobber, this is null, otherwise it is the incoming operand to the
> -    /// CallInst.  This gets modified as the asm is processed.
> -    Value *CallOperandVal;
> +  /// getLibcallCallingConv - Get the CallingConv that should be used for
> the
> +  /// specified libcall.
> +  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
> +    return LibcallCallingConvs[Call];
> +  }
>
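
(These setters are meant to be called from a target's TargetLowering
constructor; an illustrative use, not from any in-tree target:)

  // Soft-float comparison libcall returning nonzero for "equal": rename
  // it and test its result against zero with SETNE.
  setLibcallName(RTLIB::OEQ_F32, "__my_feq");
  setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
  setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::C);
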
> -    /// ConstraintVT - The ValueType for the operand value.
> -    MVT ConstraintVT;
> +private:
> +  const TargetMachine &TM;
> +  const DataLayout *TD;
> +  const TargetLoweringObjectFile &TLOF;
>
> -    /// isMatchingInputConstraint - Return true of this is an input
> operand that
> -    /// is a matching constraint like "4".
> -    bool isMatchingInputConstraint() const;
> +  /// PointerTy - The type to use for pointers for the default address
> space,
> +  /// usually i32 or i64.
> +  ///
> +  MVT PointerTy;
>
> -    /// getMatchedOperand - If this is an input matching constraint, this
> method
> -    /// returns the output operand it matches.
> -    unsigned getMatchedOperand() const;
> +  /// IsLittleEndian - True if this is a little endian target.
> +  ///
> +  bool IsLittleEndian;
>
> -    /// Copy constructor for copying from an AsmOperandInfo.
> -    AsmOperandInfo(const AsmOperandInfo &info)
> -      : InlineAsm::ConstraintInfo(info),
> -        ConstraintCode(info.ConstraintCode),
> -        ConstraintType(info.ConstraintType),
> -        CallOperandVal(info.CallOperandVal),
> -        ConstraintVT(info.ConstraintVT) {
> -    }
> +  /// SelectIsExpensive - Tells the code generator not to expand
> operations
> +  /// into sequences that use the select operations if possible.
> +  bool SelectIsExpensive;
>
> -    /// Copy constructor for copying from a ConstraintInfo.
> -    AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
> -      : InlineAsm::ConstraintInfo(info),
> -        ConstraintType(TargetLowering::C_Unknown),
> -        CallOperandVal(0), ConstraintVT(MVT::Other) {
> -    }
> -  };
> +  /// IntDivIsCheap - Tells the code generator not to expand integer
> divides by
> +  /// constants into a sequence of muls, adds, and shifts.  This is a
> hack until
> +  /// a real cost model is in place.  If we ever optimize for size, this
> will be
> +  /// set to true unconditionally.
> +  bool IntDivIsCheap;
>
> -  typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
> +  /// BypassSlowDivWidths - Tells the code generator to bypass slow divide or
> +  /// remainder instructions. For example, BypassSlowDivWidths[32,8]
> tells the
> +  /// code generator to bypass 32-bit integer div/rem with an 8-bit
> unsigned
> +  /// integer div/rem when the operands are positive and less than 256.
> +  DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
>
> -  /// ParseConstraints - Split up the constraint string from the inline
> -  /// assembly value into the specific constraints and their prefixes,
> -  /// and also tie in the associated operand values.
> -  /// If this returns an empty vector, and if the constraint string itself
> -  /// isn't empty, there was an error parsing.
> -  virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS)
> const;
> +  /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
> +  /// srl/add/sra for a signed divide by power of two, and let the target
> handle
> +  /// it.
> +  bool Pow2DivIsCheap;
>
> -  /// Examine constraint type and operand type and determine a weight
> value.
> -  /// The operand object must already have been set up with the operand
> type.
> -  virtual ConstraintWeight getMultipleConstraintMatchWeight(
> -      AsmOperandInfo &info, int maIndex) const;
> +  /// JumpIsExpensive - Tells the code generator that it shouldn't
> generate
> +  /// extra flow control instructions and should attempt to combine flow
> +  /// control instructions via predication.
> +  bool JumpIsExpensive;
>
> -  /// Examine constraint string and operand type and determine a weight
> value.
> -  /// The operand object must already have been set up with the operand
> type.
> -  virtual ConstraintWeight getSingleConstraintMatchWeight(
> -      AsmOperandInfo &info, const char *constraint) const;
> +  /// UseUnderscoreSetJmp - This target prefers to use _setjmp to
> implement
> +  /// llvm.setjmp.  Defaults to false.
> +  bool UseUnderscoreSetJmp;
>
> -  /// ComputeConstraintToUse - Determines the constraint code and
> constraint
> -  /// type to use for the specific AsmOperandInfo, setting
> -  /// OpInfo.ConstraintCode and OpInfo.ConstraintType.  If the actual
> operand
> -  /// being passed in is available, it can be passed in as Op, otherwise
> an
> -  /// empty SDValue can be passed.
> -  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
> -                                      SDValue Op,
> -                                      SelectionDAG *DAG = 0) const;
> +  /// UseUnderscoreLongJmp - This target prefers to use _longjmp to
> implement
> +  /// llvm.longjmp.  Defaults to false.
> +  bool UseUnderscoreLongJmp;
>
> -  /// getConstraintType - Given a constraint, return the type of
> constraint it
> -  /// is for this target.
> -  virtual ConstraintType getConstraintType(const std::string &Constraint)
> const;
> +  /// SupportJumpTables - Whether the target can generate code for
> jumptables.
> +  /// If it's not true, then each jumptable must be lowered into
> if-then-else's.
> +  bool SupportJumpTables;
>
> -  /// getRegForInlineAsmConstraint - Given a physical register constraint
> (e.g.
> -  /// {edx}), return the register number and the register class for the
> -  /// register.
> -  ///
> -  /// Given a register class constraint, like 'r', if this corresponds
> directly
> -  /// to an LLVM register class, return a register of 0 and the register
> class
> -  /// pointer.
> -  ///
> -  /// This should only be used for C_Register constraints.  On error,
> -  /// this returns a register number of 0 and a null register class
> pointer.
> -  virtual std::pair<unsigned, const TargetRegisterClass*>
> -    getRegForInlineAsmConstraint(const std::string &Constraint,
> -                                 EVT VT) const;
> +  /// MinimumJumpTableEntries - Number of blocks threshold to use jump
> tables.
> +  int MinimumJumpTableEntries;
>
> -  /// LowerXConstraint - try to replace an X constraint, which matches
> anything,
> -  /// with another that has more specific requirements based on the type
> of the
> -  /// corresponding operand.  This returns null if there is no
> replacement to
> -  /// make.
> -  virtual const char *LowerXConstraint(EVT ConstraintVT) const;
> +  /// BooleanContents - Information about the contents of the high-bits in
> +  /// boolean values held in a type wider than i1.  See
> getBooleanContents.
> +  BooleanContent BooleanContents;
> +  /// BooleanVectorContents - Information about the contents of the
> high-bits
> +  /// in boolean vector values when the element type is wider than i1.
>  See
> +  /// getBooleanContents.
> +  BooleanContent BooleanVectorContents;
>
> -  /// LowerAsmOperandForConstraint - Lower the specified operand into the
> Ops
> -  /// vector.  If it is invalid, don't add anything to Ops.
> -  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string
> &Constraint,
> -                                            std::vector<SDValue> &Ops,
> -                                            SelectionDAG &DAG) const;
> +  /// SchedPreferenceInfo - The target scheduling preference: shortest
> possible
> +  /// total cycles or lowest register usage.
> +  Sched::Preference SchedPreferenceInfo;
>
> -
>  //===--------------------------------------------------------------------===//
> -  // Instruction Emitting Hooks
> -  //
> +  /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
> +  unsigned JumpBufSize;
>
> -  // EmitInstrWithCustomInserter - This method should be implemented by
> targets
> -  // that mark instructions with the 'usesCustomInserter' flag.  These
> -  // instructions are special in various ways, which require special
> support to
> -  // insert.  The specified MachineInstr is created but not inserted into
> any
> -  // basic blocks, and this method is called to expand it into a sequence
> of
> -  // instructions, potentially also creating new basic blocks and control
> flow.
> -  virtual MachineBasicBlock *
> -    EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB)
> const;
> +  /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
> +  /// buffers
> +  unsigned JumpBufAlignment;
>
> -  /// AdjustInstrPostInstrSelection - This method should be implemented by
> -  /// targets that mark instructions with the 'hasPostISelHook' flag.
> These
> -  /// instructions must be adjusted after instruction selection by target
> hooks.
> -  /// e.g. To fill in optional defs for ARM 's' setting instructions.
> -  virtual void
> -  AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
> +  /// MinStackArgumentAlignment - The minimum alignment that any argument
> +  /// on the stack needs to have.
> +  ///
> +  unsigned MinStackArgumentAlignment;
>
> -
>  //===--------------------------------------------------------------------===//
> -  // Addressing mode description hooks (used by LSR etc).
> -  //
> +  /// MinFunctionAlignment - The minimum function alignment (used when
> +  /// optimizing for size, and to prevent explicitly provided alignment
> +  /// from leading to incorrect code).
> +  ///
> +  unsigned MinFunctionAlignment;
>
> -  /// GetAddrModeArguments - CodeGenPrepare sinks address calculations
> into the
> -  /// same BB as Load/Store instructions reading the address.  This
> allows as
> -  /// much computation as possible to be done in the address mode for that
> -  /// operand.  This hook lets targets also pass back when this should be
> done
> -  /// on intrinsics which load/store.
> -  virtual bool GetAddrModeArguments(IntrinsicInst *I,
> -                                    SmallVectorImpl<Value*> &Ops,
> -                                    Type *&AccessTy) const {
> -    return false;
> -  }
> +  /// PrefFunctionAlignment - The preferred function alignment (used when
> +  /// alignment unspecified and optimizing for speed).
> +  ///
> +  unsigned PrefFunctionAlignment;
>
> -  /// AddrMode - This represents an addressing mode of:
> -  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
> -  /// If BaseGV is null,  there is no BaseGV.
> -  /// If BaseOffs is zero, there is no base offset.
> -  /// If HasBaseReg is false, there is no base register.
> -  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg
> with
> -  /// no scale.
> +  /// PrefLoopAlignment - The preferred loop alignment.
>    ///
> -  struct AddrMode {
> -    GlobalValue *BaseGV;
> -    int64_t      BaseOffs;
> -    bool         HasBaseReg;
> -    int64_t      Scale;
> -    AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
> -  };
> +  unsigned PrefLoopAlignment;
>
> -  /// isLegalAddressingMode - Return true if the addressing mode
> represented by
> -  /// AM is legal for this target, for a load/store of the specified type.
> -  /// The type may be VoidTy, in which case only return true if the
> addressing
> -  /// mode is legal for a load/store of any legal type.
> -  /// TODO: Handle pre/postinc as well.
> -  virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
> +  /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions
> should
> +  /// be folded into the enclosed atomic intrinsic instruction by the
> +  /// combiner.
> +  bool ShouldFoldAtomicFences;
>
> -  /// isLegalICmpImmediate - Return true if the specified immediate is
> legal
> -  /// icmp immediate, that is the target has icmp instructions which can
> compare
> -  /// a register against the immediate without having to materialize the
> -  /// immediate into a register.
> -  virtual bool isLegalICmpImmediate(int64_t) const {
> -    return true;
> -  }
> -
> -  /// isLegalAddImmediate - Return true if the specified immediate is
> legal
> -  /// add immediate, that is the target has add instructions which can add
> -  /// a register with the immediate without having to materialize the
> -  /// immediate into a register.
> -  virtual bool isLegalAddImmediate(int64_t) const {
> -    return true;
> -  }
> -
> -  /// isTruncateFree - Return true if it's free to truncate a value of
> -  /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value
> in
> -  /// register EAX to i16 by referencing its sub-register AX.
> -  virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
> -    return false;
> -  }
> -
> -  virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
> -    return false;
> -  }
> -
> -  /// isZExtFree - Return true if any actual instruction that defines a
> -  /// value of type Ty1 implicitly zero-extends the value to Ty2 in the
> result
> -  /// register. This does not necessarily include registers defined in
> -  /// unknown ways, such as incoming arguments, or copies from unknown
> -  /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
> -  /// does not necessarily apply to truncate instructions. e.g. on x86-64,
> -  /// all instructions that define 32-bit values implicitly zero-extend the
> -  /// result out to 64 bits.
> -  virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
> -    return false;
> -  }
> -
> -  virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
> -    return false;
> -  }
> -
> -  /// isZExtFree - Return true if zero-extending the specific node Val to
> type
> -  /// VT2 is free (either because it's implicitly zero-extended such as
> ARM
> -  /// ldrb / ldrh or because it's folded such as X86 zero-extending
> loads).
> -  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
> -    return isZExtFree(Val.getValueType(), VT2);
> -  }
> -
> -  /// isFNegFree - Return true if an fneg operation is free to the point
> where
> -  /// it is never worthwhile to replace it with a bitwise operation.
> -  virtual bool isFNegFree(EVT) const {
> -    return false;
> -  }
> -
> -  /// isFAbsFree - Return true if an fabs operation is free to the point
> where
> -  /// it is never worthwhile to replace it with a bitwise operation.
> -  virtual bool isFAbsFree(EVT) const {
> -    return false;
> -  }
> -
> -  /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is
> faster than
> -  /// a pair of mul and add instructions. fmuladd intrinsics will be
> expanded to
> -  /// FMAs when this method returns true (and FMAs are legal), otherwise
> fmuladd
> -  /// is expanded to mul + add.
> -  virtual bool isFMAFasterThanMulAndAdd(EVT) const {
> -    return false;
> -  }
> -
> -  /// isNarrowingProfitable - Return true if it's profitable to narrow
> -  /// operations of type VT1 to VT2. e.g. on x86, it's profitable to
> narrow
> -  /// from i32 to i8 but not from i32 to i16.
> -  virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
> -    return false;
> -  }
> -
> -
>  //===--------------------------------------------------------------------===//
> -  // Div utility functions
> -  //
> -  SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, DebugLoc dl,
> -                         SelectionDAG &DAG) const;
> -  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool
> IsAfterLegalization,
> -                      std::vector<SDNode*> *Created) const;
> -  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool
> IsAfterLegalization,
> -                      std::vector<SDNode*> *Created) const;
> -
> -
> -
>  //===--------------------------------------------------------------------===//
> -  // Runtime Library hooks
> -  //
> -
> -  /// setLibcallName - Rename the default libcall routine name for the
> specified
> -  /// libcall.
> -  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
> -    LibcallRoutineNames[Call] = Name;
> -  }
> -
> -  /// getLibcallName - Get the libcall routine name for the specified
> libcall.
> -  ///
> -  const char *getLibcallName(RTLIB::Libcall Call) const {
> -    return LibcallRoutineNames[Call];
> -  }
> -
> -  /// setCmpLibcallCC - Override the default CondCode to be used to test
> the
> -  /// result of the comparison libcall against zero.
> -  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
> -    CmpLibcallCCs[Call] = CC;
> -  }
> -
> -  /// getCmpLibcallCC - Get the CondCode that's to be used to test the
> result of
> -  /// the comparison libcall against zero.
> -  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
> -    return CmpLibcallCCs[Call];
> -  }
> -
> -  /// setLibcallCallingConv - Set the CallingConv that should be used for
> the
> -  /// specified libcall.
> -  void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
> -    LibcallCallingConvs[Call] = CC;
> -  }
> -
> -  /// getLibcallCallingConv - Get the CallingConv that should be used for
> the
> -  /// specified libcall.
> -  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
> -    return LibcallCallingConvs[Call];
> -  }
> -
> -  bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
> -                            SDValue &Chain) const;
> -
> -  void softenSetCCOperands(SelectionDAG &DAG, EVT VT,
> -                           SDValue &NewLHS, SDValue &NewRHS,
> -                           ISD::CondCode &CCCode, DebugLoc DL) const;
> -
> -  SDValue makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
> -                      const SDValue *Ops, unsigned NumOps,
> -                      bool isSigned, DebugLoc dl) const;
> -
> -private:
> -  const TargetMachine &TM;
> -  const DataLayout *TD;
> -  const TargetLoweringObjectFile &TLOF;
> -
> -  /// PointerTy - The type to use for pointers for the default address
> space,
> -  /// usually i32 or i64.
> -  ///
> -  MVT PointerTy;
> -
> -  /// IsLittleEndian - True if this is a little endian target.
> -  ///
> -  bool IsLittleEndian;
> -
> -  /// SelectIsExpensive - Tells the code generator not to expand
> operations
> -  /// into sequences that use the select operations if possible.
> -  bool SelectIsExpensive;
> -
> -  /// IntDivIsCheap - Tells the code generator not to expand integer
> divides by
> -  /// constants into a sequence of muls, adds, and shifts.  This is a
> hack until
> -  /// a real cost model is in place.  If we ever optimize for size, this
> will be
> -  /// set to true unconditionally.
> -  bool IntDivIsCheap;
> -
> -  /// BypassSlowDivWidths - Tells the code generator to bypass slow divide or
> -  /// remainder instructions. For example, BypassSlowDivWidths[32,8]
> tells the
> -  /// code generator to bypass 32-bit integer div/rem with an 8-bit
> unsigned
> -  /// integer div/rem when the operands are positive and less than 256.
> -  DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
> -
> -  /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
> -  /// srl/add/sra for a signed divide by power of two, and let the target
> handle
> -  /// it.
> -  bool Pow2DivIsCheap;
> -
> -  /// JumpIsExpensive - Tells the code generator that it shouldn't
> generate
> -  /// extra flow control instructions and should attempt to combine flow
> -  /// control instructions via predication.
> -  bool JumpIsExpensive;
> -
> -  /// UseUnderscoreSetJmp - This target prefers to use _setjmp to
> implement
> -  /// llvm.setjmp.  Defaults to false.
> -  bool UseUnderscoreSetJmp;
> -
> -  /// UseUnderscoreLongJmp - This target prefers to use _longjmp to
> implement
> -  /// llvm.longjmp.  Defaults to false.
> -  bool UseUnderscoreLongJmp;
> -
> -  /// SupportJumpTables - Whether the target can generate code for
> jumptables.
> -  /// If it's not true, then each jumptable must be lowered into
> if-then-else's.
> -  bool SupportJumpTables;
> -
> -  /// MinimumJumpTableEntries - Number of blocks threshold to use jump
> tables.
> -  int MinimumJumpTableEntries;
> -
> -  /// BooleanContents - Information about the contents of the high-bits in
> -  /// boolean values held in a type wider than i1.  See
> getBooleanContents.
> -  BooleanContent BooleanContents;
> -  /// BooleanVectorContents - Information about the contents of the
> high-bits
> -  /// in boolean vector values when the element type is wider than i1.
>  See
> -  /// getBooleanContents.
> -  BooleanContent BooleanVectorContents;
> -
> -  /// SchedPreferenceInfo - The target scheduling preference: shortest
> possible
> -  /// total cycles or lowest register usage.
> -  Sched::Preference SchedPreferenceInfo;
> -
> -  /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
> -  unsigned JumpBufSize;
> -
> -  /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
> -  /// buffers
> -  unsigned JumpBufAlignment;
> -
> -  /// MinStackArgumentAlignment - The minimum alignment that any argument
> -  /// on the stack needs to have.
> -  ///
> -  unsigned MinStackArgumentAlignment;
> -
> -  /// MinFunctionAlignment - The minimum function alignment (used when
> -  /// optimizing for size, and to prevent explicitly provided alignment
> -  /// from leading to incorrect code).
> -  ///
> -  unsigned MinFunctionAlignment;
> -
> -  /// PrefFunctionAlignment - The preferred function alignment (used when
> -  /// alignment unspecified and optimizing for speed).
> -  ///
> -  unsigned PrefFunctionAlignment;
> -
> -  /// PrefLoopAlignment - The preferred loop alignment.
> -  ///
> -  unsigned PrefLoopAlignment;
> -
> -  /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
> -  /// be folded into the enclosed atomic intrinsic instruction by the
> -  /// combiner.
> -  bool ShouldFoldAtomicFences;
> -
> -  /// InsertFencesForAtomic - Whether the DAG builder should automatically
> -  /// insert fences and reduce ordering for atomics.  (This will be set for
> -  /// most architectures with weak memory ordering.)
> -  bool InsertFencesForAtomic;
> +  /// InsertFencesForAtomic - Whether the DAG builder should automatically
> +  /// insert fences and reduce ordering for atomics.  (This will be set for
> +  /// most architectures with weak memory ordering.)
> +  bool InsertFencesForAtomic;
>
>    /// StackPointerRegisterToSaveRestore - If set to a physical register, this
>    /// specifies the register that llvm.stacksave/llvm.stackrestore should save
> @@ -2246,12 +1643,627 @@
>    /// more expensive than a branch if the branch is usually predicted right.
>    bool predictableSelectIsExpensive;
>
> -private:
> +protected:
>    /// isLegalRC - Return true if the value types that can be represented by the
>    /// specified register class are all legal.
>    bool isLegalRC(const TargetRegisterClass *RC) const;
>  };
>
>
> +//===----------------------------------------------------------------------===//
> +/// TargetLowering - This class defines information used to lower LLVM code to
> +/// legal SelectionDAG operators that the target instruction selector can accept
> +/// natively.
> +///
> +/// This class also defines callbacks that targets must implement to lower
> +/// target-specific constructs to SelectionDAG operators.
> +///
> +class TargetLowering : public TargetLoweringBase {
> +  TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
> +  void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;
> +
> +public:
> +  /// NOTE: The constructor takes ownership of TLOF.
> +  explicit TargetLowering(const TargetMachine &TM,
> +                          const TargetLoweringObjectFile *TLOF);
> +
> +  /// getPreIndexedAddressParts - returns true by value, base pointer and
> +  /// offset pointer and addressing mode by reference if the node's address
> +  /// can be legally represented as pre-indexed load / store address.
> +  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
> +                                         SDValue &/*Offset*/,
> +                                         ISD::MemIndexedMode &/*AM*/,
> +                                         SelectionDAG &/*DAG*/) const {
> +    return false;
> +  }
> +
> +  /// getPostIndexedAddressParts - returns true by value, base pointer and
> +  /// offset pointer and addressing mode by reference if this node can be
> +  /// combined with a load / store to form a post-indexed load / store.
> +  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
> +                                          SDValue &/*Base*/, SDValue &/*Offset*/,
> +                                          ISD::MemIndexedMode &/*AM*/,
> +                                          SelectionDAG &/*DAG*/) const {
> +    return false;
> +  }
> +
> +  /// getJumpTableEncoding - Return the entry encoding for a jump table in the
> +  /// current function.  The returned value is a member of the
> +  /// MachineJumpTableInfo::JTEntryKind enum.
> +  virtual unsigned getJumpTableEncoding() const;
> +
> +  virtual const MCExpr *
> +  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
> +                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
> +                            MCContext &/*Ctx*/) const {
> +    llvm_unreachable("Need to implement this hook if target has custom JTIs");
> +  }
> +
> +  /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
> +  /// jumptable.
> +  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
> +                                           SelectionDAG &DAG) const;
> +
> +  /// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
> +  /// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
> +  /// MCExpr.
> +  virtual const MCExpr *
> +  getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
> +                               unsigned JTI, MCContext &Ctx) const;
> +
> +  /// isOffsetFoldingLegal - Return true if folding a constant offset
> +  /// with the given GlobalAddress is legal.  It is frequently not legal in
> +  /// PIC relocation models.
> +  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
> +
> +  bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
> +                            SDValue &Chain) const;
> +
> +  void softenSetCCOperands(SelectionDAG &DAG, EVT VT,
> +                           SDValue &NewLHS, SDValue &NewRHS,
> +                           ISD::CondCode &CCCode, DebugLoc DL) const;
> +
> +  SDValue makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
> +                      const SDValue *Ops, unsigned NumOps,
> +                      bool isSigned, DebugLoc dl) const;
> +
> +  //===--------------------------------------------------------------------===//
> +  // TargetLowering Optimization Methods
> +  //
> +
> +  /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
> +  /// SDValues for returning information from TargetLowering to its clients
> +  /// that want to combine.
> +  struct TargetLoweringOpt {
> +    SelectionDAG &DAG;
> +    bool LegalTys;
> +    bool LegalOps;
> +    SDValue Old;
> +    SDValue New;
> +
> +    explicit TargetLoweringOpt(SelectionDAG &InDAG,
> +                               bool LT, bool LO) :
> +      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
> +
> +    bool LegalTypes() const { return LegalTys; }
> +    bool LegalOperations() const { return LegalOps; }
> +
> +    bool CombineTo(SDValue O, SDValue N) {
> +      Old = O;
> +      New = N;
> +      return true;
> +    }
> +
> +    /// ShrinkDemandedConstant - Check to see if the specified operand of the
> +    /// specified instruction is a constant integer.  If so, check to see if
> +    /// there are any bits set in the constant that are not demanded.  If so,
> +    /// shrink the constant and return true.
> +    bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
> +
> +    /// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the
> +    /// casts are free.  This uses isZExtFree and ZERO_EXTEND for the widening
> +    /// cast, but it could be generalized for targets with other types of
> +    /// implicit widening casts.
> +    bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
> +                          DebugLoc dl);
> +  };
> +
> +  /// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
> +  /// DemandedMask bits of the result of Op are ever used downstream.  If we can
> +  /// use this information to simplify Op, create a new simplified DAG node and
> +  /// return true, returning the original and new nodes in Old and New.
> +  /// Otherwise, analyze the expression and return a mask of KnownOne and
> +  /// KnownZero bits for the expression (used to simplify the caller).
> +  /// The KnownZero/One bits may only be accurate for those bits in the
> +  /// DemandedMask.
> +  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
> +                            APInt &KnownZero, APInt &KnownOne,
> +                            TargetLoweringOpt &TLO, unsigned Depth = 0) const;
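As an aside for anyone reading this interface for the first time: the
TargetLoweringOpt/SimplifyDemandedBits pairing is easiest to see from the
caller's side. A minimal sketch (not code from this patch; the helper name
and the all-ones demanded mask are invented for illustration):

    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/Target/TargetLowering.h"
    using namespace llvm;

    // Ask TLI to simplify N under a demanded-bits mask and, on success,
    // commit the replacement recorded in TLO.Old/TLO.New.
    static SDValue simplifyAllBits(const TargetLowering &TLI, SelectionDAG &DAG,
                                   SDNode *N, bool LegalTypes, bool LegalOps) {
      TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOps);
      APInt Demanded =
          APInt::getAllOnesValue(N->getValueType(0).getSizeInBits());
      APInt KnownZero, KnownOne;
      if (!TLI.SimplifyDemandedBits(SDValue(N, 0), Demanded,
                                    KnownZero, KnownOne, TLO))
        return SDValue();   // nothing was simplified
      DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New);
      return TLO.New;
    }
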
> +
> +  /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
> +  /// Mask are known to be either zero or one and return them in the
> +  /// KnownZero/KnownOne bitsets.
> +  virtual void computeMaskedBitsForTargetNode(const SDValue Op,
> +                                              APInt &KnownZero,
> +                                              APInt &KnownOne,
> +                                              const SelectionDAG &DAG,
> +                                              unsigned Depth = 0) const;
> +
> +  /// ComputeNumSignBitsForTargetNode - This method can be implemented by
> +  /// targets that want to expose additional information about sign bits to the
> +  /// DAG Combiner.
> +  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
> +                                                   unsigned Depth = 0) const;
> +
> +  struct DAGCombinerInfo {
> +    void *DC;  // The DAG Combiner object.
> +    CombineLevel Level;
> +    bool CalledByLegalizer;
> +  public:
> +    SelectionDAG &DAG;
> +
> +    DAGCombinerInfo(SelectionDAG &dag, CombineLevel level,  bool cl, void *dc)
> +      : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
> +
> +    bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
> +    bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
> +    bool isAfterLegalizeVectorOps() const {
> +      return Level == AfterLegalizeDAG;
> +    }
> +    CombineLevel getDAGCombineLevel() { return Level; }
> +    bool isCalledByLegalizer() const { return CalledByLegalizer; }
> +
> +    void AddToWorklist(SDNode *N);
> +    void RemoveFromWorklist(SDNode *N);
> +    SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
> +                      bool AddTo = true);
> +    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
> +    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
> +
> +    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
> +  };
> +
> +  /// SimplifySetCC - Try to simplify a setcc built with the specified operands
> +  /// and cc. If it is unable to simplify it, return a null SDValue.
> +  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
> +                          ISD::CondCode Cond, bool foldBooleans,
> +                          DAGCombinerInfo &DCI, DebugLoc dl) const;
> +
> +  /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
> +  /// node is a GlobalAddress + offset.
> +  virtual bool
> +  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
> +
> +  /// PerformDAGCombine - This method will be invoked for all target nodes and
> +  /// for any target-independent nodes that the target has registered with
> +  /// invoke it for.
> +  ///
> +  /// The semantics are as follows:
> +  /// Return Value:
> +  ///   SDValue.Val == 0   - No change was made
> +  ///   SDValue.Val == N   - N was replaced, is dead, and is already
> handled.
> +  ///   otherwise          - N should be replaced by the returned Operand.
> +  ///
> +  /// In addition, methods provided by DAGCombinerInfo may be used to
> perform
> +  /// more complex transformations.
> +  ///
> +  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
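The return-value contract above is worth an example; a hypothetical target
override honoring it (the (add x, 0) fold itself is invented, and in reality
handled by the generic combiner):

    // Hypothetical override in a target's TargetLowering subclass.
    SDValue FooTargetLowering::PerformDAGCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
      if (N->getOpcode() == ISD::ADD)
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
          if (C->isNullValue())
            return N->getOperand(0); // N should be replaced by this operand
      return SDValue();              // Val == 0: no change was made
    }
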
> +
> +  /// isTypeDesirableForOp - Return true if the target has native support for
> +  /// the specified value type and it is 'desirable' to use the type for the
> +  /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
> +  /// instruction encodings are longer and some i16 instructions are slow.
> +  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
> +    // By default, assume all legal types are desirable.
> +    return isTypeLegal(VT);
> +  }
> +
> +  /// isDesirableToTransformToIntegerOp - Return true if it is profitable for
> +  /// dag combiner to transform a floating point op of specified opcode to an
> +  /// equivalent op of an integer type. e.g. f32 load -> i32 load can be
> +  /// profitable on ARM.
> +  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
> +                                                 EVT /*VT*/) const {
> +    return false;
> +  }
> +
> +  /// IsDesirableToPromoteOp - This method queries the target whether it is
> +  /// beneficial for dag combiner to promote the specified node. If true, it
> +  /// should return the desired promotion type by reference.
> +  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
> +    return false;
> +  }
> +
> +  //===--------------------------------------------------------------------===//
> +  // Lowering methods - These methods must be implemented by targets so that
> +  // the SelectionDAGBuilder code knows how to lower these.
> +  //
> +
> +  /// LowerFormalArguments - This hook must be implemented to lower the
> +  /// incoming (formal) arguments, described by the Ins array, into the
> +  /// specified DAG. The implementation should fill in the InVals array
> +  /// with legal-type argument values, and return the resulting token
> +  /// chain value.
> +  ///
> +  virtual SDValue
> +    LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
> +                         bool /*isVarArg*/,
> +                         const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
> +                         DebugLoc /*dl*/, SelectionDAG &/*DAG*/,
> +                         SmallVectorImpl<SDValue> &/*InVals*/) const {
> +    llvm_unreachable("Not Implemented");
> +  }
> +
> +  struct ArgListEntry {
> +    SDValue Node;
> +    Type* Ty;
> +    bool isSExt  : 1;
> +    bool isZExt  : 1;
> +    bool isInReg : 1;
> +    bool isSRet  : 1;
> +    bool isNest  : 1;
> +    bool isByVal : 1;
> +    uint16_t Alignment;
> +
> +    ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
> +      isSRet(false), isNest(false), isByVal(false), Alignment(0) { }
> +  };
> +  typedef std::vector<ArgListEntry> ArgListTy;
> +
> +  /// CallLoweringInfo - This structure contains all information that is
> +  /// necessary for lowering calls. It is passed to TLI::LowerCallTo when the
> +  /// SelectionDAG builder needs to lower a call, and targets will see this
> +  /// struct in their LowerCall implementation.
> +  struct CallLoweringInfo {
> +    SDValue Chain;
> +    Type *RetTy;
> +    bool RetSExt           : 1;
> +    bool RetZExt           : 1;
> +    bool IsVarArg          : 1;
> +    bool IsInReg           : 1;
> +    bool DoesNotReturn     : 1;
> +    bool IsReturnValueUsed : 1;
> +
> +    // IsTailCall should be modified by implementations of
> +    // TargetLowering::LowerCall that perform tail call conversions.
> +    bool IsTailCall;
> +
> +    unsigned NumFixedArgs;
> +    CallingConv::ID CallConv;
> +    SDValue Callee;
> +    ArgListTy &Args;
> +    SelectionDAG &DAG;
> +    DebugLoc DL;
> +    ImmutableCallSite *CS;
> +    SmallVector<ISD::OutputArg, 32> Outs;
> +    SmallVector<SDValue, 32> OutVals;
> +    SmallVector<ISD::InputArg, 32> Ins;
> +
> +
> +    /// CallLoweringInfo - Constructs a call lowering context based on the
> +    /// ImmutableCallSite \p cs.
> +    CallLoweringInfo(SDValue chain, Type *retTy,
> +                     FunctionType *FTy, bool isTailCall, SDValue callee,
> +                     ArgListTy &args, SelectionDAG &dag, DebugLoc dl,
> +                     ImmutableCallSite &cs)
> +    : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attribute::SExt)),
> +      RetZExt(cs.paramHasAttr(0, Attribute::ZExt)), IsVarArg(FTy->isVarArg()),
> +      IsInReg(cs.paramHasAttr(0, Attribute::InReg)),
> +      DoesNotReturn(cs.doesNotReturn()),
> +      IsReturnValueUsed(!cs.getInstruction()->use_empty()),
> +      IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()),
> +      CallConv(cs.getCallingConv()), Callee(callee), Args(args), DAG(dag),
> +      DL(dl), CS(&cs) {}
> +
> +    /// CallLoweringInfo - Constructs a call lowering context based on the
> +    /// provided call information.
> +    CallLoweringInfo(SDValue chain, Type *retTy, bool retSExt, bool retZExt,
> +                     bool isVarArg, bool isInReg, unsigned numFixedArgs,
> +                     CallingConv::ID callConv, bool isTailCall,
> +                     bool doesNotReturn, bool isReturnValueUsed, SDValue callee,
> +                     ArgListTy &args, SelectionDAG &dag, DebugLoc dl)
> +    : Chain(chain), RetTy(retTy), RetSExt(retSExt), RetZExt(retZExt),
> +      IsVarArg(isVarArg), IsInReg(isInReg), DoesNotReturn(doesNotReturn),
> +      IsReturnValueUsed(isReturnValueUsed), IsTailCall(isTailCall),
> +      NumFixedArgs(numFixedArgs), CallConv(callConv), Callee(callee),
> +      Args(args), DAG(dag), DL(dl), CS(NULL) {}
> +  };
> +
> +  /// LowerCallTo - This function lowers an abstract call to a function into an
> +  /// actual call.  This returns a pair of operands.  The first element is the
> +  /// return value for the function (if RetTy is not VoidTy).  The second
> +  /// element is the outgoing token chain. It calls LowerCall to do the actual
> +  /// lowering.
> +  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
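On the client side the intended flow is: fill in an ArgListTy, build a
CallLoweringInfo with the second constructor above, and hand it to
LowerCallTo. A sketch (the helper name and the hard-coded flag values are
placeholders, not anything this patch adds):

    // Emit a simple non-tail C call and thread the chain through it.
    static SDValue emitSimpleCall(const TargetLowering &TLI, SelectionDAG &DAG,
                                  SDValue &Chain, SDValue Callee, Type *RetTy,
                                  TargetLowering::ArgListTy &Args, DebugLoc dl) {
      TargetLowering::CallLoweringInfo CLI(Chain, RetTy,
          /*retSExt=*/false, /*retZExt=*/false, /*isVarArg=*/false,
          /*isInReg=*/false, /*numFixedArgs=*/Args.size(),
          CallingConv::C, /*isTailCall=*/false, /*doesNotReturn=*/false,
          /*isReturnValueUsed=*/true, Callee, Args, DAG, dl);
      std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
      Chain = Result.second;  // second element: the outgoing token chain
      return Result.first;    // first element: the call's return value
    }
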
> +
> +  /// LowerCall - This hook must be implemented to lower calls into the
> +  /// specified DAG. The outgoing arguments to the call are described
> +  /// by the Outs array, and the values to be returned by the call are
> +  /// described by the Ins array. The implementation should fill in the
> +  /// InVals array with legal-type return values from the call, and return
> +  /// the resulting token chain value.
> +  virtual SDValue
> +    LowerCall(CallLoweringInfo &/*CLI*/,
> +              SmallVectorImpl<SDValue> &/*InVals*/) const {
> +    llvm_unreachable("Not Implemented");
> +  }
> +
> +  /// HandleByVal - Target-specific cleanup for formal ByVal parameters.
> +  virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
> +
> +  /// CanLowerReturn - This hook should be implemented to check whether the
> +  /// return values described by the Outs array can fit into the return
> +  /// registers.  If false is returned, an sret-demotion is performed.
> +  ///
> +  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
> +                              MachineFunction &/*MF*/, bool /*isVarArg*/,
> +               const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
> +               LLVMContext &/*Context*/) const
> +  {
> +    // Return true by default to get preexisting behavior.
> +    return true;
> +  }
> +
> +  /// LowerReturn - This hook must be implemented to lower outgoing
> +  /// return values, described by the Outs array, into the specified
> +  /// DAG. The implementation should return the resulting token chain
> +  /// value.
> +  ///
> +  virtual SDValue
> +    LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
> +                bool /*isVarArg*/,
> +                const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
> +                const SmallVectorImpl<SDValue> &/*OutVals*/,
> +                DebugLoc /*dl*/, SelectionDAG &/*DAG*/) const {
> +    llvm_unreachable("Not Implemented");
> +  }
> +
> +  /// isUsedByReturnOnly - Return true if result of the specified node is used
> +  /// by a return node only. It also computes and returns the input chain for
> +  /// the tail call.
> +  /// This is used to determine whether it is possible
> +  /// to codegen a libcall as tail call at legalization time.
> +  virtual bool isUsedByReturnOnly(SDNode *, SDValue &Chain) const {
> +    return false;
> +  }
> +
> +  /// mayBeEmittedAsTailCall - Return true if the target may be able to emit the
> +  /// call instruction as a tail call. This is used by optimization passes to
> +  /// determine if it's profitable to duplicate return instructions to enable
> +  /// tailcall optimization.
> +  virtual bool mayBeEmittedAsTailCall(CallInst *) const {
> +    return false;
> +  }
> +
> +  /// getTypeForExtArgOrReturn - Return the type that should be used to zero or
> +  /// sign extend a zeroext/signext integer argument or return value.
> +  /// FIXME: Most C calling conventions require the return type to be promoted,
> +  /// but this is not true all the time, e.g. i1 on x86-64. It is also not
> +  /// necessary for non-C calling conventions. The frontend should handle this
> +  /// and include all of the necessary information.
> +  virtual MVT getTypeForExtArgOrReturn(MVT VT,
> +                                       ISD::NodeType /*ExtendKind*/) const {
> +    MVT MinVT = getRegisterType(MVT::i32);
> +    return VT.bitsLT(MinVT) ? MinVT : VT;
> +  }
> +
> +  /// LowerOperationWrapper - This callback is invoked by the type legalizer
> +  /// to legalize nodes with an illegal operand type but legal result types.
> +  /// It replaces the LowerOperation callback in the type Legalizer.
> +  /// The reason we cannot do away with LowerOperation entirely is that
> +  /// LegalizeDAG isn't yet ready to use this callback.
> +  /// TODO: Consider merging with ReplaceNodeResults.
> +
> +  /// The target places new result values for the node in Results (their number
> +  /// and types must exactly match those of the original return values of
> +  /// the node), or leaves Results empty, which indicates that the node is not
> +  /// to be custom lowered after all.
> +  /// The default implementation calls LowerOperation.
> +  virtual void LowerOperationWrapper(SDNode *N,
> +                                     SmallVectorImpl<SDValue> &Results,
> +                                     SelectionDAG &DAG) const;
> +
> +  /// LowerOperation - This callback is invoked for operations that are
> +  /// unsupported by the target, which are registered to use 'custom' lowering,
> +  /// and whose defined values are all legal.
> +  /// If the target has no operations that require custom lowering, it need not
> +  /// implement this.  The default implementation of this aborts.
> +  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
> +
> +  /// ReplaceNodeResults - This callback is invoked when a node result type is
> +  /// illegal for the target, and the operation was registered to use 'custom'
> +  /// lowering for that result type.  The target places new result values for
> +  /// the node in Results (their number and types must exactly match those of
> +  /// the original return values of the node), or leaves Results empty, which
> +  /// indicates that the node is not to be custom lowered after all.
> +  ///
> +  /// If the target has no operations that require custom lowering, it need not
> +  /// implement this.  The default implementation aborts.
> +  virtual void ReplaceNodeResults(SDNode * /*N*/,
> +                                  SmallVectorImpl<SDValue> &/*Results*/,
> +                                  SelectionDAG &/*DAG*/) const {
> +    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
> +  }
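To make the "number and types must exactly match" rule concrete: a sketch of
custom i64 CTPOP result legalization for a hypothetical 32-bit target,
counting each half separately and widening the sum back to the node's
original type (the helper is illustrative, not from this patch):

    static void replaceCTPOPResults(SDNode *N,
                                    SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG) {
      DebugLoc dl = N->getDebugLoc();
      SDValue Op = N->getOperand(0);
      // Split the illegal i64 operand into its two i32 halves.
      SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                               DAG.getConstant(0, MVT::i32));
      SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                               DAG.getConstant(1, MVT::i32));
      SDValue Cnt = DAG.getNode(ISD::ADD, dl, MVT::i32,
                                DAG.getNode(ISD::CTPOP, dl, MVT::i32, Lo),
                                DAG.getNode(ISD::CTPOP, dl, MVT::i32, Hi));
      // Exactly one value, of the node's original result type, goes back.
      Results.push_back(DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Cnt));
    }
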
> +
> +  /// getTargetNodeName() - This method returns the name of a target specific
> +  /// DAG node.
> +  virtual const char *getTargetNodeName(unsigned Opcode) const;
> +
> +  /// createFastISel - This method returns a target specific FastISel object,
> +  /// or null if the target does not support "fast" ISel.
> +  virtual FastISel *createFastISel(FunctionLoweringInfo &,
> +                                   const TargetLibraryInfo *) const {
> +    return 0;
> +  }
> +
> +  //===--------------------------------------------------------------------===//
> +  // Inline Asm Support hooks
> +  //
> +
> +  /// ExpandInlineAsm - This hook allows the target to expand an inline asm
> +  /// call to be explicit llvm code if it wants to.  This is useful for
> +  /// turning simple inline asms into LLVM intrinsics, which gives the
> +  /// compiler more information about the behavior of the code.
> +  virtual bool ExpandInlineAsm(CallInst *) const {
> +    return false;
> +  }
> +
> +  enum ConstraintType {
> +    C_Register,            // Constraint represents specific register(s).
> +    C_RegisterClass,       // Constraint represents any of register(s) in class.
> +    C_Memory,              // Memory constraint.
> +    C_Other,               // Something else.
> +    C_Unknown              // Unsupported constraint.
> +  };
> +
> +  enum ConstraintWeight {
> +    // Generic weights.
> +    CW_Invalid  = -1,     // No match.
> +    CW_Okay     = 0,      // Acceptable.
> +    CW_Good     = 1,      // Good weight.
> +    CW_Better   = 2,      // Better weight.
> +    CW_Best     = 3,      // Best weight.
> +
> +    // Well-known weights.
> +    CW_SpecificReg  = CW_Okay,    // Specific register operands.
> +    CW_Register     = CW_Good,    // Register operands.
> +    CW_Memory       = CW_Better,  // Memory operands.
> +    CW_Constant     = CW_Best,    // Constant operand.
> +    CW_Default      = CW_Okay     // Default or don't know type.
> +  };
> +
> +  /// AsmOperandInfo - This contains information for each constraint that we are
> +  /// lowering.
> +  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
> +    /// ConstraintCode - This contains the actual string for the code, like "m".
> +    /// TargetLowering picks the 'best' code from ConstraintInfo::Codes that
> +    /// most closely matches the operand.
> +    std::string ConstraintCode;
> +
> +    /// ConstraintType - Information about the constraint code, e.g. Register,
> +    /// RegisterClass, Memory, Other, Unknown.
> +    TargetLowering::ConstraintType ConstraintType;
> +
> +    /// CallOperandVal - If this is the result output operand or a
> +    /// clobber, this is null, otherwise it is the incoming operand to the
> +    /// CallInst.  This gets modified as the asm is processed.
> +    Value *CallOperandVal;
> +
> +    /// ConstraintVT - The ValueType for the operand value.
> +    MVT ConstraintVT;
> +
> +    /// isMatchingInputConstraint - Return true if this is an input operand that
> +    /// is a matching constraint like "4".
> +    bool isMatchingInputConstraint() const;
> +
> +    /// getMatchedOperand - If this is an input matching constraint, this method
> +    /// returns the output operand it matches.
> +    unsigned getMatchedOperand() const;
> +
> +    /// Copy constructor for copying from an AsmOperandInfo.
> +    AsmOperandInfo(const AsmOperandInfo &info)
> +      : InlineAsm::ConstraintInfo(info),
> +        ConstraintCode(info.ConstraintCode),
> +        ConstraintType(info.ConstraintType),
> +        CallOperandVal(info.CallOperandVal),
> +        ConstraintVT(info.ConstraintVT) {
> +    }
> +
> +    /// Copy constructor for copying from a ConstraintInfo.
> +    AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
> +      : InlineAsm::ConstraintInfo(info),
> +        ConstraintType(TargetLowering::C_Unknown),
> +        CallOperandVal(0), ConstraintVT(MVT::Other) {
> +    }
> +  };
> +
> +  typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
> +
> +  /// ParseConstraints - Split up the constraint string from the inline
> +  /// assembly value into the specific constraints and their prefixes,
> +  /// and also tie in the associated operand values.
> +  /// If this returns an empty vector, and if the constraint string itself
> +  /// isn't empty, there was an error parsing.
> +  virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;
> +
> +  /// Examine constraint type and operand type and determine a weight value.
> +  /// The operand object must already have been set up with the operand type.
> +  virtual ConstraintWeight getMultipleConstraintMatchWeight(
> +      AsmOperandInfo &info, int maIndex) const;
> +
> +  /// Examine constraint string and operand type and determine a weight value.
> +  /// The operand object must already have been set up with the operand type.
> +  virtual ConstraintWeight getSingleConstraintMatchWeight(
> +      AsmOperandInfo &info, const char *constraint) const;
> +
> +  /// ComputeConstraintToUse - Determines the constraint code and constraint
> +  /// type to use for the specific AsmOperandInfo, setting
> +  /// OpInfo.ConstraintCode and OpInfo.ConstraintType.  If the actual operand
> +  /// being passed in is available, it can be passed in as Op, otherwise an
> +  /// empty SDValue can be passed.
> +  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
> +                                      SDValue Op,
> +                                      SelectionDAG *DAG = 0) const;
> +
> +  /// getConstraintType - Given a constraint, return the type of constraint it
> +  /// is for this target.
> +  virtual ConstraintType getConstraintType(const std::string &Constraint) const;
> +
> +  /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g.
> +  /// {edx}), return the register number and the register class for the
> +  /// register.
> +  ///
> +  /// Given a register class constraint, like 'r', if this corresponds directly
> +  /// to an LLVM register class, return a register of 0 and the register class
> +  /// pointer.
> +  ///
> +  /// This should only be used for C_Register constraints.  On error,
> +  /// this returns a register number of 0 and a null register class pointer.
> +  virtual std::pair<unsigned, const TargetRegisterClass*>
> +    getRegForInlineAsmConstraint(const std::string &Constraint,
> +                                 EVT VT) const;
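A concrete override, x86-flavored and much simplified relative to what real
targets do, just to show the three cases the comment describes (the class
name FooTargetLowering is hypothetical):

    std::pair<unsigned, const TargetRegisterClass*>
    FooTargetLowering::getRegForInlineAsmConstraint(
        const std::string &Constraint, EVT VT) const {
      if (Constraint == "{ax}")                // specific-register constraint
        return std::make_pair(X86::EAX, &X86::GR32RegClass);
      if (Constraint == "r" && VT == MVT::i32) // register-class constraint
        return std::make_pair(0U, &X86::GR32RegClass);
      // On error: register number 0 and a null register class pointer.
      return std::make_pair(0U, static_cast<const TargetRegisterClass*>(0));
    }
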
> +
> +  /// LowerXConstraint - try to replace an X constraint, which matches anything,
> +  /// with another that has more specific requirements based on the type of the
> +  /// corresponding operand.  This returns null if there is no replacement to
> +  /// make.
> +  virtual const char *LowerXConstraint(EVT ConstraintVT) const;
> +
> +  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
> +  /// vector.  If it is invalid, don't add anything to Ops.
> +  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
> +                                            std::vector<SDValue> &Ops,
> +                                            SelectionDAG &DAG) const;
> +
> +  //===--------------------------------------------------------------------===//
> +  // Div utility functions
> +  //
> +  SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, DebugLoc dl,
> +                         SelectionDAG &DAG) const;
> +  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
> +                      std::vector<SDNode*> *Created) const;
> +  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
> +                      std::vector<SDNode*> *Created) const;
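For readers who haven't seen BuildSDIV/BuildUDIV before: these produce the
magic-number expansions of division by a constant. What BuildSDIV emits for
n/7 corresponds roughly to this C-level sequence, with the constant
0x92492493 = (2^34+5)/7 (interpreted as a signed 32-bit value) and shift
amount 2 taken from Hacker's Delight; the function is purely illustrative:

    // Signed 32-bit n/7 without a divide instruction.
    int div_by_7(int n) {
      long long prod = (long long)n * (int)0x92492493; // n * magic, 64-bit
      int q = (int)(prod >> 32); // high half of the product
      q += n;                    // magic is negative, so add the dividend back
      q >>= 2;                   // arithmetic shift by the magic shift amount
      q += (unsigned)n >> 31;    // round toward zero when n is negative
      return q;
    }
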
> +
> +  //===--------------------------------------------------------------------===//
> +  // Instruction Emitting Hooks
> +  //
> +
> +  // EmitInstrWithCustomInserter - This method should be implemented by targets
> +  // that mark instructions with the 'usesCustomInserter' flag.  These
> +  // instructions are special in various ways, which require special support to
> +  // insert.  The specified MachineInstr is created but not inserted into any
> +  // basic blocks, and this method is called to expand it into a sequence of
> +  // instructions, potentially also creating new basic blocks and control flow.
> +  virtual MachineBasicBlock *
> +    EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
> +
> +  /// AdjustInstrPostInstrSelection - This method should be implemented by
> +  /// targets that mark instructions with the 'hasPostISelHook' flag.  These
> +  /// instructions must be adjusted after instruction selection by target hooks.
> +  /// e.g. To fill in optional defs for ARM 's' setting instructions.
> +  virtual void
> +  AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
> +};
> +
>  /// GetReturnInfo - Given an LLVM IR type and return type attributes,
>  /// compute the return value EVTs and flags, and optionally also
>  /// the offsets, if the return value is being lowered to memory.
>
> Modified: llvm/trunk/lib/CodeGen/BasicTargetTransformInfo.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/BasicTargetTransformInfo.cpp?rev=172246&r1=172245&r2=172246&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/CodeGen/BasicTargetTransformInfo.cpp (original)
> +++ llvm/trunk/lib/CodeGen/BasicTargetTransformInfo.cpp Fri Jan 11 14:05:37 2013
> @@ -26,7 +26,7 @@
>  namespace {
>
>  class BasicTTI : public ImmutablePass, public TargetTransformInfo {
> -  const TargetLowering *TLI;
> +  const TargetLoweringBase *TLI;
>
>    /// Estimate the overhead of scalarizing an instruction. Insert and Extract
>    /// are set if the result needs to be inserted and/or extracted from vectors.
> @@ -37,7 +37,7 @@
>      llvm_unreachable("This pass cannot be directly constructed");
>    }
>
> -  BasicTTI(const TargetLowering *TLI) : ImmutablePass(ID), TLI(TLI) {
> +  BasicTTI(const TargetLoweringBase *TLI) : ImmutablePass(ID), TLI(TLI) {
>      initializeBasicTTIPass(*PassRegistry::getPassRegistry());
>    }
>
> @@ -112,7 +112,7 @@
>  char BasicTTI::ID = 0;
>
>  ImmutablePass *
> -llvm::createBasicTargetTransformInfoPass(const TargetLowering *TLI) {
> +llvm::createBasicTargetTransformInfoPass(const TargetLoweringBase *TLI) {
>    return new BasicTTI(TLI);
>  }
>
> @@ -128,7 +128,7 @@
>  bool BasicTTI::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
>                                       int64_t BaseOffset, bool HasBaseReg,
>                                       int64_t Scale) const {
> -  TargetLowering::AddrMode AM;
> +  TargetLoweringBase::AddrMode AM;
>    AM.BaseGV = BaseGV;
>    AM.BaseOffs = BaseOffset;
>    AM.HasBaseReg = HasBaseReg;
>
> Modified: llvm/trunk/lib/CodeGen/CMakeLists.txt
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/CMakeLists.txt?rev=172246&r1=172245&r2=172246&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/CodeGen/CMakeLists.txt (original)
> +++ llvm/trunk/lib/CodeGen/CMakeLists.txt Fri Jan 11 14:05:37 2013
> @@ -9,8 +9,8 @@
>    CodeGen.cpp
>    CodePlacementOpt.cpp
>    CriticalAntiDepBreaker.cpp
> -  DeadMachineInstructionElim.cpp
>    DFAPacketizer.cpp
> +  DeadMachineInstructionElim.cpp
>    DwarfEHPrepare.cpp
>    EarlyIfConversion.cpp
>    EdgeBundles.cpp
> @@ -32,21 +32,20 @@
>    LiveInterval.cpp
>    LiveIntervalAnalysis.cpp
>    LiveIntervalUnion.cpp
> +  LiveRangeCalc.cpp
> +  LiveRangeEdit.cpp
>    LiveRegMatrix.cpp
>    LiveStackAnalysis.cpp
>    LiveVariables.cpp
> -  LiveRangeCalc.cpp
> -  LiveRangeEdit.cpp
>    LocalStackSlotAllocation.cpp
>    MachineBasicBlock.cpp
>    MachineBlockFrequencyInfo.cpp
>    MachineBlockPlacement.cpp
>    MachineBranchProbabilityInfo.cpp
> +  MachineCSE.cpp
>    MachineCodeEmitter.cpp
>    MachineCopyPropagation.cpp
> -  MachineCSE.cpp
>    MachineDominators.cpp
> -  MachinePostDominators.cpp
>    MachineFunction.cpp
>    MachineFunctionAnalysis.cpp
>    MachineFunctionPass.cpp
> @@ -58,6 +57,7 @@
>    MachineModuleInfo.cpp
>    MachineModuleInfoImpls.cpp
>    MachinePassRegistry.cpp
> +  MachinePostDominators.cpp
>    MachineRegisterInfo.cpp
>    MachineSSAUpdater.cpp
>    MachineScheduler.cpp
> @@ -91,16 +91,17 @@
>    ShrinkWrapping.cpp
>    SjLjEHPrepare.cpp
>    SlotIndexes.cpp
> -  Spiller.cpp
>    SpillPlacement.cpp
> +  Spiller.cpp
>    SplitKit.cpp
> +  StackColoring.cpp
>    StackProtector.cpp
>    StackSlotColoring.cpp
> -  StackColoring.cpp
>    StrongPHIElimination.cpp
>    TailDuplication.cpp
>    TargetFrameLoweringImpl.cpp
>    TargetInstrInfo.cpp
> +  TargetLoweringBase.cpp
>    TargetLoweringObjectFileImpl.cpp
>    TargetOptionsImpl.cpp
>    TargetRegisterInfo.cpp
>
> Modified: llvm/trunk/lib/CodeGen/DwarfEHPrepare.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/DwarfEHPrepare.cpp?rev=172246&r1=172245&r2=172246&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/CodeGen/DwarfEHPrepare.cpp (original)
> +++ llvm/trunk/lib/CodeGen/DwarfEHPrepare.cpp Fri Jan 11 14:05:37 2013
> @@ -33,7 +33,7 @@
>  namespace {
>    class DwarfEHPrepare : public FunctionPass {
>      const TargetMachine *TM;
> -    const TargetLowering *TLI;
> +    const TargetLoweringBase *TLI;
>
>      // RewindFunction - _Unwind_Resume or the target equivalent.
>      Constant *RewindFunction;
>
> Modified: llvm/trunk/lib/CodeGen/IfConversion.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/IfConversion.cpp?rev=172246&r1=172245&r2=172246&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/CodeGen/IfConversion.cpp (original)
> +++ llvm/trunk/lib/CodeGen/IfConversion.cpp Fri Jan 11 14:05:37 2013
> @@ -151,7 +151,7 @@
>      /// basic block number.
>      std::vector<BBInfo> BBAnalysis;
>
> -    const TargetLowering *TLI;
> +    const TargetLoweringBase *TLI;
>      const TargetInstrInfo *TII;
>      const TargetRegisterInfo *TRI;
>      const InstrItineraryData *InstrItins;
>
> Modified: llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp?rev=172246&r1=172245&r2=172246&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp (original)
> +++ llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp Fri Jan 11 14:05:37 2013
> @@ -171,7 +171,7 @@
>    const TargetInstrInfo *TII;
>
>    /// \brief A handle to the target's lowering info.
> -  const TargetLowering *TLI;
> +  const TargetLoweringBase *TLI;
>
>    /// \brief Allocator and owner of BlockChain structures.
>    ///
>
> Modified: llvm/trunk/lib/CodeGen/MachineLICM.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineLICM.cpp?rev=172246&r1=172245&r2=172246&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/CodeGen/MachineLICM.cpp (original)
> +++ llvm/trunk/lib/CodeGen/MachineLICM.cpp Fri Jan 11 14:05:37 2013
> @@ -62,7 +62,7 @@
>    class MachineLICM : public MachineFunctionPass {
>      const TargetMachine   *TM;
>      const TargetInstrInfo *TII;
> -    const TargetLowering *TLI;
> +    const TargetLoweringBase *TLI;
>      const TargetRegisterInfo *TRI;
>      const MachineFrameInfo *MFI;
>      MachineRegisterInfo *MRI;
>
> Modified: llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp?rev=172246&r1=172245&r2=172246&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp (original)
> +++ llvm/trunk/lib/CodeGen/SelectionDAG/TargetLowering.cpp Fri Jan 11 14:05:37 2013
> @@ -33,324 +33,6 @@
>  #include <cctype>
>  using namespace llvm;
>
> -/// InitLibcallNames - Set default libcall names.
> -///
> -static void InitLibcallNames(const char **Names) {
> -  Names[RTLIB::SHL_I16] = "__ashlhi3";
> -  Names[RTLIB::SHL_I32] = "__ashlsi3";
> -  Names[RTLIB::SHL_I64] = "__ashldi3";
> -  Names[RTLIB::SHL_I128] = "__ashlti3";
> -  Names[RTLIB::SRL_I16] = "__lshrhi3";
> -  Names[RTLIB::SRL_I32] = "__lshrsi3";
> -  Names[RTLIB::SRL_I64] = "__lshrdi3";
> -  Names[RTLIB::SRL_I128] = "__lshrti3";
> -  Names[RTLIB::SRA_I16] = "__ashrhi3";
> -  Names[RTLIB::SRA_I32] = "__ashrsi3";
> -  Names[RTLIB::SRA_I64] = "__ashrdi3";
> -  Names[RTLIB::SRA_I128] = "__ashrti3";
> -  Names[RTLIB::MUL_I8] = "__mulqi3";
> -  Names[RTLIB::MUL_I16] = "__mulhi3";
> -  Names[RTLIB::MUL_I32] = "__mulsi3";
> -  Names[RTLIB::MUL_I64] = "__muldi3";
> -  Names[RTLIB::MUL_I128] = "__multi3";
> -  Names[RTLIB::MULO_I32] = "__mulosi4";
> -  Names[RTLIB::MULO_I64] = "__mulodi4";
> -  Names[RTLIB::MULO_I128] = "__muloti4";
> -  Names[RTLIB::SDIV_I8] = "__divqi3";
> -  Names[RTLIB::SDIV_I16] = "__divhi3";
> -  Names[RTLIB::SDIV_I32] = "__divsi3";
> -  Names[RTLIB::SDIV_I64] = "__divdi3";
> -  Names[RTLIB::SDIV_I128] = "__divti3";
> -  Names[RTLIB::UDIV_I8] = "__udivqi3";
> -  Names[RTLIB::UDIV_I16] = "__udivhi3";
> -  Names[RTLIB::UDIV_I32] = "__udivsi3";
> -  Names[RTLIB::UDIV_I64] = "__udivdi3";
> -  Names[RTLIB::UDIV_I128] = "__udivti3";
> -  Names[RTLIB::SREM_I8] = "__modqi3";
> -  Names[RTLIB::SREM_I16] = "__modhi3";
> -  Names[RTLIB::SREM_I32] = "__modsi3";
> -  Names[RTLIB::SREM_I64] = "__moddi3";
> -  Names[RTLIB::SREM_I128] = "__modti3";
> -  Names[RTLIB::UREM_I8] = "__umodqi3";
> -  Names[RTLIB::UREM_I16] = "__umodhi3";
> -  Names[RTLIB::UREM_I32] = "__umodsi3";
> -  Names[RTLIB::UREM_I64] = "__umoddi3";
> -  Names[RTLIB::UREM_I128] = "__umodti3";
> -
> -  // These are generally not available.
> -  Names[RTLIB::SDIVREM_I8] = 0;
> -  Names[RTLIB::SDIVREM_I16] = 0;
> -  Names[RTLIB::SDIVREM_I32] = 0;
> -  Names[RTLIB::SDIVREM_I64] = 0;
> -  Names[RTLIB::SDIVREM_I128] = 0;
> -  Names[RTLIB::UDIVREM_I8] = 0;
> -  Names[RTLIB::UDIVREM_I16] = 0;
> -  Names[RTLIB::UDIVREM_I32] = 0;
> -  Names[RTLIB::UDIVREM_I64] = 0;
> -  Names[RTLIB::UDIVREM_I128] = 0;
> -
> -  Names[RTLIB::NEG_I32] = "__negsi2";
> -  Names[RTLIB::NEG_I64] = "__negdi2";
> -  Names[RTLIB::ADD_F32] = "__addsf3";
> -  Names[RTLIB::ADD_F64] = "__adddf3";
> -  Names[RTLIB::ADD_F80] = "__addxf3";
> -  Names[RTLIB::ADD_F128] = "__addtf3";
> -  Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
> -  Names[RTLIB::SUB_F32] = "__subsf3";
> -  Names[RTLIB::SUB_F64] = "__subdf3";
> -  Names[RTLIB::SUB_F80] = "__subxf3";
> -  Names[RTLIB::SUB_F128] = "__subtf3";
> -  Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
> -  Names[RTLIB::MUL_F32] = "__mulsf3";
> -  Names[RTLIB::MUL_F64] = "__muldf3";
> -  Names[RTLIB::MUL_F80] = "__mulxf3";
> -  Names[RTLIB::MUL_F128] = "__multf3";
> -  Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
> -  Names[RTLIB::DIV_F32] = "__divsf3";
> -  Names[RTLIB::DIV_F64] = "__divdf3";
> -  Names[RTLIB::DIV_F80] = "__divxf3";
> -  Names[RTLIB::DIV_F128] = "__divtf3";
> -  Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
> -  Names[RTLIB::REM_F32] = "fmodf";
> -  Names[RTLIB::REM_F64] = "fmod";
> -  Names[RTLIB::REM_F80] = "fmodl";
> -  Names[RTLIB::REM_F128] = "fmodl";
> -  Names[RTLIB::REM_PPCF128] = "fmodl";
> -  Names[RTLIB::FMA_F32] = "fmaf";
> -  Names[RTLIB::FMA_F64] = "fma";
> -  Names[RTLIB::FMA_F80] = "fmal";
> -  Names[RTLIB::FMA_F128] = "fmal";
> -  Names[RTLIB::FMA_PPCF128] = "fmal";
> -  Names[RTLIB::POWI_F32] = "__powisf2";
> -  Names[RTLIB::POWI_F64] = "__powidf2";
> -  Names[RTLIB::POWI_F80] = "__powixf2";
> -  Names[RTLIB::POWI_F128] = "__powitf2";
> -  Names[RTLIB::POWI_PPCF128] = "__powitf2";
> -  Names[RTLIB::SQRT_F32] = "sqrtf";
> -  Names[RTLIB::SQRT_F64] = "sqrt";
> -  Names[RTLIB::SQRT_F80] = "sqrtl";
> -  Names[RTLIB::SQRT_F128] = "sqrtl";
> -  Names[RTLIB::SQRT_PPCF128] = "sqrtl";
> -  Names[RTLIB::LOG_F32] = "logf";
> -  Names[RTLIB::LOG_F64] = "log";
> -  Names[RTLIB::LOG_F80] = "logl";
> -  Names[RTLIB::LOG_F128] = "logl";
> -  Names[RTLIB::LOG_PPCF128] = "logl";
> -  Names[RTLIB::LOG2_F32] = "log2f";
> -  Names[RTLIB::LOG2_F64] = "log2";
> -  Names[RTLIB::LOG2_F80] = "log2l";
> -  Names[RTLIB::LOG2_F128] = "log2l";
> -  Names[RTLIB::LOG2_PPCF128] = "log2l";
> -  Names[RTLIB::LOG10_F32] = "log10f";
> -  Names[RTLIB::LOG10_F64] = "log10";
> -  Names[RTLIB::LOG10_F80] = "log10l";
> -  Names[RTLIB::LOG10_F128] = "log10l";
> -  Names[RTLIB::LOG10_PPCF128] = "log10l";
> -  Names[RTLIB::EXP_F32] = "expf";
> -  Names[RTLIB::EXP_F64] = "exp";
> -  Names[RTLIB::EXP_F80] = "expl";
> -  Names[RTLIB::EXP_F128] = "expl";
> -  Names[RTLIB::EXP_PPCF128] = "expl";
> -  Names[RTLIB::EXP2_F32] = "exp2f";
> -  Names[RTLIB::EXP2_F64] = "exp2";
> -  Names[RTLIB::EXP2_F80] = "exp2l";
> -  Names[RTLIB::EXP2_F128] = "exp2l";
> -  Names[RTLIB::EXP2_PPCF128] = "exp2l";
> -  Names[RTLIB::SIN_F32] = "sinf";
> -  Names[RTLIB::SIN_F64] = "sin";
> -  Names[RTLIB::SIN_F80] = "sinl";
> -  Names[RTLIB::SIN_F128] = "sinl";
> -  Names[RTLIB::SIN_PPCF128] = "sinl";
> -  Names[RTLIB::COS_F32] = "cosf";
> -  Names[RTLIB::COS_F64] = "cos";
> -  Names[RTLIB::COS_F80] = "cosl";
> -  Names[RTLIB::COS_F128] = "cosl";
> -  Names[RTLIB::COS_PPCF128] = "cosl";
> -  Names[RTLIB::POW_F32] = "powf";
> -  Names[RTLIB::POW_F64] = "pow";
> -  Names[RTLIB::POW_F80] = "powl";
> -  Names[RTLIB::POW_F128] = "powl";
> -  Names[RTLIB::POW_PPCF128] = "powl";
> -  Names[RTLIB::CEIL_F32] = "ceilf";
> -  Names[RTLIB::CEIL_F64] = "ceil";
> -  Names[RTLIB::CEIL_F80] = "ceill";
> -  Names[RTLIB::CEIL_F128] = "ceill";
> -  Names[RTLIB::CEIL_PPCF128] = "ceill";
> -  Names[RTLIB::TRUNC_F32] = "truncf";
> -  Names[RTLIB::TRUNC_F64] = "trunc";
> -  Names[RTLIB::TRUNC_F80] = "truncl";
> -  Names[RTLIB::TRUNC_F128] = "truncl";
> -  Names[RTLIB::TRUNC_PPCF128] = "truncl";
> -  Names[RTLIB::RINT_F32] = "rintf";
> -  Names[RTLIB::RINT_F64] = "rint";
> -  Names[RTLIB::RINT_F80] = "rintl";
> -  Names[RTLIB::RINT_F128] = "rintl";
> -  Names[RTLIB::RINT_PPCF128] = "rintl";
> -  Names[RTLIB::NEARBYINT_F32] = "nearbyintf";
> -  Names[RTLIB::NEARBYINT_F64] = "nearbyint";
> -  Names[RTLIB::NEARBYINT_F80] = "nearbyintl";
> -  Names[RTLIB::NEARBYINT_F128] = "nearbyintl";
> -  Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl";
> -  Names[RTLIB::FLOOR_F32] = "floorf";
> -  Names[RTLIB::FLOOR_F64] = "floor";
> -  Names[RTLIB::FLOOR_F80] = "floorl";
> -  Names[RTLIB::FLOOR_F128] = "floorl";
> -  Names[RTLIB::FLOOR_PPCF128] = "floorl";
> -  Names[RTLIB::COPYSIGN_F32] = "copysignf";
> -  Names[RTLIB::COPYSIGN_F64] = "copysign";
> -  Names[RTLIB::COPYSIGN_F80] = "copysignl";
> -  Names[RTLIB::COPYSIGN_F128] = "copysignl";
> -  Names[RTLIB::COPYSIGN_PPCF128] = "copysignl";
> -  Names[RTLIB::FPEXT_F64_F128] = "__extenddftf2";
> -  Names[RTLIB::FPEXT_F32_F128] = "__extendsftf2";
> -  Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
> -  Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee";
> -  Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee";
> -  Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
> -  Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
> -  Names[RTLIB::FPROUND_F128_F32] = "__trunctfsf2";
> -  Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2";
> -  Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2";
> -  Names[RTLIB::FPROUND_F128_F64] = "__trunctfdf2";
> -  Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2";
> -  Names[RTLIB::FPTOSINT_F32_I8] = "__fixsfqi";
> -  Names[RTLIB::FPTOSINT_F32_I16] = "__fixsfhi";
> -  Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
> -  Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
> -  Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
> -  Names[RTLIB::FPTOSINT_F64_I8] = "__fixdfqi";
> -  Names[RTLIB::FPTOSINT_F64_I16] = "__fixdfhi";
> -  Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
> -  Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
> -  Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
> -  Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi";
> -  Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
> -  Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
> -  Names[RTLIB::FPTOSINT_F128_I32] = "__fixtfsi";
> -  Names[RTLIB::FPTOSINT_F128_I64] = "__fixtfdi";
> -  Names[RTLIB::FPTOSINT_F128_I128] = "__fixtfti";
> -  Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi";
> -  Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
> -  Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
> -  Names[RTLIB::FPTOUINT_F32_I8] = "__fixunssfqi";
> -  Names[RTLIB::FPTOUINT_F32_I16] = "__fixunssfhi";
> -  Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
> -  Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
> -  Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
> -  Names[RTLIB::FPTOUINT_F64_I8] = "__fixunsdfqi";
> -  Names[RTLIB::FPTOUINT_F64_I16] = "__fixunsdfhi";
> -  Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
> -  Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
> -  Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti";
> -  Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
> -  Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
> -  Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti";
> -  Names[RTLIB::FPTOUINT_F128_I32] = "__fixunstfsi";
> -  Names[RTLIB::FPTOUINT_F128_I64] = "__fixunstfdi";
> -  Names[RTLIB::FPTOUINT_F128_I128] = "__fixunstfti";
> -  Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi";
> -  Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
> -  Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti";
> -  Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
> -  Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
> -  Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf";
> -  Names[RTLIB::SINTTOFP_I32_F128] = "__floatsitf";
> -  Names[RTLIB::SINTTOFP_I32_PPCF128] = "__floatsitf";
> -  Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
> -  Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
> -  Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf";
> -  Names[RTLIB::SINTTOFP_I64_F128] = "__floatditf";
> -  Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf";
> -  Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf";
> -  Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf";
> -  Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf";
> -  Names[RTLIB::SINTTOFP_I128_F128] = "__floattitf";
> -  Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf";
> -  Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
> -  Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
> -  Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf";
> -  Names[RTLIB::UINTTOFP_I32_F128] = "__floatunsitf";
> -  Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf";
> -  Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
> -  Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
> -  Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf";
> -  Names[RTLIB::UINTTOFP_I64_F128] = "__floatunditf";
> -  Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf";
> -  Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf";
> -  Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf";
> -  Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf";
> -  Names[RTLIB::UINTTOFP_I128_F128] = "__floatuntitf";
> -  Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf";
> -  Names[RTLIB::OEQ_F32] = "__eqsf2";
> -  Names[RTLIB::OEQ_F64] = "__eqdf2";
> -  Names[RTLIB::OEQ_F128] = "__eqtf2";
> -  Names[RTLIB::UNE_F32] = "__nesf2";
> -  Names[RTLIB::UNE_F64] = "__nedf2";
> -  Names[RTLIB::UNE_F128] = "__netf2";
> -  Names[RTLIB::OGE_F32] = "__gesf2";
> -  Names[RTLIB::OGE_F64] = "__gedf2";
> -  Names[RTLIB::OGE_F128] = "__getf2";
> -  Names[RTLIB::OLT_F32] = "__ltsf2";
> -  Names[RTLIB::OLT_F64] = "__ltdf2";
> -  Names[RTLIB::OLT_F128] = "__lttf2";
> -  Names[RTLIB::OLE_F32] = "__lesf2";
> -  Names[RTLIB::OLE_F64] = "__ledf2";
> -  Names[RTLIB::OLE_F128] = "__letf2";
> -  Names[RTLIB::OGT_F32] = "__gtsf2";
> -  Names[RTLIB::OGT_F64] = "__gtdf2";
> -  Names[RTLIB::OGT_F128] = "__gttf2";
> -  Names[RTLIB::UO_F32] = "__unordsf2";
> -  Names[RTLIB::UO_F64] = "__unorddf2";
> -  Names[RTLIB::UO_F128] = "__unordtf2";
> -  Names[RTLIB::O_F32] = "__unordsf2";
> -  Names[RTLIB::O_F64] = "__unorddf2";
> -  Names[RTLIB::O_F128] = "__unordtf2";
> -  Names[RTLIB::MEMCPY] = "memcpy";
> -  Names[RTLIB::MEMMOVE] = "memmove";
> -  Names[RTLIB::MEMSET] = "memset";
> -  Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
> -  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1";
> -  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2";
> -  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = "__sync_val_compare_and_swap_4";
> -  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = "__sync_val_compare_and_swap_8";
> -  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = "__sync_lock_test_and_set_1";
> -  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = "__sync_lock_test_and_set_2";
> -  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = "__sync_lock_test_and_set_4";
> -  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = "__sync_lock_test_and_set_8";
> -  Names[RTLIB::SYNC_FETCH_AND_ADD_1] = "__sync_fetch_and_add_1";
> -  Names[RTLIB::SYNC_FETCH_AND_ADD_2] = "__sync_fetch_and_add_2";
> -  Names[RTLIB::SYNC_FETCH_AND_ADD_4] = "__sync_fetch_and_add_4";
> -  Names[RTLIB::SYNC_FETCH_AND_ADD_8] = "__sync_fetch_and_add_8";
> -  Names[RTLIB::SYNC_FETCH_AND_SUB_1] = "__sync_fetch_and_sub_1";
> -  Names[RTLIB::SYNC_FETCH_AND_SUB_2] = "__sync_fetch_and_sub_2";
> -  Names[RTLIB::SYNC_FETCH_AND_SUB_4] = "__sync_fetch_and_sub_4";
> -  Names[RTLIB::SYNC_FETCH_AND_SUB_8] = "__sync_fetch_and_sub_8";
> -  Names[RTLIB::SYNC_FETCH_AND_AND_1] = "__sync_fetch_and_and_1";
> -  Names[RTLIB::SYNC_FETCH_AND_AND_2] = "__sync_fetch_and_and_2";
> -  Names[RTLIB::SYNC_FETCH_AND_AND_4] = "__sync_fetch_and_and_4";
> -  Names[RTLIB::SYNC_FETCH_AND_AND_8] = "__sync_fetch_and_and_8";
> -  Names[RTLIB::SYNC_FETCH_AND_OR_1] = "__sync_fetch_and_or_1";
> -  Names[RTLIB::SYNC_FETCH_AND_OR_2] = "__sync_fetch_and_or_2";
> -  Names[RTLIB::SYNC_FETCH_AND_OR_4] = "__sync_fetch_and_or_4";
> -  Names[RTLIB::SYNC_FETCH_AND_OR_8] = "__sync_fetch_and_or_8";
> -  Names[RTLIB::SYNC_FETCH_AND_XOR_1] = "__sync_fetch_and_xor_1";
> -  Names[RTLIB::SYNC_FETCH_AND_XOR_2] = "__sync_fetch_and_xor_2";
> -  Names[RTLIB::SYNC_FETCH_AND_XOR_4] = "__sync_fetch_and_xor_4";
> -  Names[RTLIB::SYNC_FETCH_AND_XOR_8] = "__sync_fetch_and_xor_8";
> -  Names[RTLIB::SYNC_FETCH_AND_NAND_1] = "__sync_fetch_and_nand_1";
> -  Names[RTLIB::SYNC_FETCH_AND_NAND_2] = "__sync_fetch_and_nand_2";
> -  Names[RTLIB::SYNC_FETCH_AND_NAND_4] = "__sync_fetch_and_nand_4";
> -  Names[RTLIB::SYNC_FETCH_AND_NAND_8] = "__sync_fetch_and_nand_8";
> -}
> -
> -/// InitLibcallCallingConvs - Set default libcall CallingConvs.
> -///
> -static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
> -  for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
> -    CCs[i] = CallingConv::C;
> -  }
> -}
> -
>  /// getFPEXT - Return the FPEXT_*_* value for the given types, or
>  /// UNKNOWN_LIBCALL if there is none.
>  RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
> @@ -571,447 +253,15 @@
>    return UNKNOWN_LIBCALL;
>  }
>
> -/// InitCmpLibcallCCs - Set default comparison libcall CC.
> -///
> -static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
> -  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
> -  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
> -  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
> -  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
> -  CCs[RTLIB::UNE_F32] = ISD::SETNE;
> -  CCs[RTLIB::UNE_F64] = ISD::SETNE;
> -  CCs[RTLIB::UNE_F128] = ISD::SETNE;
> -  CCs[RTLIB::OGE_F32] = ISD::SETGE;
> -  CCs[RTLIB::OGE_F64] = ISD::SETGE;
> -  CCs[RTLIB::OGE_F128] = ISD::SETGE;
> -  CCs[RTLIB::OLT_F32] = ISD::SETLT;
> -  CCs[RTLIB::OLT_F64] = ISD::SETLT;
> -  CCs[RTLIB::OLT_F128] = ISD::SETLT;
> -  CCs[RTLIB::OLE_F32] = ISD::SETLE;
> -  CCs[RTLIB::OLE_F64] = ISD::SETLE;
> -  CCs[RTLIB::OLE_F128] = ISD::SETLE;
> -  CCs[RTLIB::OGT_F32] = ISD::SETGT;
> -  CCs[RTLIB::OGT_F64] = ISD::SETGT;
> -  CCs[RTLIB::OGT_F128] = ISD::SETGT;
> -  CCs[RTLIB::UO_F32] = ISD::SETNE;
> -  CCs[RTLIB::UO_F64] = ISD::SETNE;
> -  CCs[RTLIB::UO_F128] = ISD::SETNE;
> -  CCs[RTLIB::O_F32] = ISD::SETEQ;
> -  CCs[RTLIB::O_F64] = ISD::SETEQ;
> -  CCs[RTLIB::O_F128] = ISD::SETEQ;
> -}
> -
>  /// NOTE: The constructor takes ownership of TLOF.
>  TargetLowering::TargetLowering(const TargetMachine &tm,
>                                 const TargetLoweringObjectFile *tlof)
> -  : TM(tm), TD(TM.getDataLayout()), TLOF(*tlof) {
> -  // All operations default to being supported.
> -  memset(OpActions, 0, sizeof(OpActions));
> -  memset(LoadExtActions, 0, sizeof(LoadExtActions));
> -  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
> -  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
> -  memset(CondCodeActions, 0, sizeof(CondCodeActions));
> -
> -  // Set default actions for various operations.
> -  for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
> -    // Default all indexed load / store to expand.
> -    for (unsigned IM = (unsigned)ISD::PRE_INC;
> -         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
> -      setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand);
> -      setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand);
> -    }
> -
> -    // These operations default to expand.
> -    setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand);
> -    setOperationAction(ISD::CONCAT_VECTORS, (MVT::SimpleValueType)VT, Expand);
> -  }
> -
> -  // Most targets ignore the @llvm.prefetch intrinsic.
> -  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);
> -
> -  // ConstantFP nodes default to expand.  Targets can either change this to
> -  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
> -  // to optimize expansions for certain constants.
> -  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
> -  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
> -  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
> -  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
> -  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);
> -
> -  // These library functions default to expand.
> -  setOperationAction(ISD::FLOG ,  MVT::f16, Expand);
> -  setOperationAction(ISD::FLOG2,  MVT::f16, Expand);
> -  setOperationAction(ISD::FLOG10, MVT::f16, Expand);
> -  setOperationAction(ISD::FEXP ,  MVT::f16, Expand);
> -  setOperationAction(ISD::FEXP2,  MVT::f16, Expand);
> -  setOperationAction(ISD::FFLOOR, MVT::f16, Expand);
> -  setOperationAction(ISD::FNEARBYINT, MVT::f16, Expand);
> -  setOperationAction(ISD::FCEIL,  MVT::f16, Expand);
> -  setOperationAction(ISD::FRINT,  MVT::f16, Expand);
> -  setOperationAction(ISD::FTRUNC, MVT::f16, Expand);
> -  setOperationAction(ISD::FLOG ,  MVT::f32, Expand);
> -  setOperationAction(ISD::FLOG2,  MVT::f32, Expand);
> -  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
> -  setOperationAction(ISD::FEXP ,  MVT::f32, Expand);
> -  setOperationAction(ISD::FEXP2,  MVT::f32, Expand);
> -  setOperationAction(ISD::FFLOOR, MVT::f32, Expand);
> -  setOperationAction(ISD::FNEARBYINT, MVT::f32, Expand);
> -  setOperationAction(ISD::FCEIL,  MVT::f32, Expand);
> -  setOperationAction(ISD::FRINT,  MVT::f32, Expand);
> -  setOperationAction(ISD::FTRUNC, MVT::f32, Expand);
> -  setOperationAction(ISD::FLOG ,  MVT::f64, Expand);
> -  setOperationAction(ISD::FLOG2,  MVT::f64, Expand);
> -  setOperationAction(ISD::FLOG10, MVT::f64, Expand);
> -  setOperationAction(ISD::FEXP ,  MVT::f64, Expand);
> -  setOperationAction(ISD::FEXP2,  MVT::f64, Expand);
> -  setOperationAction(ISD::FFLOOR, MVT::f64, Expand);
> -  setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
> -  setOperationAction(ISD::FCEIL,  MVT::f64, Expand);
> -  setOperationAction(ISD::FRINT,  MVT::f64, Expand);
> -  setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
> -  setOperationAction(ISD::FLOG ,  MVT::f128, Expand);
> -  setOperationAction(ISD::FLOG2,  MVT::f128, Expand);
> -  setOperationAction(ISD::FLOG10, MVT::f128, Expand);
> -  setOperationAction(ISD::FEXP ,  MVT::f128, Expand);
> -  setOperationAction(ISD::FEXP2,  MVT::f128, Expand);
> -  setOperationAction(ISD::FFLOOR, MVT::f128, Expand);
> -  setOperationAction(ISD::FNEARBYINT, MVT::f128, Expand);
> -  setOperationAction(ISD::FCEIL,  MVT::f128, Expand);
> -  setOperationAction(ISD::FRINT,  MVT::f128, Expand);
> -  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
> -
> -  // Default ISD::TRAP to expand (which turns it into abort).
> -  setOperationAction(ISD::TRAP, MVT::Other, Expand);
> -
> -  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
> -  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
> -  //
> -  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
> -
> -  IsLittleEndian = TD->isLittleEndian();
> -  PointerTy = MVT::getIntegerVT(8*TD->getPointerSize(0));
> -  memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
> -  memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
> -  maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
> -  maxStoresPerMemsetOptSize = maxStoresPerMemcpyOptSize
> -    = maxStoresPerMemmoveOptSize = 4;
> -  benefitFromCodePlacementOpt = false;
> -  UseUnderscoreSetJmp = false;
> -  UseUnderscoreLongJmp = false;
> -  SelectIsExpensive = false;
> -  IntDivIsCheap = false;
> -  Pow2DivIsCheap = false;
> -  JumpIsExpensive = false;
> -  predictableSelectIsExpensive = false;
> -  StackPointerRegisterToSaveRestore = 0;
> -  ExceptionPointerRegister = 0;
> -  ExceptionSelectorRegister = 0;
> -  BooleanContents = UndefinedBooleanContent;
> -  BooleanVectorContents = UndefinedBooleanContent;
> -  SchedPreferenceInfo = Sched::ILP;
> -  JumpBufSize = 0;
> -  JumpBufAlignment = 0;
> -  MinFunctionAlignment = 0;
> -  PrefFunctionAlignment = 0;
> -  PrefLoopAlignment = 0;
> -  MinStackArgumentAlignment = 1;
> -  ShouldFoldAtomicFences = false;
> -  InsertFencesForAtomic = false;
> -  SupportJumpTables = true;
> -  MinimumJumpTableEntries = 4;
> -
> -  InitLibcallNames(LibcallRoutineNames);
> -  InitCmpLibcallCCs(CmpLibcallCCs);
> -  InitLibcallCallingConvs(LibcallCallingConvs);
> -}
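
(These are only the target-independent defaults; every backend constructor
then flips the entries it actually supports. A minimal sketch of such an
override -- MyTarget and its register class are made up for illustration:

  MyTargetLowering::MyTargetLowering(MyTargetMachine &TM)
      : TargetLoweringBase(TM, new TargetLoweringObjectFileELF()) {
    addRegisterClass(MVT::i32, &MyTarget::GPRRegClass); // i32 becomes legal
    setOperationAction(ISD::SDIV, MVT::i32, Expand);    // no hw divider
    computeRegisterProperties();                        // derive the tables
  }

With this patch the only change is which base class those defaults live in.)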
> -
> -TargetLowering::~TargetLowering() {
> -  delete &TLOF;
> -}
> -
> -MVT TargetLowering::getShiftAmountTy(EVT LHSTy) const {
> -  return MVT::getIntegerVT(8*TD->getPointerSize(0));
> -}
> -
> -/// canOpTrap - Returns true if the operation can trap for the value type.
> -/// VT must be a legal type.
> -bool TargetLowering::canOpTrap(unsigned Op, EVT VT) const {
> -  assert(isTypeLegal(VT));
> -  switch (Op) {
> -  default:
> -    return false;
> -  case ISD::FDIV:
> -  case ISD::FREM:
> -  case ISD::SDIV:
> -  case ISD::UDIV:
> -  case ISD::SREM:
> -  case ISD::UREM:
> -    return true;
> -  }
> -}
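
(A typical consumer of canOpTrap is a speculation or hoisting pass deciding
whether an operation can be executed unconditionally. Hedged sketch:

  // Don't speculate operations that may trap, e.g. division by zero.
  if (TLI->canOpTrap(ISD::SDIV, MVT::i32))
    return false;  // leave the instruction guarded by its branch

The FP cases only trap under non-default exception behavior, but the
conservative default here is to report them as trapping too.)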
> -
> -
> -static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
> -                                          unsigned &NumIntermediates,
> -                                          MVT &RegisterVT,
> -                                          TargetLowering *TLI) {
> -  // Figure out the right, legal destination reg to copy into.
> -  unsigned NumElts = VT.getVectorNumElements();
> -  MVT EltTy = VT.getVectorElementType();
> -
> -  unsigned NumVectorRegs = 1;
> -
> -  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
> -  // could break down into LHS/RHS like LegalizeDAG does.
> -  if (!isPowerOf2_32(NumElts)) {
> -    NumVectorRegs = NumElts;
> -    NumElts = 1;
> -  }
> -
> -  // Divide the input until we get to a supported size.  This will always
> -  // end with a scalar if the target doesn't support vectors.
> -  while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
> -    NumElts >>= 1;
> -    NumVectorRegs <<= 1;
> -  }
> -
> -  NumIntermediates = NumVectorRegs;
> -
> -  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
> -  if (!TLI->isTypeLegal(NewVT))
> -    NewVT = EltTy;
> -  IntermediateVT = NewVT;
> -
> -  unsigned NewVTSize = NewVT.getSizeInBits();
> -
> -  // Convert sizes such as i33 to i64.
> -  if (!isPowerOf2_32(NewVTSize))
> -    NewVTSize = NextPowerOf2(NewVTSize);
> -
> -  MVT DestVT = TLI->getRegisterType(NewVT);
> -  RegisterVT = DestVT;
> -  if (EVT(DestVT).bitsLT(NewVT))    // Value is expanded, e.g. i64 -> i16.
> -    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
> -
> -  // Otherwise, promotion or legal types use the same number of registers as
> -  // the vector decimated to the appropriate level.
> -  return NumVectorRegs;
> -}
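
(Worked example for the halving loop above, assuming only v4f32 is legal:

  // VT = v8f32: NumElts 8 -> 4, NumVectorRegs 1 -> 2
  // => IntermediateVT = RegisterVT = v4f32, and the function returns 2.
  // If no vector type is legal at all, the loop bottoms out at the
  // scalar element type instead.

i.e. the breakdown is purely by repeated halving.)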
> -
> -/// isLegalRC - Return true if the value types that can be represented by the
> -/// specified register class are all legal.
> -bool TargetLowering::isLegalRC(const TargetRegisterClass *RC) const {
> -  for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
> -       I != E; ++I) {
> -    if (isTypeLegal(*I))
> -      return true;
> -  }
> -  return false;
> -}
> -
> -/// findRepresentativeClass - Return the largest legal super-reg register class
> -/// of the register class for the specified type and its associated "cost".
> -std::pair<const TargetRegisterClass*, uint8_t>
> -TargetLowering::findRepresentativeClass(MVT VT) const {
> -  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
> -  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
> -  if (!RC)
> -    return std::make_pair(RC, 0);
> -
> -  // Compute the set of all super-register classes.
> -  BitVector SuperRegRC(TRI->getNumRegClasses());
> -  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
> -    SuperRegRC.setBitsInMask(RCI.getMask());
> -
> -  // Find the first legal register class with the largest spill size.
> -  const TargetRegisterClass *BestRC = RC;
> -  for (int i = SuperRegRC.find_first(); i >= 0; i = SuperRegRC.find_next(i)) {
> -    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
> -    // We want the largest possible spill size.
> -    if (SuperRC->getSize() <= BestRC->getSize())
> -      continue;
> -    if (!isLegalRC(SuperRC))
> -      continue;
> -    BestRC = SuperRC;
> -  }
> -  return std::make_pair(BestRC, 1);
> -}
> -
> -/// computeRegisterProperties - Once all of the register classes are added,
> -/// this allows us to compute derived properties we expose.
> -void TargetLowering::computeRegisterProperties() {
> -  assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE &&
> -         "Too many value types for ValueTypeActions to hold!");
> -
> -  // Everything defaults to needing one register.
> -  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
> -    NumRegistersForVT[i] = 1;
> -    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
> -  }
> -  // ...except isVoid, which doesn't need any registers.
> -  NumRegistersForVT[MVT::isVoid] = 0;
> -
> -  // Find the largest integer register class.
> -  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
> -  for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
> -    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
> -
> -  // Every integer value type larger than this largest register takes twice as
> -  // many registers to represent as the previous ValueType.
> -  for (unsigned ExpandedReg = LargestIntReg + 1;
> -       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
> -    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
> -    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
> -    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
> -    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
> -                                   TypeExpandInteger);
> -  }
> -
> -  // Inspect all of the ValueType's smaller than the largest integer
> -  // register to see which ones need promotion.
> -  unsigned LegalIntReg = LargestIntReg;
> -  for (unsigned IntReg = LargestIntReg - 1;
> -       IntReg >= (unsigned)MVT::i1; --IntReg) {
> -    MVT IVT = (MVT::SimpleValueType)IntReg;
> -    if (isTypeLegal(IVT)) {
> -      LegalIntReg = IntReg;
> -    } else {
> -      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
> -        (const MVT::SimpleValueType)LegalIntReg;
> -      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
> -    }
> -  }
> -
> -  // ppcf128 type is really two f64's.
> -  if (!isTypeLegal(MVT::ppcf128)) {
> -    NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
> -    RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
> -    TransformToType[MVT::ppcf128] = MVT::f64;
> -    ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
> -  }
> -
> -  // Decide how to handle f64. If the target does not have native f64 support,
> -  // expand it to i64 and we will be generating soft float library calls.
> -  if (!isTypeLegal(MVT::f64)) {
> -    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
> -    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
> -    TransformToType[MVT::f64] = MVT::i64;
> -    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
> -  }
> -
> -  // Decide how to handle f32. If the target does not have native support for
> -  // f32, promote it to f64 if it is legal. Otherwise, expand it to i32.
> -  if (!isTypeLegal(MVT::f32)) {
> -    if (isTypeLegal(MVT::f64)) {
> -      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64];
> -      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64];
> -      TransformToType[MVT::f32] = MVT::f64;
> -      ValueTypeActions.setTypeAction(MVT::f32, TypePromoteInteger);
> -    } else {
> -      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
> -      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
> -      TransformToType[MVT::f32] = MVT::i32;
> -      ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
> -    }
> -  }
> -
> -  // Loop over all of the vector value types to see which need transformations.
> -  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
> -       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
> -    MVT VT = (MVT::SimpleValueType)i;
> -    if (isTypeLegal(VT)) continue;
> -
> -    // Determine if there is a legal wider type.  If so, we should promote to
> -    // that wider vector type.
> -    MVT EltVT = VT.getVectorElementType();
> -    unsigned NElts = VT.getVectorNumElements();
> -    if (NElts != 1 && !shouldSplitVectorElementType(EltVT)) {
> -      bool IsLegalWiderType = false;
> -      // First try to promote the elements of integer vectors. If no legal
> -      // promotion was found, fallback to the widen-vector method.
> -      for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
> -        MVT SVT = (MVT::SimpleValueType)nVT;
> -        // Promote vectors of integers to vectors with the same number
> -        // of elements, with a wider element type.
> -        if (SVT.getVectorElementType().getSizeInBits() > EltVT.getSizeInBits()
> -            && SVT.getVectorNumElements() == NElts &&
> -            isTypeLegal(SVT) && SVT.getScalarType().isInteger()) {
> -          TransformToType[i] = SVT;
> -          RegisterTypeForVT[i] = SVT;
> -          NumRegistersForVT[i] = 1;
> -          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
> -          IsLegalWiderType = true;
> -          break;
> -        }
> -      }
> -
> -      if (IsLegalWiderType) continue;
> -
> -      // Try to widen the vector.
> -      for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
> -        MVT SVT = (MVT::SimpleValueType)nVT;
> -        if (SVT.getVectorElementType() == EltVT &&
> -            SVT.getVectorNumElements() > NElts &&
> -            isTypeLegal(SVT)) {
> -          TransformToType[i] = SVT;
> -          RegisterTypeForVT[i] = SVT;
> -          NumRegistersForVT[i] = 1;
> -          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
> -          IsLegalWiderType = true;
> -          break;
> -        }
> -      }
> -      if (IsLegalWiderType) continue;
> -    }
> -
> -    MVT IntermediateVT;
> -    MVT RegisterVT;
> -    unsigned NumIntermediates;
> -    NumRegistersForVT[i] =
> -      getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates,
> -                                RegisterVT, this);
> -    RegisterTypeForVT[i] = RegisterVT;
> -
> -    MVT NVT = VT.getPow2VectorType();
> -    if (NVT == VT) {
> -      // Type is already a power of 2.  The default action is to split.
> -      TransformToType[i] = MVT::Other;
> -      unsigned NumElts = VT.getVectorNumElements();
> -      ValueTypeActions.setTypeAction(VT,
> -            NumElts > 1 ? TypeSplitVector : TypeScalarizeVector);
> -    } else {
> -      TransformToType[i] = NVT;
> -      ValueTypeActions.setTypeAction(VT, TypeWidenVector);
> -    }
> -  }
> -
> -  // Determine the 'representative' register class for each value type.
> -  // A representative register class is the largest (meaning one which is
> -  // not a sub-register class / subreg register class) legal register class for
> -  // a group of value types. For example, on i386 the representative class for
> -  // i8, i16, and i32 would be GR32; on x86_64 it's GR64.
> -  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
> -    const TargetRegisterClass* RRC;
> -    uint8_t Cost;
> -    tie(RRC, Cost) =  findRepresentativeClass((MVT::SimpleValueType)i);
> -    RepRegClassForVT[i] = RRC;
> -    RepRegClassCostForVT[i] = Cost;
> -  }
> -}
> +  : TargetLoweringBase(tm, tlof) {}
>
>  const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
>    return NULL;
>  }
>
> -EVT TargetLowering::getSetCCResultType(EVT VT) const {
> -  assert(!VT.isVector() && "No default SetCC type for vectors!");
> -  return getPointerTy(0).SimpleTy;
> -}
> -
> -MVT::SimpleValueType TargetLowering::getCmpLibcallReturnType() const {
> -  return MVT::i32; // return the default value
> -}
> -
>  /// Check whether a given call node is in tail position within its function. If
>  /// so, it sets Chain to the input chain of the tail call.
>  bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
> @@ -1167,80 +417,6 @@
>    }
>  }
>
> -/// getVectorTypeBreakdown - Vector types are broken down into some number of
> -/// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
> -/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
> -/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
> -///
> -/// This method returns the number of registers needed, and the VT for each
> -/// register.  It also returns the VT and quantity of the intermediate values
> -/// before they are promoted/expanded.
> -///
> -unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
> -                                                EVT &IntermediateVT,
> -                                                unsigned &NumIntermediates,
> -                                                MVT &RegisterVT) const {
> -  unsigned NumElts = VT.getVectorNumElements();
> -
> -  // If there is a wider vector type with the same element type as this one,
> -  // or a promoted vector type that has the same number of elements which
> -  // are wider, then we should convert to that legal vector type.
> -  // This handles things like <2 x float> -> <4 x float> and
> -  // <4 x i1> -> <4 x i32>.
> -  LegalizeTypeAction TA = getTypeAction(Context, VT);
> -  if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
> -    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
> -    if (isTypeLegal(RegisterEVT)) {
> -      IntermediateVT = RegisterEVT;
> -      RegisterVT = RegisterEVT.getSimpleVT();
> -      NumIntermediates = 1;
> -      return 1;
> -    }
> -  }
> -
> -  // Figure out the right, legal destination reg to copy into.
> -  EVT EltTy = VT.getVectorElementType();
> -
> -  unsigned NumVectorRegs = 1;
> -
> -  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
> -  // could break down into LHS/RHS like LegalizeDAG does.
> -  if (!isPowerOf2_32(NumElts)) {
> -    NumVectorRegs = NumElts;
> -    NumElts = 1;
> -  }
> -
> -  // Divide the input until we get to a supported size.  This will always
> -  // end with a scalar if the target doesn't support vectors.
> -  while (NumElts > 1 && !isTypeLegal(
> -                                   EVT::getVectorVT(Context, EltTy, NumElts))) {
> -    NumElts >>= 1;
> -    NumVectorRegs <<= 1;
> -  }
> -
> -  NumIntermediates = NumVectorRegs;
> -
> -  EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
> -  if (!isTypeLegal(NewVT))
> -    NewVT = EltTy;
> -  IntermediateVT = NewVT;
> -
> -  MVT DestVT = getRegisterType(Context, NewVT);
> -  RegisterVT = DestVT;
> -  unsigned NewVTSize = NewVT.getSizeInBits();
> -
> -  // Convert sizes such as i33 to i64.
> -  if (!isPowerOf2_32(NewVTSize))
> -    NewVTSize = NextPowerOf2(NewVTSize);
> -
> -  if (EVT(DestVT).bitsLT(NewVT))   // Value is expanded, e.g. i64 -> i16.
> -    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
> -
> -  // Otherwise, promotion or legal types use the same number of registers as
> -  // the vector decimated to the appropriate level.
> -  return NumVectorRegs;
> -}
> -
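
(Call-site sketch for the EVT flavor above -- Ctx and TLI are stand-ins:

  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs = TLI.getVectorTypeBreakdown(Ctx, MVT::v8f32,
                                                IntermediateVT,
                                                NumIntermediates, RegisterVT);
  // With only v4f32 legal: NumRegs == NumIntermediates == 2 and
  // IntermediateVT == RegisterVT == v4f32, matching the doc comment.
)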
>  /// Get the EVTs and ArgFlags collections that represent the legalized return
>  /// type of the given function.  This does not require a DAG or a return value,
>  /// and is suitable for use before any DAGs for the function are constructed.
> @@ -1291,13 +467,6 @@
>    }
>  }
>
> -/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
> -/// function arguments in the caller parameter area.  This is the actual
> -/// alignment, not its logarithm.
> -unsigned TargetLowering::getByValTypeAlignment(Type *Ty) const {
> -  return TD->getCallFrameTypeAlignment(Ty);
> -}
> -
>  /// getJumpTableEncoding - Return the entry encoding for a jump table in the
>  /// current function.  The returned value is a member of the
>  /// MachineJumpTableInfo::JTEntryKind enum.
> @@ -1354,103 +523,6 @@
>  }
>
> -//===----------------------------------------------------------------------===//
> -//  TargetTransformInfo Helpers
> -//===----------------------------------------------------------------------===//
> -
> -int TargetLowering::InstructionOpcodeToISD(unsigned Opcode) const {
> -  enum InstructionOpcodes {
> -#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
> -#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
> -#include "llvm/IR/Instruction.def"
> -  };
> -  switch (static_cast<InstructionOpcodes>(Opcode)) {
> -  case Ret:            return 0;
> -  case Br:             return 0;
> -  case Switch:         return 0;
> -  case IndirectBr:     return 0;
> -  case Invoke:         return 0;
> -  case Resume:         return 0;
> -  case Unreachable:    return 0;
> -  case Add:            return ISD::ADD;
> -  case FAdd:           return ISD::FADD;
> -  case Sub:            return ISD::SUB;
> -  case FSub:           return ISD::FSUB;
> -  case Mul:            return ISD::MUL;
> -  case FMul:           return ISD::FMUL;
> -  case UDiv:           return ISD::UDIV;
> -  case SDiv:           return ISD::SDIV;
> -  case FDiv:           return ISD::FDIV;
> -  case URem:           return ISD::UREM;
> -  case SRem:           return ISD::SREM;
> -  case FRem:           return ISD::FREM;
> -  case Shl:            return ISD::SHL;
> -  case LShr:           return ISD::SRL;
> -  case AShr:           return ISD::SRA;
> -  case And:            return ISD::AND;
> -  case Or:             return ISD::OR;
> -  case Xor:            return ISD::XOR;
> -  case Alloca:         return 0;
> -  case Load:           return ISD::LOAD;
> -  case Store:          return ISD::STORE;
> -  case GetElementPtr:  return 0;
> -  case Fence:          return 0;
> -  case AtomicCmpXchg:  return 0;
> -  case AtomicRMW:      return 0;
> -  case Trunc:          return ISD::TRUNCATE;
> -  case ZExt:           return ISD::ZERO_EXTEND;
> -  case SExt:           return ISD::SIGN_EXTEND;
> -  case FPToUI:         return ISD::FP_TO_UINT;
> -  case FPToSI:         return ISD::FP_TO_SINT;
> -  case UIToFP:         return ISD::UINT_TO_FP;
> -  case SIToFP:         return ISD::SINT_TO_FP;
> -  case FPTrunc:        return ISD::FP_ROUND;
> -  case FPExt:          return ISD::FP_EXTEND;
> -  case PtrToInt:       return ISD::BITCAST;
> -  case IntToPtr:       return ISD::BITCAST;
> -  case BitCast:        return ISD::BITCAST;
> -  case ICmp:           return ISD::SETCC;
> -  case FCmp:           return ISD::SETCC;
> -  case PHI:            return 0;
> -  case Call:           return 0;
> -  case Select:         return ISD::SELECT;
> -  case UserOp1:        return 0;
> -  case UserOp2:        return 0;
> -  case VAArg:          return 0;
> -  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
> -  case InsertElement:  return ISD::INSERT_VECTOR_ELT;
> -  case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
> -  case ExtractValue:   return ISD::MERGE_VALUES;
> -  case InsertValue:    return ISD::MERGE_VALUES;
> -  case LandingPad:     return 0;
> -  }
> -
> -  llvm_unreachable("Unknown instruction type encountered!");
> -}
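
(This mapping is what lets the cost model ask ISel-style questions about
plain IR. A rough sketch of the kind of query BasicTTI makes, simplified
from memory:

  int ISDOpc = TLI->InstructionOpcodeToISD(Instruction::Mul); // ISD::MUL
  if (ISDOpc && TLI->isOperationLegalOrCustom(ISDOpc, MVT::i32))
    Cost = 1;  // expect a single machine instruction

and it's exactly this kind of static, non-virtual use from libCodeGen that
broke the layering this commit is trying to untangle.)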
> -
> -std::pair<unsigned, MVT>
> -TargetLowering::getTypeLegalizationCost(Type *Ty) const {
> -  LLVMContext &C = Ty->getContext();
> -  EVT MTy = getValueType(Ty);
> -
> -  unsigned Cost = 1;
> -  // We keep legalizing the type until we find a legal kind. We assume that
> -  // the only operation that costs anything is the split. After splitting
> -  // we need to handle two types.
> -  while (true) {
> -    LegalizeKind LK = getTypeConversion(C, MTy);
> -
> -    if (LK.first == TypeLegal)
> -      return std::make_pair(Cost, MTy.getSimpleVT());
> -
> -    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
> -      Cost *= 2;
> -
> -    // Keep legalizing the type.
> -    MTy = LK.second;
> -  }
> -}
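
(Worked example for the loop above, assuming only v2i64 is legal:

  // getTypeLegalizationCost(<16 x i64>):
  //   v16i64 -> v8i64 -> v4i64 -> v2i64   (three TypeSplitVector steps)
  //   returns {Cost = 2*2*2 = 8, MVT::v2i64}

so the cost is 2^(number of splits), with everything else treated as free.)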
> -
> -//===----------------------------------------------------------------------===//
>  //  Optimization Methods
>  //===----------------------------------------------------------------------===//
>
> @@ -2394,7 +1466,7 @@
>            APInt newMask = APInt::getLowBitsSet(maskWidth, width);
>            for (unsigned offset=0; offset<origWidth/width; offset++) {
>              if ((newMask & Mask) == Mask) {
> -              if (!TD->isLittleEndian())
> +              if (!getDataLayout()->isLittleEndian())
>                  bestOffset = (origWidth/width - offset - 1) * (width/8);
>                else
>                  bestOffset = (uint64_t)offset * (width/8);
> @@ -3199,7 +2271,7 @@
>      std::make_pair(0u, static_cast<const TargetRegisterClass*>(0));
>
>    // Figure out which register class contains this reg.
> -  const TargetRegisterInfo *RI = TM.getRegisterInfo();
> +  const TargetRegisterInfo *RI = getTargetMachine().getRegisterInfo();
>    for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
>         E = RI->regclass_end(); RCI != E; ++RCI) {
>      const TargetRegisterClass *RC = *RCI;
> @@ -3323,7 +2395,7 @@
>        // If OpTy is not a single value, it may be a struct/union that we
>        // can tile with integers.
>        if (!OpTy->isSingleValueType() && OpTy->isSized()) {
> -        unsigned BitSize = TD->getTypeSizeInBits(OpTy);
> +        unsigned BitSize = getDataLayout()->getTypeSizeInBits(OpTy);
>          switch (BitSize) {
>          default: break;
>          case 1:
> @@ -3338,7 +2410,7 @@
>          }
>        } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
>          OpInfo.ConstraintVT = MVT::getIntegerVT(
> -            8*TD->getPointerSize(PT->getAddressSpace()));
> +            8*getDataLayout()->getPointerSize(PT->getAddressSpace()));
>        } else {
>          OpInfo.ConstraintVT = MVT::getVT(OpTy, true);
>        }
> @@ -3633,44 +2705,6 @@
>    }
>  }
>
> -//===----------------------------------------------------------------------===//
> -//  Loop Strength Reduction hooks
> -//===----------------------------------------------------------------------===//
> -
> -/// isLegalAddressingMode - Return true if the addressing mode represented
> -/// by AM is legal for this target, for a load/store of the specified type.
> -bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
> -                                           Type *Ty) const {
> -  // The default implementation of this implements a conservative RISCy, r+r and
> -  // r+i addr mode.
> -
> -  // Allows a sign-extended 16-bit immediate field.
> -  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
> -    return false;
> -
> -  // No global is ever allowed as a base.
> -  if (AM.BaseGV)
> -    return false;
> -
> -  // Only support r+r,
> -  switch (AM.Scale) {
> -  case 0:  // "r+i" or just "i", depending on HasBaseReg.
> -    break;
> -  case 1:
> -    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
> -      return false;
> -    // Otherwise we have r+r or r+i.
> -    break;
> -  case 2:
> -    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
> -      return false;
> -    // Allow 2*r as r+r.
> -    break;
> -  }
> -
> -  return true;
> -}
> -
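
(Quick illustration of what the default accepts -- AM fields per the
AddrMode struct; Ty is unused by this implementation:

  TargetLowering::AddrMode AM;
  AM.HasBaseReg = true;
  AM.BaseOffs   = 40;        // in range for the signed 16-bit field
  AM.Scale      = 0;
  TLI->isLegalAddressingMode(AM, Ty);  // true:  "r + 40"
  AM.Scale      = 1;                   // add an index register
  TLI->isLegalAddressingMode(AM, Ty);  // false: "r + r + 40" is rejected

Targets with richer addressing override this wholesale.)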
>  /// BuildExactSDIV - Given an exact SDIV by a constant, create a multiplication
>  /// with the multiplicative inverse of the constant.
>  SDValue TargetLowering::BuildExactSDIV(SDValue Op1, SDValue Op2, DebugLoc dl,
>
> Modified: llvm/trunk/lib/CodeGen/SjLjEHPrepare.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SjLjEHPrepare.cpp?rev=172246&r1=172245&r2=172246&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/CodeGen/SjLjEHPrepare.cpp (original)
> +++ llvm/trunk/lib/CodeGen/SjLjEHPrepare.cpp Fri Jan 11 14:05:37 2013
> @@ -43,7 +43,7 @@
>
>  namespace {
>    class SjLjEHPrepare : public FunctionPass {
> -    const TargetLowering *TLI;
> +    const TargetLoweringBase *TLI;
>      Type *FunctionContextTy;
>      Constant *RegisterFn;
>      Constant *UnregisterFn;
> @@ -58,7 +58,7 @@
>      AllocaInst *FuncCtx;
>    public:
>      static char ID; // Pass identification, replacement for typeid
> -    explicit SjLjEHPrepare(const TargetLowering *tli = NULL)
> +    explicit SjLjEHPrepare(const TargetLoweringBase *tli = NULL)
>        : FunctionPass(ID), TLI(tli) { }
>      bool doInitialization(Module &M);
>      bool runOnFunction(Function &F);
> @@ -82,7 +82,7 @@
>  char SjLjEHPrepare::ID = 0;
>
>  // Public Interface To the SjLjEHPrepare pass.
> -FunctionPass *llvm::createSjLjEHPreparePass(const TargetLowering *TLI) {
> +FunctionPass *llvm::createSjLjEHPreparePass(const TargetLoweringBase *TLI) {
>    return new SjLjEHPrepare(TLI);
>  }
>  // doInitialization - Set up declarations and types needed to process
>
> Modified: llvm/trunk/lib/CodeGen/StackProtector.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/StackProtector.cpp?rev=172246&r1=172245&r2=172246&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/CodeGen/StackProtector.cpp (original)
> +++ llvm/trunk/lib/CodeGen/StackProtector.cpp Fri Jan 11 14:05:37 2013
> @@ -36,7 +36,7 @@
>    class StackProtector : public FunctionPass {
>      /// TLI - Keep a pointer to a TargetLowering to consult for determining
>      /// target type sizes.
> -    const TargetLowering *TLI;
> +    const TargetLoweringBase *TLI;
>
>      Function *F;
>      Module *M;
> @@ -68,7 +68,7 @@
>      StackProtector() : FunctionPass(ID), TLI(0) {
>        initializeStackProtectorPass(*PassRegistry::getPassRegistry());
>      }
> -    StackProtector(const TargetLowering *tli)
> +    StackProtector(const TargetLoweringBase *tli)
>        : FunctionPass(ID), TLI(tli) {
>        initializeStackProtectorPass(*PassRegistry::getPassRegistry());
>      }
> @@ -85,7 +85,7 @@
>  INITIALIZE_PASS(StackProtector, "stack-protector",
>                  "Insert stack protectors", false, false)
>
> -FunctionPass *llvm::createStackProtectorPass(const TargetLowering *tli) {
> +FunctionPass *llvm::createStackProtectorPass(const TargetLoweringBase *tli) {
>    return new StackProtector(tli);
>  }
>
>
> Added: llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp?rev=172246&view=auto
>
> ==============================================================================
> --- llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp (added)
> +++ llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp Fri Jan 11 14:05:37 2013
> @@ -0,0 +1,1274 @@
> +//===-- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ---===//
> +//
> +//                     The LLVM Compiler Infrastructure
> +//
> +// This file is distributed under the University of Illinois Open Source
> +// License. See LICENSE.TXT for details.
> +//
> +//===----------------------------------------------------------------------===//
> +//
> +// This implements the TargetLoweringBase class.
> +//
> +//===----------------------------------------------------------------------===//
> +
> +#include "llvm/Target/TargetLowering.h"
> +#include "llvm/ADT/BitVector.h"
> +#include "llvm/ADT/STLExtras.h"
> +#include "llvm/CodeGen/Analysis.h"
> +#include "llvm/CodeGen/MachineFrameInfo.h"
> +#include "llvm/CodeGen/MachineFunction.h"
> +#include "llvm/CodeGen/MachineJumpTableInfo.h"
> +#include "llvm/IR/DataLayout.h"
> +#include "llvm/IR/DerivedTypes.h"
> +#include "llvm/IR/GlobalVariable.h"
> +#include "llvm/MC/MCAsmInfo.h"
> +#include "llvm/MC/MCExpr.h"
> +#include "llvm/Support/CommandLine.h"
> +#include "llvm/Support/ErrorHandling.h"
> +#include "llvm/Support/MathExtras.h"
> +#include "llvm/Target/TargetLoweringObjectFile.h"
> +#include "llvm/Target/TargetMachine.h"
> +#include "llvm/Target/TargetRegisterInfo.h"
> +#include <cctype>
> +using namespace llvm;
> +
> +/// InitLibcallNames - Set default libcall names.
> +///
> +static void InitLibcallNames(const char **Names) {
> +  Names[RTLIB::SHL_I16] = "__ashlhi3";
> +  Names[RTLIB::SHL_I32] = "__ashlsi3";
> +  Names[RTLIB::SHL_I64] = "__ashldi3";
> +  Names[RTLIB::SHL_I128] = "__ashlti3";
> +  Names[RTLIB::SRL_I16] = "__lshrhi3";
> +  Names[RTLIB::SRL_I32] = "__lshrsi3";
> +  Names[RTLIB::SRL_I64] = "__lshrdi3";
> +  Names[RTLIB::SRL_I128] = "__lshrti3";
> +  Names[RTLIB::SRA_I16] = "__ashrhi3";
> +  Names[RTLIB::SRA_I32] = "__ashrsi3";
> +  Names[RTLIB::SRA_I64] = "__ashrdi3";
> +  Names[RTLIB::SRA_I128] = "__ashrti3";
> +  Names[RTLIB::MUL_I8] = "__mulqi3";
> +  Names[RTLIB::MUL_I16] = "__mulhi3";
> +  Names[RTLIB::MUL_I32] = "__mulsi3";
> +  Names[RTLIB::MUL_I64] = "__muldi3";
> +  Names[RTLIB::MUL_I128] = "__multi3";
> +  Names[RTLIB::MULO_I32] = "__mulosi4";
> +  Names[RTLIB::MULO_I64] = "__mulodi4";
> +  Names[RTLIB::MULO_I128] = "__muloti4";
> +  Names[RTLIB::SDIV_I8] = "__divqi3";
> +  Names[RTLIB::SDIV_I16] = "__divhi3";
> +  Names[RTLIB::SDIV_I32] = "__divsi3";
> +  Names[RTLIB::SDIV_I64] = "__divdi3";
> +  Names[RTLIB::SDIV_I128] = "__divti3";
> +  Names[RTLIB::UDIV_I8] = "__udivqi3";
> +  Names[RTLIB::UDIV_I16] = "__udivhi3";
> +  Names[RTLIB::UDIV_I32] = "__udivsi3";
> +  Names[RTLIB::UDIV_I64] = "__udivdi3";
> +  Names[RTLIB::UDIV_I128] = "__udivti3";
> +  Names[RTLIB::SREM_I8] = "__modqi3";
> +  Names[RTLIB::SREM_I16] = "__modhi3";
> +  Names[RTLIB::SREM_I32] = "__modsi3";
> +  Names[RTLIB::SREM_I64] = "__moddi3";
> +  Names[RTLIB::SREM_I128] = "__modti3";
> +  Names[RTLIB::UREM_I8] = "__umodqi3";
> +  Names[RTLIB::UREM_I16] = "__umodhi3";
> +  Names[RTLIB::UREM_I32] = "__umodsi3";
> +  Names[RTLIB::UREM_I64] = "__umoddi3";
> +  Names[RTLIB::UREM_I128] = "__umodti3";
> +
> +  // These are generally not available.
> +  Names[RTLIB::SDIVREM_I8] = 0;
> +  Names[RTLIB::SDIVREM_I16] = 0;
> +  Names[RTLIB::SDIVREM_I32] = 0;
> +  Names[RTLIB::SDIVREM_I64] = 0;
> +  Names[RTLIB::SDIVREM_I128] = 0;
> +  Names[RTLIB::UDIVREM_I8] = 0;
> +  Names[RTLIB::UDIVREM_I16] = 0;
> +  Names[RTLIB::UDIVREM_I32] = 0;
> +  Names[RTLIB::UDIVREM_I64] = 0;
> +  Names[RTLIB::UDIVREM_I128] = 0;
> +
> +  Names[RTLIB::NEG_I32] = "__negsi2";
> +  Names[RTLIB::NEG_I64] = "__negdi2";
> +  Names[RTLIB::ADD_F32] = "__addsf3";
> +  Names[RTLIB::ADD_F64] = "__adddf3";
> +  Names[RTLIB::ADD_F80] = "__addxf3";
> +  Names[RTLIB::ADD_F128] = "__addtf3";
> +  Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
> +  Names[RTLIB::SUB_F32] = "__subsf3";
> +  Names[RTLIB::SUB_F64] = "__subdf3";
> +  Names[RTLIB::SUB_F80] = "__subxf3";
> +  Names[RTLIB::SUB_F128] = "__subtf3";
> +  Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
> +  Names[RTLIB::MUL_F32] = "__mulsf3";
> +  Names[RTLIB::MUL_F64] = "__muldf3";
> +  Names[RTLIB::MUL_F80] = "__mulxf3";
> +  Names[RTLIB::MUL_F128] = "__multf3";
> +  Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
> +  Names[RTLIB::DIV_F32] = "__divsf3";
> +  Names[RTLIB::DIV_F64] = "__divdf3";
> +  Names[RTLIB::DIV_F80] = "__divxf3";
> +  Names[RTLIB::DIV_F128] = "__divtf3";
> +  Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
> +  Names[RTLIB::REM_F32] = "fmodf";
> +  Names[RTLIB::REM_F64] = "fmod";
> +  Names[RTLIB::REM_F80] = "fmodl";
> +  Names[RTLIB::REM_F128] = "fmodl";
> +  Names[RTLIB::REM_PPCF128] = "fmodl";
> +  Names[RTLIB::FMA_F32] = "fmaf";
> +  Names[RTLIB::FMA_F64] = "fma";
> +  Names[RTLIB::FMA_F80] = "fmal";
> +  Names[RTLIB::FMA_F128] = "fmal";
> +  Names[RTLIB::FMA_PPCF128] = "fmal";
> +  Names[RTLIB::POWI_F32] = "__powisf2";
> +  Names[RTLIB::POWI_F64] = "__powidf2";
> +  Names[RTLIB::POWI_F80] = "__powixf2";
> +  Names[RTLIB::POWI_F128] = "__powitf2";
> +  Names[RTLIB::POWI_PPCF128] = "__powitf2";
> +  Names[RTLIB::SQRT_F32] = "sqrtf";
> +  Names[RTLIB::SQRT_F64] = "sqrt";
> +  Names[RTLIB::SQRT_F80] = "sqrtl";
> +  Names[RTLIB::SQRT_F128] = "sqrtl";
> +  Names[RTLIB::SQRT_PPCF128] = "sqrtl";
> +  Names[RTLIB::LOG_F32] = "logf";
> +  Names[RTLIB::LOG_F64] = "log";
> +  Names[RTLIB::LOG_F80] = "logl";
> +  Names[RTLIB::LOG_F128] = "logl";
> +  Names[RTLIB::LOG_PPCF128] = "logl";
> +  Names[RTLIB::LOG2_F32] = "log2f";
> +  Names[RTLIB::LOG2_F64] = "log2";
> +  Names[RTLIB::LOG2_F80] = "log2l";
> +  Names[RTLIB::LOG2_F128] = "log2l";
> +  Names[RTLIB::LOG2_PPCF128] = "log2l";
> +  Names[RTLIB::LOG10_F32] = "log10f";
> +  Names[RTLIB::LOG10_F64] = "log10";
> +  Names[RTLIB::LOG10_F80] = "log10l";
> +  Names[RTLIB::LOG10_F128] = "log10l";
> +  Names[RTLIB::LOG10_PPCF128] = "log10l";
> +  Names[RTLIB::EXP_F32] = "expf";
> +  Names[RTLIB::EXP_F64] = "exp";
> +  Names[RTLIB::EXP_F80] = "expl";
> +  Names[RTLIB::EXP_F128] = "expl";
> +  Names[RTLIB::EXP_PPCF128] = "expl";
> +  Names[RTLIB::EXP2_F32] = "exp2f";
> +  Names[RTLIB::EXP2_F64] = "exp2";
> +  Names[RTLIB::EXP2_F80] = "exp2l";
> +  Names[RTLIB::EXP2_F128] = "exp2l";
> +  Names[RTLIB::EXP2_PPCF128] = "exp2l";
> +  Names[RTLIB::SIN_F32] = "sinf";
> +  Names[RTLIB::SIN_F64] = "sin";
> +  Names[RTLIB::SIN_F80] = "sinl";
> +  Names[RTLIB::SIN_F128] = "sinl";
> +  Names[RTLIB::SIN_PPCF128] = "sinl";
> +  Names[RTLIB::COS_F32] = "cosf";
> +  Names[RTLIB::COS_F64] = "cos";
> +  Names[RTLIB::COS_F80] = "cosl";
> +  Names[RTLIB::COS_F128] = "cosl";
> +  Names[RTLIB::COS_PPCF128] = "cosl";
> +  Names[RTLIB::POW_F32] = "powf";
> +  Names[RTLIB::POW_F64] = "pow";
> +  Names[RTLIB::POW_F80] = "powl";
> +  Names[RTLIB::POW_F128] = "powl";
> +  Names[RTLIB::POW_PPCF128] = "powl";
> +  Names[RTLIB::CEIL_F32] = "ceilf";
> +  Names[RTLIB::CEIL_F64] = "ceil";
> +  Names[RTLIB::CEIL_F80] = "ceill";
> +  Names[RTLIB::CEIL_F128] = "ceill";
> +  Names[RTLIB::CEIL_PPCF128] = "ceill";
> +  Names[RTLIB::TRUNC_F32] = "truncf";
> +  Names[RTLIB::TRUNC_F64] = "trunc";
> +  Names[RTLIB::TRUNC_F80] = "truncl";
> +  Names[RTLIB::TRUNC_F128] = "truncl";
> +  Names[RTLIB::TRUNC_PPCF128] = "truncl";
> +  Names[RTLIB::RINT_F32] = "rintf";
> +  Names[RTLIB::RINT_F64] = "rint";
> +  Names[RTLIB::RINT_F80] = "rintl";
> +  Names[RTLIB::RINT_F128] = "rintl";
> +  Names[RTLIB::RINT_PPCF128] = "rintl";
> +  Names[RTLIB::NEARBYINT_F32] = "nearbyintf";
> +  Names[RTLIB::NEARBYINT_F64] = "nearbyint";
> +  Names[RTLIB::NEARBYINT_F80] = "nearbyintl";
> +  Names[RTLIB::NEARBYINT_F128] = "nearbyintl";
> +  Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl";
> +  Names[RTLIB::FLOOR_F32] = "floorf";
> +  Names[RTLIB::FLOOR_F64] = "floor";
> +  Names[RTLIB::FLOOR_F80] = "floorl";
> +  Names[RTLIB::FLOOR_F128] = "floorl";
> +  Names[RTLIB::FLOOR_PPCF128] = "floorl";
> +  Names[RTLIB::COPYSIGN_F32] = "copysignf";
> +  Names[RTLIB::COPYSIGN_F64] = "copysign";
> +  Names[RTLIB::COPYSIGN_F80] = "copysignl";
> +  Names[RTLIB::COPYSIGN_F128] = "copysignl";
> +  Names[RTLIB::COPYSIGN_PPCF128] = "copysignl";
> +  Names[RTLIB::FPEXT_F64_F128] = "__extenddftf2";
> +  Names[RTLIB::FPEXT_F32_F128] = "__extendsftf2";
> +  Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
> +  Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee";
> +  Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee";
> +  Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
> +  Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
> +  Names[RTLIB::FPROUND_F128_F32] = "__trunctfsf2";
> +  Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2";
> +  Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2";
> +  Names[RTLIB::FPROUND_F128_F64] = "__trunctfdf2";
> +  Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2";
> +  Names[RTLIB::FPTOSINT_F32_I8] = "__fixsfqi";
> +  Names[RTLIB::FPTOSINT_F32_I16] = "__fixsfhi";
> +  Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
> +  Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
> +  Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
> +  Names[RTLIB::FPTOSINT_F64_I8] = "__fixdfqi";
> +  Names[RTLIB::FPTOSINT_F64_I16] = "__fixdfhi";
> +  Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
> +  Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
> +  Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
> +  Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi";
> +  Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
> +  Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
> +  Names[RTLIB::FPTOSINT_F128_I32] = "__fixtfsi";
> +  Names[RTLIB::FPTOSINT_F128_I64] = "__fixtfdi";
> +  Names[RTLIB::FPTOSINT_F128_I128] = "__fixtfti";
> +  Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi";
> +  Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
> +  Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
> +  Names[RTLIB::FPTOUINT_F32_I8] = "__fixunssfqi";
> +  Names[RTLIB::FPTOUINT_F32_I16] = "__fixunssfhi";
> +  Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
> +  Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
> +  Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
> +  Names[RTLIB::FPTOUINT_F64_I8] = "__fixunsdfqi";
> +  Names[RTLIB::FPTOUINT_F64_I16] = "__fixunsdfhi";
> +  Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
> +  Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
> +  Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti";
> +  Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
> +  Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
> +  Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti";
> +  Names[RTLIB::FPTOUINT_F128_I32] = "__fixunstfsi";
> +  Names[RTLIB::FPTOUINT_F128_I64] = "__fixunstfdi";
> +  Names[RTLIB::FPTOUINT_F128_I128] = "__fixunstfti";
> +  Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi";
> +  Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
> +  Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti";
> +  Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
> +  Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
> +  Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf";
> +  Names[RTLIB::SINTTOFP_I32_F128] = "__floatsitf";
> +  Names[RTLIB::SINTTOFP_I32_PPCF128] = "__floatsitf";
> +  Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
> +  Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
> +  Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf";
> +  Names[RTLIB::SINTTOFP_I64_F128] = "__floatditf";
> +  Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf";
> +  Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf";
> +  Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf";
> +  Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf";
> +  Names[RTLIB::SINTTOFP_I128_F128] = "__floattitf";
> +  Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf";
> +  Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
> +  Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
> +  Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf";
> +  Names[RTLIB::UINTTOFP_I32_F128] = "__floatunsitf";
> +  Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf";
> +  Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
> +  Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
> +  Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf";
> +  Names[RTLIB::UINTTOFP_I64_F128] = "__floatunditf";
> +  Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf";
> +  Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf";
> +  Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf";
> +  Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf";
> +  Names[RTLIB::UINTTOFP_I128_F128] = "__floatuntitf";
> +  Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf";
> +  Names[RTLIB::OEQ_F32] = "__eqsf2";
> +  Names[RTLIB::OEQ_F64] = "__eqdf2";
> +  Names[RTLIB::OEQ_F128] = "__eqtf2";
> +  Names[RTLIB::UNE_F32] = "__nesf2";
> +  Names[RTLIB::UNE_F64] = "__nedf2";
> +  Names[RTLIB::UNE_F128] = "__netf2";
> +  Names[RTLIB::OGE_F32] = "__gesf2";
> +  Names[RTLIB::OGE_F64] = "__gedf2";
> +  Names[RTLIB::OGE_F128] = "__getf2";
> +  Names[RTLIB::OLT_F32] = "__ltsf2";
> +  Names[RTLIB::OLT_F64] = "__ltdf2";
> +  Names[RTLIB::OLT_F128] = "__lttf2";
> +  Names[RTLIB::OLE_F32] = "__lesf2";
> +  Names[RTLIB::OLE_F64] = "__ledf2";
> +  Names[RTLIB::OLE_F128] = "__letf2";
> +  Names[RTLIB::OGT_F32] = "__gtsf2";
> +  Names[RTLIB::OGT_F64] = "__gtdf2";
> +  Names[RTLIB::OGT_F128] = "__gttf2";
> +  Names[RTLIB::UO_F32] = "__unordsf2";
> +  Names[RTLIB::UO_F64] = "__unorddf2";
> +  Names[RTLIB::UO_F128] = "__unordtf2";
> +  Names[RTLIB::O_F32] = "__unordsf2";
> +  Names[RTLIB::O_F64] = "__unorddf2";
> +  Names[RTLIB::O_F128] = "__unordtf2";
> +  Names[RTLIB::MEMCPY] = "memcpy";
> +  Names[RTLIB::MEMMOVE] = "memmove";
> +  Names[RTLIB::MEMSET] = "memset";
> +  Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
> +  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1";
> +  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2";
> +  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = "__sync_val_compare_and_swap_4";
> +  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = "__sync_val_compare_and_swap_8";
> +  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = "__sync_lock_test_and_set_1";
> +  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = "__sync_lock_test_and_set_2";
> +  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = "__sync_lock_test_and_set_4";
> +  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = "__sync_lock_test_and_set_8";
> +  Names[RTLIB::SYNC_FETCH_AND_ADD_1] = "__sync_fetch_and_add_1";
> +  Names[RTLIB::SYNC_FETCH_AND_ADD_2] = "__sync_fetch_and_add_2";
> +  Names[RTLIB::SYNC_FETCH_AND_ADD_4] = "__sync_fetch_and_add_4";
> +  Names[RTLIB::SYNC_FETCH_AND_ADD_8] = "__sync_fetch_and_add_8";
> +  Names[RTLIB::SYNC_FETCH_AND_SUB_1] = "__sync_fetch_and_sub_1";
> +  Names[RTLIB::SYNC_FETCH_AND_SUB_2] = "__sync_fetch_and_sub_2";
> +  Names[RTLIB::SYNC_FETCH_AND_SUB_4] = "__sync_fetch_and_sub_4";
> +  Names[RTLIB::SYNC_FETCH_AND_SUB_8] = "__sync_fetch_and_sub_8";
> +  Names[RTLIB::SYNC_FETCH_AND_AND_1] = "__sync_fetch_and_and_1";
> +  Names[RTLIB::SYNC_FETCH_AND_AND_2] = "__sync_fetch_and_and_2";
> +  Names[RTLIB::SYNC_FETCH_AND_AND_4] = "__sync_fetch_and_and_4";
> +  Names[RTLIB::SYNC_FETCH_AND_AND_8] = "__sync_fetch_and_and_8";
> +  Names[RTLIB::SYNC_FETCH_AND_OR_1] = "__sync_fetch_and_or_1";
> +  Names[RTLIB::SYNC_FETCH_AND_OR_2] = "__sync_fetch_and_or_2";
> +  Names[RTLIB::SYNC_FETCH_AND_OR_4] = "__sync_fetch_and_or_4";
> +  Names[RTLIB::SYNC_FETCH_AND_OR_8] = "__sync_fetch_and_or_8";
> +  Names[RTLIB::SYNC_FETCH_AND_XOR_1] = "__sync_fetch_and_xor_1";
> +  Names[RTLIB::SYNC_FETCH_AND_XOR_2] = "__sync_fetch_and_xor_2";
> +  Names[RTLIB::SYNC_FETCH_AND_XOR_4] = "__sync_fetch_and_xor_4";
> +  Names[RTLIB::SYNC_FETCH_AND_XOR_8] = "__sync_fetch_and_xor_8";
> +  Names[RTLIB::SYNC_FETCH_AND_NAND_1] = "__sync_fetch_and_nand_1";
> +  Names[RTLIB::SYNC_FETCH_AND_NAND_2] = "__sync_fetch_and_nand_2";
> +  Names[RTLIB::SYNC_FETCH_AND_NAND_4] = "__sync_fetch_and_nand_4";
> +  Names[RTLIB::SYNC_FETCH_AND_NAND_8] = "__sync_fetch_and_nand_8";
> +}
> +
> +/// InitLibcallCallingConvs - Set default libcall CallingConvs.
> +///
> +static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
> +  for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
> +    CCs[i] = CallingConv::C;
> +  }
> +}
> +
> +/// getFPEXT - Return the FPEXT_*_* value for the given types, or
> +/// UNKNOWN_LIBCALL if there is none.
> +RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
> +  if (OpVT == MVT::f32) {
> +    if (RetVT == MVT::f64)
> +      return FPEXT_F32_F64;
> +    if (RetVT == MVT::f128)
> +      return FPEXT_F32_F128;
> +  } else if (OpVT == MVT::f64) {
> +    if (RetVT == MVT::f128)
> +      return FPEXT_F64_F128;
> +  }
> +
> +  return UNKNOWN_LIBCALL;
> +}
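
(Usage sketch, e.g. when softening an f32 -> f64 extension:

  RTLIB::Libcall LC = RTLIB::getFPEXT(MVT::f32, MVT::f64); // FPEXT_F32_F64
  const char *Fn = TLI.getLibcallName(LC);                 // "__extendsfdf2"

UNKNOWN_LIBCALL is the caller's cue that no soft-float helper exists for
that type pair.)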
> +
> +/// getFPROUND - Return the FPROUND_*_* value for the given types, or
> +/// UNKNOWN_LIBCALL if there is none.
> +RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
> +  if (RetVT == MVT::f32) {
> +    if (OpVT == MVT::f64)
> +      return FPROUND_F64_F32;
> +    if (OpVT == MVT::f80)
> +      return FPROUND_F80_F32;
> +    if (OpVT == MVT::f128)
> +      return FPROUND_F128_F32;
> +    if (OpVT == MVT::ppcf128)
> +      return FPROUND_PPCF128_F32;
> +  } else if (RetVT == MVT::f64) {
> +    if (OpVT == MVT::f80)
> +      return FPROUND_F80_F64;
> +    if (OpVT == MVT::f128)
> +      return FPROUND_F128_F64;
> +    if (OpVT == MVT::ppcf128)
> +      return FPROUND_PPCF128_F64;
> +  }
> +
> +  return UNKNOWN_LIBCALL;
> +}
> +
> +/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
> +/// UNKNOWN_LIBCALL if there is none.
> +RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
> +  if (OpVT == MVT::f32) {
> +    if (RetVT == MVT::i8)
> +      return FPTOSINT_F32_I8;
> +    if (RetVT == MVT::i16)
> +      return FPTOSINT_F32_I16;
> +    if (RetVT == MVT::i32)
> +      return FPTOSINT_F32_I32;
> +    if (RetVT == MVT::i64)
> +      return FPTOSINT_F32_I64;
> +    if (RetVT == MVT::i128)
> +      return FPTOSINT_F32_I128;
> +  } else if (OpVT == MVT::f64) {
> +    if (RetVT == MVT::i8)
> +      return FPTOSINT_F64_I8;
> +    if (RetVT == MVT::i16)
> +      return FPTOSINT_F64_I16;
> +    if (RetVT == MVT::i32)
> +      return FPTOSINT_F64_I32;
> +    if (RetVT == MVT::i64)
> +      return FPTOSINT_F64_I64;
> +    if (RetVT == MVT::i128)
> +      return FPTOSINT_F64_I128;
> +  } else if (OpVT == MVT::f80) {
> +    if (RetVT == MVT::i32)
> +      return FPTOSINT_F80_I32;
> +    if (RetVT == MVT::i64)
> +      return FPTOSINT_F80_I64;
> +    if (RetVT == MVT::i128)
> +      return FPTOSINT_F80_I128;
> +  } else if (OpVT == MVT::f128) {
> +    if (RetVT == MVT::i32)
> +      return FPTOSINT_F128_I32;
> +    if (RetVT == MVT::i64)
> +      return FPTOSINT_F128_I64;
> +    if (RetVT == MVT::i128)
> +      return FPTOSINT_F128_I128;
> +  } else if (OpVT == MVT::ppcf128) {
> +    if (RetVT == MVT::i32)
> +      return FPTOSINT_PPCF128_I32;
> +    if (RetVT == MVT::i64)
> +      return FPTOSINT_PPCF128_I64;
> +    if (RetVT == MVT::i128)
> +      return FPTOSINT_PPCF128_I128;
> +  }
> +  return UNKNOWN_LIBCALL;
> +}
> +
> +/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
> +/// UNKNOWN_LIBCALL if there is none.
> +RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
> +  if (OpVT == MVT::f32) {
> +    if (RetVT == MVT::i8)
> +      return FPTOUINT_F32_I8;
> +    if (RetVT == MVT::i16)
> +      return FPTOUINT_F32_I16;
> +    if (RetVT == MVT::i32)
> +      return FPTOUINT_F32_I32;
> +    if (RetVT == MVT::i64)
> +      return FPTOUINT_F32_I64;
> +    if (RetVT == MVT::i128)
> +      return FPTOUINT_F32_I128;
> +  } else if (OpVT == MVT::f64) {
> +    if (RetVT == MVT::i8)
> +      return FPTOUINT_F64_I8;
> +    if (RetVT == MVT::i16)
> +      return FPTOUINT_F64_I16;
> +    if (RetVT == MVT::i32)
> +      return FPTOUINT_F64_I32;
> +    if (RetVT == MVT::i64)
> +      return FPTOUINT_F64_I64;
> +    if (RetVT == MVT::i128)
> +      return FPTOUINT_F64_I128;
> +  } else if (OpVT == MVT::f80) {
> +    if (RetVT == MVT::i32)
> +      return FPTOUINT_F80_I32;
> +    if (RetVT == MVT::i64)
> +      return FPTOUINT_F80_I64;
> +    if (RetVT == MVT::i128)
> +      return FPTOUINT_F80_I128;
> +  } else if (OpVT == MVT::f128) {
> +    if (RetVT == MVT::i32)
> +      return FPTOUINT_F128_I32;
> +    if (RetVT == MVT::i64)
> +      return FPTOUINT_F128_I64;
> +    if (RetVT == MVT::i128)
> +      return FPTOUINT_F128_I128;
> +  } else if (OpVT == MVT::ppcf128) {
> +    if (RetVT == MVT::i32)
> +      return FPTOUINT_PPCF128_I32;
> +    if (RetVT == MVT::i64)
> +      return FPTOUINT_PPCF128_I64;
> +    if (RetVT == MVT::i128)
> +      return FPTOUINT_PPCF128_I128;
> +  }
> +  return UNKNOWN_LIBCALL;
> +}
> +
> +/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
> +/// UNKNOWN_LIBCALL if there is none.
> +RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
> +  if (OpVT == MVT::i32) {
> +    if (RetVT == MVT::f32)
> +      return SINTTOFP_I32_F32;
> +    if (RetVT == MVT::f64)
> +      return SINTTOFP_I32_F64;
> +    if (RetVT == MVT::f80)
> +      return SINTTOFP_I32_F80;
> +    if (RetVT == MVT::f128)
> +      return SINTTOFP_I32_F128;
> +    if (RetVT == MVT::ppcf128)
> +      return SINTTOFP_I32_PPCF128;
> +  } else if (OpVT == MVT::i64) {
> +    if (RetVT == MVT::f32)
> +      return SINTTOFP_I64_F32;
> +    if (RetVT == MVT::f64)
> +      return SINTTOFP_I64_F64;
> +    if (RetVT == MVT::f80)
> +      return SINTTOFP_I64_F80;
> +    if (RetVT == MVT::f128)
> +      return SINTTOFP_I64_F128;
> +    if (RetVT == MVT::ppcf128)
> +      return SINTTOFP_I64_PPCF128;
> +  } else if (OpVT == MVT::i128) {
> +    if (RetVT == MVT::f32)
> +      return SINTTOFP_I128_F32;
> +    if (RetVT == MVT::f64)
> +      return SINTTOFP_I128_F64;
> +    if (RetVT == MVT::f80)
> +      return SINTTOFP_I128_F80;
> +    if (RetVT == MVT::f128)
> +      return SINTTOFP_I128_F128;
> +    if (RetVT == MVT::ppcf128)
> +      return SINTTOFP_I128_PPCF128;
> +  }
> +  return UNKNOWN_LIBCALL;
> +}
> +
> +/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
> +/// UNKNOWN_LIBCALL if there is none.
> +RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
> +  if (OpVT == MVT::i32) {
> +    if (RetVT == MVT::f32)
> +      return UINTTOFP_I32_F32;
> +    if (RetVT == MVT::f64)
> +      return UINTTOFP_I32_F64;
> +    if (RetVT == MVT::f80)
> +      return UINTTOFP_I32_F80;
> +    if (RetVT == MVT::f128)
> +      return UINTTOFP_I32_F128;
> +    if (RetVT == MVT::ppcf128)
> +      return UINTTOFP_I32_PPCF128;
> +  } else if (OpVT == MVT::i64) {
> +    if (RetVT == MVT::f32)
> +      return UINTTOFP_I64_F32;
> +    if (RetVT == MVT::f64)
> +      return UINTTOFP_I64_F64;
> +    if (RetVT == MVT::f80)
> +      return UINTTOFP_I64_F80;
> +    if (RetVT == MVT::f128)
> +      return UINTTOFP_I64_F128;
> +    if (RetVT == MVT::ppcf128)
> +      return UINTTOFP_I64_PPCF128;
> +  } else if (OpVT == MVT::i128) {
> +    if (RetVT == MVT::f32)
> +      return UINTTOFP_I128_F32;
> +    if (RetVT == MVT::f64)
> +      return UINTTOFP_I128_F64;
> +    if (RetVT == MVT::f80)
> +      return UINTTOFP_I128_F80;
> +    if (RetVT == MVT::f128)
> +      return UINTTOFP_I128_F128;
> +    if (RetVT == MVT::ppcf128)
> +      return UINTTOFP_I128_PPCF128;
> +  }
> +  return UNKNOWN_LIBCALL;
> +}
> +
> +/// InitCmpLibcallCCs - Set default comparison libcall CC.
> +///
> +static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
> +  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
> +  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
> +  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
> +  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
> +  CCs[RTLIB::UNE_F32] = ISD::SETNE;
> +  CCs[RTLIB::UNE_F64] = ISD::SETNE;
> +  CCs[RTLIB::UNE_F128] = ISD::SETNE;
> +  CCs[RTLIB::OGE_F32] = ISD::SETGE;
> +  CCs[RTLIB::OGE_F64] = ISD::SETGE;
> +  CCs[RTLIB::OGE_F128] = ISD::SETGE;
> +  CCs[RTLIB::OLT_F32] = ISD::SETLT;
> +  CCs[RTLIB::OLT_F64] = ISD::SETLT;
> +  CCs[RTLIB::OLT_F128] = ISD::SETLT;
> +  CCs[RTLIB::OLE_F32] = ISD::SETLE;
> +  CCs[RTLIB::OLE_F64] = ISD::SETLE;
> +  CCs[RTLIB::OLE_F128] = ISD::SETLE;
> +  CCs[RTLIB::OGT_F32] = ISD::SETGT;
> +  CCs[RTLIB::OGT_F64] = ISD::SETGT;
> +  CCs[RTLIB::OGT_F128] = ISD::SETGT;
> +  CCs[RTLIB::UO_F32] = ISD::SETNE;
> +  CCs[RTLIB::UO_F64] = ISD::SETNE;
> +  CCs[RTLIB::UO_F128] = ISD::SETNE;
> +  CCs[RTLIB::O_F32] = ISD::SETEQ;
> +  CCs[RTLIB::O_F64] = ISD::SETEQ;
> +  CCs[RTLIB::O_F128] = ISD::SETEQ;
> +}
> +
> +/// NOTE: The constructor takes ownership of TLOF.
> +TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
> +                                       const TargetLoweringObjectFile *tlof)
> +  : TM(tm), TD(TM.getDataLayout()), TLOF(*tlof) {
> +  // All operations default to being supported.
> +  memset(OpActions, 0, sizeof(OpActions));
> +  memset(LoadExtActions, 0, sizeof(LoadExtActions));
> +  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
> +  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
> +  memset(CondCodeActions, 0, sizeof(CondCodeActions));
> +
> +  // Set default actions for various operations.
> +  for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
> +    // Default all indexed load / store to expand.
> +    for (unsigned IM = (unsigned)ISD::PRE_INC;
> +         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
> +      setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand);
> +      setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand);
> +    }
> +
> +    // These operations default to expand.
> +    setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand);
> +    setOperationAction(ISD::CONCAT_VECTORS, (MVT::SimpleValueType)VT, Expand);
> +  }
> +
> +  // Most targets ignore the @llvm.prefetch intrinsic.
> +  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);
> +
> +  // ConstantFP nodes default to expand.  Targets can either change this to
> +  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
> +  // to optimize expansions for certain constants.
> +  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
> +  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
> +  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
> +  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
> +  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);
> +
> +  // These library functions default to expand.
> +  setOperationAction(ISD::FLOG ,  MVT::f16, Expand);
> +  setOperationAction(ISD::FLOG2,  MVT::f16, Expand);
> +  setOperationAction(ISD::FLOG10, MVT::f16, Expand);
> +  setOperationAction(ISD::FEXP ,  MVT::f16, Expand);
> +  setOperationAction(ISD::FEXP2,  MVT::f16, Expand);
> +  setOperationAction(ISD::FFLOOR, MVT::f16, Expand);
> +  setOperationAction(ISD::FNEARBYINT, MVT::f16, Expand);
> +  setOperationAction(ISD::FCEIL,  MVT::f16, Expand);
> +  setOperationAction(ISD::FRINT,  MVT::f16, Expand);
> +  setOperationAction(ISD::FTRUNC, MVT::f16, Expand);
> +  setOperationAction(ISD::FLOG ,  MVT::f32, Expand);
> +  setOperationAction(ISD::FLOG2,  MVT::f32, Expand);
> +  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
> +  setOperationAction(ISD::FEXP ,  MVT::f32, Expand);
> +  setOperationAction(ISD::FEXP2,  MVT::f32, Expand);
> +  setOperationAction(ISD::FFLOOR, MVT::f32, Expand);
> +  setOperationAction(ISD::FNEARBYINT, MVT::f32, Expand);
> +  setOperationAction(ISD::FCEIL,  MVT::f32, Expand);
> +  setOperationAction(ISD::FRINT,  MVT::f32, Expand);
> +  setOperationAction(ISD::FTRUNC, MVT::f32, Expand);
> +  setOperationAction(ISD::FLOG ,  MVT::f64, Expand);
> +  setOperationAction(ISD::FLOG2,  MVT::f64, Expand);
> +  setOperationAction(ISD::FLOG10, MVT::f64, Expand);
> +  setOperationAction(ISD::FEXP ,  MVT::f64, Expand);
> +  setOperationAction(ISD::FEXP2,  MVT::f64, Expand);
> +  setOperationAction(ISD::FFLOOR, MVT::f64, Expand);
> +  setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
> +  setOperationAction(ISD::FCEIL,  MVT::f64, Expand);
> +  setOperationAction(ISD::FRINT,  MVT::f64, Expand);
> +  setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
> +  setOperationAction(ISD::FLOG ,  MVT::f128, Expand);
> +  setOperationAction(ISD::FLOG2,  MVT::f128, Expand);
> +  setOperationAction(ISD::FLOG10, MVT::f128, Expand);
> +  setOperationAction(ISD::FEXP ,  MVT::f128, Expand);
> +  setOperationAction(ISD::FEXP2,  MVT::f128, Expand);
> +  setOperationAction(ISD::FFLOOR, MVT::f128, Expand);
> +  setOperationAction(ISD::FNEARBYINT, MVT::f128, Expand);
> +  setOperationAction(ISD::FCEIL,  MVT::f128, Expand);
> +  setOperationAction(ISD::FRINT,  MVT::f128, Expand);
> +  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
> +
> +  // Default ISD::TRAP to expand (which turns it into abort).
> +  setOperationAction(ISD::TRAP, MVT::Other, Expand);
> +
> +  // On most systems DEBUGTRAP and TRAP behave identically; "Expand" here
> +  // tells the DAG legalizer to replace DEBUGTRAP with TRAP.
> +  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
> +
> +  IsLittleEndian = TD->isLittleEndian();
> +  PointerTy = MVT::getIntegerVT(8*TD->getPointerSize(0));
> +  memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
> +  memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
> +  maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
> +  maxStoresPerMemsetOptSize = maxStoresPerMemcpyOptSize
> +    = maxStoresPerMemmoveOptSize = 4;
> +  benefitFromCodePlacementOpt = false;
> +  UseUnderscoreSetJmp = false;
> +  UseUnderscoreLongJmp = false;
> +  SelectIsExpensive = false;
> +  IntDivIsCheap = false;
> +  Pow2DivIsCheap = false;
> +  JumpIsExpensive = false;
> +  predictableSelectIsExpensive = false;
> +  StackPointerRegisterToSaveRestore = 0;
> +  ExceptionPointerRegister = 0;
> +  ExceptionSelectorRegister = 0;
> +  BooleanContents = UndefinedBooleanContent;
> +  BooleanVectorContents = UndefinedBooleanContent;
> +  SchedPreferenceInfo = Sched::ILP;
> +  JumpBufSize = 0;
> +  JumpBufAlignment = 0;
> +  MinFunctionAlignment = 0;
> +  PrefFunctionAlignment = 0;
> +  PrefLoopAlignment = 0;
> +  MinStackArgumentAlignment = 1;
> +  ShouldFoldAtomicFences = false;
> +  InsertFencesForAtomic = false;
> +  SupportJumpTables = true;
> +  MinimumJumpTableEntries = 4;
> +
> +  InitLibcallNames(LibcallRoutineNames);
> +  InitCmpLibcallCCs(CmpLibcallCCs);
> +  InitLibcallCallingConvs(LibcallCallingConvs);
> +}
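
The data structure behind all of these calls is a dense action table:
everything is memset to Legal (0) and individual (operation, type) entries
are then flipped. A standalone miniature of the same mechanism, with toy
sizes and invented enum values, just to show the shape:

    #include <cstdio>
    #include <cstring>

    enum Action { Legal = 0, Promote, Expand, Custom };

    int main() {
      unsigned char OpActions[4][8];                // [operation][value type]
      std::memset(OpActions, 0, sizeof(OpActions)); // default: everything Legal
      const unsigned FFLOOR = 2, f32 = 3;           // toy indices
      OpActions[FFLOOR][f32] = Expand;              // mirrors setOperationAction
      std::printf("FFLOOR on f32 -> %d (Expand)\n", OpActions[FFLOOR][f32]);
    }
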
> +
> +TargetLoweringBase::~TargetLoweringBase() {
> +  delete &TLOF;
> +}
> +
> +MVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy) const {
> +  return MVT::getIntegerVT(8*TD->getPointerSize(0));
> +}
> +
> +/// canOpTrap - Returns true if the operation can trap for the value type.
> +/// VT must be a legal type.
> +bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
> +  assert(isTypeLegal(VT));
> +  switch (Op) {
> +  default:
> +    return false;
> +  case ISD::FDIV:
> +  case ISD::FREM:
> +  case ISD::SDIV:
> +  case ISD::UDIV:
> +  case ISD::SREM:
> +  case ISD::UREM:
> +    return true;
> +  }
> +}
> +
> +
> +static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
> +                                          unsigned &NumIntermediates,
> +                                          MVT &RegisterVT,
> +                                          TargetLoweringBase *TLI) {
> +  // Figure out the right, legal destination reg to copy into.
> +  unsigned NumElts = VT.getVectorNumElements();
> +  MVT EltTy = VT.getVectorElementType();
> +
> +  unsigned NumVectorRegs = 1;
> +
> +  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
> +  // could break down into LHS/RHS like LegalizeDAG does.
> +  if (!isPowerOf2_32(NumElts)) {
> +    NumVectorRegs = NumElts;
> +    NumElts = 1;
> +  }
> +
> +  // Divide the input until we get to a supported size.  This will always
> +  // end with a scalar if the target doesn't support vectors.
> +  while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
> +    NumElts >>= 1;
> +    NumVectorRegs <<= 1;
> +  }
> +
> +  NumIntermediates = NumVectorRegs;
> +
> +  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
> +  if (!TLI->isTypeLegal(NewVT))
> +    NewVT = EltTy;
> +  IntermediateVT = NewVT;
> +
> +  unsigned NewVTSize = NewVT.getSizeInBits();
> +
> +  // Convert sizes such as i33 to i64.
> +  if (!isPowerOf2_32(NewVTSize))
> +    NewVTSize = NextPowerOf2(NewVTSize);
> +
> +  MVT DestVT = TLI->getRegisterType(NewVT);
> +  RegisterVT = DestVT;
> +  if (EVT(DestVT).bitsLT(NewVT))    // Value is expanded, e.g. i64 -> i16.
> +    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
> +
> +  // Otherwise, promotion or legal types use the same number of registers as
> +  // the vector decimated to the appropriate level.
> +  return NumVectorRegs;
> +}
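
Working the loop above by hand: for v8f32 on a target whose widest legal
vector type is v4f32, NumElts halves once (8 to 4) while NumVectorRegs
doubles (1 to 2), giving two v4f32 intermediates. A standalone
re-implementation of just that arithmetic, with legality hardcoded for the
example:

    #include <cstdio>

    int main() {
      unsigned NumElts = 8;       // v8f32 to break down
      unsigned NumVectorRegs = 1;
      // Stand-in for !isTypeLegal(v<NumElts>f32): assume only v4f32 is legal.
      while (NumElts > 4) {
        NumElts >>= 1;
        NumVectorRegs <<= 1;
      }
      std::printf("%u x v%uf32\n", NumVectorRegs, NumElts); // "2 x v4f32"
    }
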
> +
> +/// isLegalRC - Return true if the value types that can be represented by the
> +/// specified register class are all legal.
> +bool TargetLoweringBase::isLegalRC(const TargetRegisterClass *RC) const {
> +  for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
> +       I != E; ++I) {
> +    if (isTypeLegal(*I))
> +      return true;
> +  }
> +  return false;
> +}
> +
> +/// findRepresentativeClass - Return the largest legal super-reg register class
> +/// of the register class for the specified type and its associated "cost".
> +std::pair<const TargetRegisterClass*, uint8_t>
> +TargetLoweringBase::findRepresentativeClass(MVT VT) const {
> +  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
> +  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
> +  if (!RC)
> +    return std::make_pair(RC, 0);
> +
> +  // Compute the set of all super-register classes.
> +  BitVector SuperRegRC(TRI->getNumRegClasses());
> +  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
> +    SuperRegRC.setBitsInMask(RCI.getMask());
> +
> +  // Find the first legal register class with the largest spill size.
> +  const TargetRegisterClass *BestRC = RC;
> +  for (int i = SuperRegRC.find_first(); i >= 0; i = SuperRegRC.find_next(i)) {
> +    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
> +    // We want the largest possible spill size.
> +    if (SuperRC->getSize() <= BestRC->getSize())
> +      continue;
> +    if (!isLegalRC(SuperRC))
> +      continue;
> +    BestRC = SuperRC;
> +  }
> +  return std::make_pair(BestRC, 1);
> +}
> +
> +/// computeRegisterProperties - Once all of the register classes are added,
> +/// this allows us to compute derived properties we expose.
> +void TargetLoweringBase::computeRegisterProperties() {
> +  assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE &&
> +         "Too many value types for ValueTypeActions to hold!");
> +
> +  // Everything defaults to needing one register.
> +  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
> +    NumRegistersForVT[i] = 1;
> +    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
> +  }
> +  // ...except isVoid, which doesn't need any registers.
> +  NumRegistersForVT[MVT::isVoid] = 0;
> +
> +  // Find the largest integer register class.
> +  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
> +  for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
> +    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
> +
> +  // Every integer value type larger than this largest register takes twice as
> +  // many registers to represent as the previous ValueType.
> +  for (unsigned ExpandedReg = LargestIntReg + 1;
> +       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
> +    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
> +    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
> +    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
> +    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
> +                                   TypeExpandInteger);
> +  }
> +
> +  // Inspect all of the ValueType's smaller than the largest integer
> +  // register to see which ones need promotion.
> +  unsigned LegalIntReg = LargestIntReg;
> +  for (unsigned IntReg = LargestIntReg - 1;
> +       IntReg >= (unsigned)MVT::i1; --IntReg) {
> +    MVT IVT = (MVT::SimpleValueType)IntReg;
> +    if (isTypeLegal(IVT)) {
> +      LegalIntReg = IntReg;
> +    } else {
> +      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
> +        (const MVT::SimpleValueType)LegalIntReg;
> +      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
> +    }
> +  }
> +
> +  // ppcf128 type is really two f64's.
> +  if (!isTypeLegal(MVT::ppcf128)) {
> +    NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
> +    RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
> +    TransformToType[MVT::ppcf128] = MVT::f64;
> +    ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
> +  }
> +
> +  // Decide how to handle f64. If the target does not have native f64 support,
> +  // expand it to i64 and we will be generating soft float library calls.
> +  if (!isTypeLegal(MVT::f64)) {
> +    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
> +    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
> +    TransformToType[MVT::f64] = MVT::i64;
> +    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
> +  }
> +
> +  // Decide how to handle f32. If the target does not have native support for
> +  // f32, promote it to f64 if it is legal. Otherwise, expand it to i32.
> +  if (!isTypeLegal(MVT::f32)) {
> +    if (isTypeLegal(MVT::f64)) {
> +      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64];
> +      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64];
> +      TransformToType[MVT::f32] = MVT::f64;
> +      ValueTypeActions.setTypeAction(MVT::f32, TypePromoteInteger);
> +    } else {
> +      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
> +      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
> +      TransformToType[MVT::f32] = MVT::i32;
> +      ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
> +    }
> +  }
> +
> +  // Loop over all of the vector value types to see which need transformations.
> +  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
> +       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
> +    MVT VT = (MVT::SimpleValueType)i;
> +    if (isTypeLegal(VT)) continue;
> +
> +    // Determine if there is a legal wider type.  If so, we should promote to
> +    // that wider vector type.
> +    MVT EltVT = VT.getVectorElementType();
> +    unsigned NElts = VT.getVectorNumElements();
> +    if (NElts != 1 && !shouldSplitVectorElementType(EltVT)) {
> +      bool IsLegalWiderType = false;
> +      // First try to promote the elements of integer vectors. If no legal
> +      // promotion was found, fallback to the widen-vector method.
> +      for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
> +        MVT SVT = (MVT::SimpleValueType)nVT;
> +        // Promote vectors of integers to vectors with the same number
> +        // of elements, with a wider element type.
> +        if (SVT.getVectorElementType().getSizeInBits() > EltVT.getSizeInBits()
> +            && SVT.getVectorNumElements() == NElts &&
> +            isTypeLegal(SVT) && SVT.getScalarType().isInteger()) {
> +          TransformToType[i] = SVT;
> +          RegisterTypeForVT[i] = SVT;
> +          NumRegistersForVT[i] = 1;
> +          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
> +          IsLegalWiderType = true;
> +          break;
> +        }
> +      }
> +
> +      if (IsLegalWiderType) continue;
> +
> +      // Try to widen the vector.
> +      for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
> +        MVT SVT = (MVT::SimpleValueType)nVT;
> +        if (SVT.getVectorElementType() == EltVT &&
> +            SVT.getVectorNumElements() > NElts &&
> +            isTypeLegal(SVT)) {
> +          TransformToType[i] = SVT;
> +          RegisterTypeForVT[i] = SVT;
> +          NumRegistersForVT[i] = 1;
> +          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
> +          IsLegalWiderType = true;
> +          break;
> +        }
> +      }
> +      if (IsLegalWiderType) continue;
> +    }
> +
> +    MVT IntermediateVT;
> +    MVT RegisterVT;
> +    unsigned NumIntermediates;
> +    NumRegistersForVT[i] =
> +      getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates,
> +                                RegisterVT, this);
> +    RegisterTypeForVT[i] = RegisterVT;
> +
> +    MVT NVT = VT.getPow2VectorType();
> +    if (NVT == VT) {
> +      // Type is already a power of 2.  The default action is to split.
> +      TransformToType[i] = MVT::Other;
> +      unsigned NumElts = VT.getVectorNumElements();
> +      ValueTypeActions.setTypeAction(VT,
> +            NumElts > 1 ? TypeSplitVector : TypeScalarizeVector);
> +    } else {
> +      TransformToType[i] = NVT;
> +      ValueTypeActions.setTypeAction(VT, TypeWidenVector);
> +    }
> +  }
> +
> +  // Determine the 'representative' register class for each value type.
> +  // A representative register class is the largest (meaning one which is not
> +  // a sub-register class / subreg register class) legal register class for a
> +  // group of value types. For example, on i386 the representative class for
> +  // i8, i16, and i32 would be GR32; on x86_64 it would be GR64.
> +  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
> +    const TargetRegisterClass* RRC;
> +    uint8_t Cost;
> +    tie(RRC, Cost) =  findRepresentativeClass((MVT::SimpleValueType)i);
> +    RepRegClassForVT[i] = RRC;
> +    RepRegClassCostForVT[i] = Cost;
> +  }
> +}
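
The integer-expansion loop above is where the familiar doubling comes from:
once the largest legal integer type is found, every wider power-of-two
integer costs twice as many registers as its predecessor. A standalone
sketch of the resulting NumRegistersForVT values, assuming i64 is the
largest legal integer type:

    #include <cstdio>

    int main() {
      const unsigned LegalBits = 64; // assumed widest legal integer
      unsigned NumRegs = 1;
      for (unsigned Bits = LegalBits; Bits <= 4 * LegalBits;
           Bits *= 2, NumRegs *= 2)
        std::printf("i%u -> %u register(s)\n", Bits, NumRegs);
      // Prints i64 -> 1, i128 -> 2, i256 -> 4.
    }
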
> +
> +EVT TargetLoweringBase::getSetCCResultType(EVT VT) const {
> +  assert(!VT.isVector() && "No default SetCC type for vectors!");
> +  return getPointerTy(0).SimpleTy;
> +}
> +
> +MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
> +  return MVT::i32; // return the default value
> +}
> +
> +/// getVectorTypeBreakdown - Vector types are broken down into some number of
> +/// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
> +/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
> +/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
> +///
> +/// This method returns the number of registers needed, and the VT for each
> +/// register.  It also returns the VT and quantity of the intermediate values
> +/// before they are promoted/expanded.
> +///
> +unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
> +                                                EVT &IntermediateVT,
> +                                                unsigned &NumIntermediates,
> +                                                MVT &RegisterVT) const {
> +  unsigned NumElts = VT.getVectorNumElements();
> +
> +  // If there is a wider vector type with the same element type as this one,
> +  // or a promoted vector type that has the same number of elements which
> +  // are wider, then we should convert to that legal vector type.
> +  // This handles things like <2 x float> -> <4 x float> and
> +  // <4 x i1> -> <4 x i32>.
> +  LegalizeTypeAction TA = getTypeAction(Context, VT);
> +  if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
> +    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
> +    if (isTypeLegal(RegisterEVT)) {
> +      IntermediateVT = RegisterEVT;
> +      RegisterVT = RegisterEVT.getSimpleVT();
> +      NumIntermediates = 1;
> +      return 1;
> +    }
> +  }
> +
> +  // Figure out the right, legal destination reg to copy into.
> +  EVT EltTy = VT.getVectorElementType();
> +
> +  unsigned NumVectorRegs = 1;
> +
> +  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
> +  // could break down into LHS/RHS like LegalizeDAG does.
> +  if (!isPowerOf2_32(NumElts)) {
> +    NumVectorRegs = NumElts;
> +    NumElts = 1;
> +  }
> +
> +  // Divide the input until we get to a supported size.  This will always
> +  // end with a scalar if the target doesn't support vectors.
> +  while (NumElts > 1 && !isTypeLegal(
> +                                   EVT::getVectorVT(Context, EltTy, NumElts))) {
> +    NumElts >>= 1;
> +    NumVectorRegs <<= 1;
> +  }
> +
> +  NumIntermediates = NumVectorRegs;
> +
> +  EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
> +  if (!isTypeLegal(NewVT))
> +    NewVT = EltTy;
> +  IntermediateVT = NewVT;
> +
> +  MVT DestVT = getRegisterType(Context, NewVT);
> +  RegisterVT = DestVT;
> +  unsigned NewVTSize = NewVT.getSizeInBits();
> +
> +  // Convert sizes such as i33 to i64.
> +  if (!isPowerOf2_32(NewVTSize))
> +    NewVTSize = NextPowerOf2(NewVTSize);
> +
> +  if (EVT(DestVT).bitsLT(NewVT))   // Value is expanded, e.g. i64 -> i16.
> +    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
> +
> +  // Otherwise, promotion or legal types use the same number of registers as
> +  // the vector decimated to the appropriate level.
> +  return NumVectorRegs;
> +}
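
For the MVT::v2i64 example from the comment (a 32-bit target with no legal
vector types): the loop scalarizes down to two intermediates of one element
each, i64 itself is illegal, and getRegisterType expands it to i32, so the
result is 2 * (64 / 32) = 4 registers. A standalone walk-through of that
arithmetic, with the legality answers hardcoded:

    #include <cstdio>

    int main() {
      unsigned NumElts = 2, NumVectorRegs = 1; // v2i64
      while (NumElts > 1) {                    // assume no vector type is legal
        NumElts >>= 1;
        NumVectorRegs <<= 1;
      }
      unsigned NewVTSize = 64;                 // intermediate degenerates to i64
      unsigned DestSize = 32;                  // i64 expands into i32 registers
      std::printf("%u registers\n", NumVectorRegs * (NewVTSize / DestSize));
    }
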
> +
> +/// Get the EVTs and ArgFlags collections that represent the legalized return
> +/// type of the given function.  This does not require a DAG or a return value,
> +/// and is suitable for use before any DAGs for the function are constructed.
> +/// TODO: Move this out of TargetLowering.cpp.
> +void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr,
> +                         SmallVectorImpl<ISD::OutputArg> &Outs,
> +                         const TargetLowering &TLI) {
> +  SmallVector<EVT, 4> ValueVTs;
> +  ComputeValueVTs(TLI, ReturnType, ValueVTs);
> +  unsigned NumValues = ValueVTs.size();
> +  if (NumValues == 0) return;
> +
> +  for (unsigned j = 0, f = NumValues; j != f; ++j) {
> +    EVT VT = ValueVTs[j];
> +    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
> +
> +    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
> +      ExtendKind = ISD::SIGN_EXTEND;
> +    else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
> +      ExtendKind = ISD::ZERO_EXTEND;
> +
> +    // FIXME: C calling convention requires the return type to be promoted to
> +    // at least 32-bit. But this is not necessary for non-C calling
> +    // conventions. The frontend should mark functions whose return values
> +    // require promoting with signext or zeroext attributes.
> +    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
> +      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
> +      if (VT.bitsLT(MinVT))
> +        VT = MinVT;
> +    }
> +
> +    unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
> +    MVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
> +
> +    // 'inreg' on function refers to return value
> +    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
> +    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::InReg))
> +      Flags.setInReg();
> +
> +    // Propagate extension type if any
> +    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
> +      Flags.setSExt();
> +    else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
> +      Flags.setZExt();
> +
> +    for (unsigned i = 0; i < NumParts; ++i)
> +      Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true, 0, 0));
> +  }
> +}
> +
> +/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
> +/// function arguments in the caller parameter area.  This is the actual
> +/// alignment, not its logarithm.
> +unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty) const {
> +  return TD->getCallFrameTypeAlignment(Ty);
> +}
> +
> +//===----------------------------------------------------------------------===//
> +//  TargetTransformInfo Helpers
> +//===----------------------------------------------------------------------===//
> +
> +int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
> +  enum InstructionOpcodes {
> +#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
> +#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
> +#include "llvm/IR/Instruction.def"
> +  };
> +  switch (static_cast<InstructionOpcodes>(Opcode)) {
> +  case Ret:            return 0;
> +  case Br:             return 0;
> +  case Switch:         return 0;
> +  case IndirectBr:     return 0;
> +  case Invoke:         return 0;
> +  case Resume:         return 0;
> +  case Unreachable:    return 0;
> +  case Add:            return ISD::ADD;
> +  case FAdd:           return ISD::FADD;
> +  case Sub:            return ISD::SUB;
> +  case FSub:           return ISD::FSUB;
> +  case Mul:            return ISD::MUL;
> +  case FMul:           return ISD::FMUL;
> +  case UDiv:           return ISD::UDIV;
> +  case SDiv:           return ISD::SDIV;
> +  case FDiv:           return ISD::FDIV;
> +  case URem:           return ISD::UREM;
> +  case SRem:           return ISD::SREM;
> +  case FRem:           return ISD::FREM;
> +  case Shl:            return ISD::SHL;
> +  case LShr:           return ISD::SRL;
> +  case AShr:           return ISD::SRA;
> +  case And:            return ISD::AND;
> +  case Or:             return ISD::OR;
> +  case Xor:            return ISD::XOR;
> +  case Alloca:         return 0;
> +  case Load:           return ISD::LOAD;
> +  case Store:          return ISD::STORE;
> +  case GetElementPtr:  return 0;
> +  case Fence:          return 0;
> +  case AtomicCmpXchg:  return 0;
> +  case AtomicRMW:      return 0;
> +  case Trunc:          return ISD::TRUNCATE;
> +  case ZExt:           return ISD::ZERO_EXTEND;
> +  case SExt:           return ISD::SIGN_EXTEND;
> +  case FPToUI:         return ISD::FP_TO_UINT;
> +  case FPToSI:         return ISD::FP_TO_SINT;
> +  case UIToFP:         return ISD::UINT_TO_FP;
> +  case SIToFP:         return ISD::SINT_TO_FP;
> +  case FPTrunc:        return ISD::FP_ROUND;
> +  case FPExt:          return ISD::FP_EXTEND;
> +  case PtrToInt:       return ISD::BITCAST;
> +  case IntToPtr:       return ISD::BITCAST;
> +  case BitCast:        return ISD::BITCAST;
> +  case ICmp:           return ISD::SETCC;
> +  case FCmp:           return ISD::SETCC;
> +  case PHI:            return 0;
> +  case Call:           return 0;
> +  case Select:         return ISD::SELECT;
> +  case UserOp1:        return 0;
> +  case UserOp2:        return 0;
> +  case VAArg:          return 0;
> +  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
> +  case InsertElement:  return ISD::INSERT_VECTOR_ELT;
> +  case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
> +  case ExtractValue:   return ISD::MERGE_VALUES;
> +  case InsertValue:    return ISD::MERGE_VALUES;
> +  case LandingPad:     return 0;
> +  }
> +
> +  llvm_unreachable("Unknown instruction type encountered!");
> +}
> +
> +std::pair<unsigned, MVT>
> +TargetLoweringBase::getTypeLegalizationCost(Type *Ty) const {
> +  LLVMContext &C = Ty->getContext();
> +  EVT MTy = getValueType(Ty);
> +
> +  unsigned Cost = 1;
> +  // We keep legalizing the type until we find a legal kind. We assume that
> +  // the only operation that costs anything is the split. After splitting
> +  // we need to handle two types.
> +  while (true) {
> +    LegalizeKind LK = getTypeConversion(C, MTy);
> +
> +    if (LK.first == TypeLegal)
> +      return std::make_pair(Cost, MTy.getSimpleVT());
> +
> +    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
> +      Cost *= 2;
> +
> +    // Keep legalizing the type.
> +    MTy = LK.second;
> +  }
> +}
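
Concretely, the loop doubles the cost once per split or integer-expansion
step and stops at the first legal type. For a hypothetical v16i32 on a
target whose widest legal vector is v4i32, two TypeSplitVector steps are
needed, so the returned cost is 4. A standalone sketch of that accounting:

    #include <cstdio>

    int main() {
      unsigned Elts = 16, LegalElts = 4, Cost = 1; // v16i32; v4i32 is legal
      while (Elts > LegalElts) { // each split halves the type, doubles the cost
        Elts /= 2;
        Cost *= 2;
      }
      std::printf("cost %u, legal type v%ui32\n", Cost, Elts); // cost 4, v4i32
    }
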
> +
> +//===----------------------------------------------------------------------===//
> +//  Loop Strength Reduction hooks
> +//===----------------------------------------------------------------------===//
> +
> +/// isLegalAddressingMode - Return true if the addressing mode represented
> +/// by AM is legal for this target, for a load/store of the specified type.
> +bool TargetLoweringBase::isLegalAddressingMode(const AddrMode &AM,
> +                                           Type *Ty) const {
> +  // The default implementation supports a conservative RISCy r+r and r+i
> +  // addressing mode.
> +
> +  // Allows a sign-extended 16-bit immediate field.
> +  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
> +    return false;
> +
> +  // No global is ever allowed as a base.
> +  if (AM.BaseGV)
> +    return false;
> +
> +  // Only support r+r,
> +  switch (AM.Scale) {
> +  case 0:  // "r+i" or just "i", depending on HasBaseReg.
> +    break;
> +  case 1:
> +    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
> +      return false;
> +    // Otherwise we have r+r or r+i.
> +    break;
> +  case 2:
> +    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
> +      return false;
> +    // Allow 2*r as r+r.
> +    break;
> +  }
> +
> +  return true;
> +}
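
So under the default hooks, "r + imm16" and "r + r" are accepted while
"r + r + imm" is not. A standalone mirror of the checks above, using a
local struct with the same fields rather than LLVM's AddrMode (compile with
-std=c++11 for the brace initializers):

    #include <cstdint>
    #include <cstdio>

    struct Mode { bool HasBaseGV, HasBaseReg; int64_t BaseOffs; int Scale; };

    static bool legal(const Mode &AM) {
      if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16) - 1)
        return false;             // offset must fit a signed 16-bit field
      if (AM.HasBaseGV)
        return false;             // no global bases
      if (AM.Scale == 1 && AM.HasBaseReg && AM.BaseOffs)
        return false;             // r+r+i is rejected
      if (AM.Scale == 2 && (AM.HasBaseReg || AM.BaseOffs))
        return false;             // 2*r+r and 2*r+i are rejected
      return true;
    }

    int main() {
      std::printf("%d\n", legal({false, true, 8, 0})); // r+i   -> 1
      std::printf("%d\n", legal({false, true, 0, 1})); // r+r   -> 1
      std::printf("%d\n", legal({false, true, 8, 1})); // r+r+i -> 0
    }
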
>
>
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
>