[llvm-commits] [llvm] r106628 - /llvm/trunk/include/llvm/Target/TargetLowering.h

Jim Grosbach <grosbach at apple.com>
Wed Jun 23 08:49:19 PDT 2010


Author: grosbach
Date: Wed Jun 23 10:49:18 2010
New Revision: 106628

URL: http://llvm.org/viewvc/llvm-project?rev=106628&view=rev
Log:
remove trailing whitespace

Modified:
    llvm/trunk/include/llvm/Target/TargetLowering.h

Modified: llvm/trunk/include/llvm/Target/TargetLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Target/TargetLowering.h?rev=106628&r1=106627&r2=106628&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Target/TargetLowering.h (original)
+++ llvm/trunk/include/llvm/Target/TargetLowering.h Wed Jun 23 10:49:18 2010
@@ -114,7 +114,7 @@
   /// isSelectExpensive - Return true if the select operation is expensive for
   /// this target.
   bool isSelectExpensive() const { return SelectIsExpensive; }
-  
+
   /// isIntDivCheap() - Return true if integer divide is usually cheaper than
   /// a sequence of several shifts, adds, and multiplies for this target.
   bool isIntDivCheap() const { return IntDivIsCheap; }
@@ -131,10 +131,10 @@
   virtual
   MVT::SimpleValueType getSetCCResultType(EVT VT) const;
 
-  /// getCmpLibcallReturnType - Return the ValueType for comparison 
+  /// getCmpLibcallReturnType - Return the ValueType for comparison
   /// libcalls. Comparions libcalls include floating point comparion calls,
   /// and Ordered/Unordered check calls on floating point numbers.
-  virtual 
+  virtual
   MVT::SimpleValueType getCmpLibcallReturnType() const;
 
   /// getBooleanContents - For targets without i1 registers, this gives the
@@ -208,7 +208,7 @@
       ValueTypeActions[I] = Action;
     }
   };
-  
+
   const ValueTypeActionImpl &getValueTypeActions() const {
     return ValueTypeActions;
   }
@@ -229,7 +229,7 @@
   /// returns the integer type to transform to.
   EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
     if (VT.isSimple()) {
-      assert((unsigned)VT.getSimpleVT().SimpleTy < 
+      assert((unsigned)VT.getSimpleVT().SimpleTy <
              array_lengthof(TransformToType));
       EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
       assert(getTypeAction(Context, NVT) != Promote &&
@@ -256,7 +256,7 @@
         return EVT::getIntegerVT(Context, VT.getSizeInBits() / 2);
       else
         // Promote to a power of two size, avoiding multi-step promotion.
-        return getTypeAction(Context, NVT) == Promote ? 
+        return getTypeAction(Context, NVT) == Promote ?
           getTypeToTransformTo(Context, NVT) : NVT;
     }
     assert(0 && "Unsupported extended type!");
@@ -302,11 +302,11 @@
   /// intrinsic will need to map to a MemIntrinsicNode (touches memory). If
   /// this is the case, it returns true and store the intrinsic
   /// information into the IntrinsicInfo that was passed to the function.
-  struct IntrinsicInfo { 
+  struct IntrinsicInfo {
     unsigned     opc;         // target opcode
     EVT          memVT;       // memory VT
     const Value* ptrVal;      // value representing memory location
-    int          offset;      // offset off of ptrVal 
+    int          offset;      // offset off of ptrVal
     unsigned     align;       // alignment
     bool         vol;         // is volatile?
     bool         readMem;     // reads memory?
@@ -324,7 +324,7 @@
   virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const {
     return false;
   }
-  
+
   /// isShuffleMaskLegal - Targets can use this to indicate that they only
   /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
   /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
@@ -446,7 +446,7 @@
            "Table isn't big enough!");
     unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
     return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
-  }  
+  }
 
   /// isIndexedStoreLegal - Return true if the specified indexed load is legal
   /// on this target.
@@ -492,7 +492,7 @@
 
     assert((VT.isInteger() || VT.isFloatingPoint()) &&
            "Cannot autopromote this type, add it with AddPromotedToType.");
-    
+
     EVT NVT = VT;
     do {
       NVT = (MVT::SimpleValueType)(NVT.getSimpleVT().SimpleTy+1);
@@ -516,14 +516,14 @@
   /// function arguments in the caller parameter area.  This is the actual
   /// alignment, not its logarithm.
   virtual unsigned getByValTypeAlignment(const Type *Ty) const;
-  
+
   /// getRegisterType - Return the type of registers that this ValueType will
   /// eventually require.
   EVT getRegisterType(MVT VT) const {
     assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
     return RegisterTypeForVT[VT.SimpleTy];
   }
-  
+
   /// getRegisterType - Return the type of registers that this ValueType will
   /// eventually require.
   EVT getRegisterType(LLVMContext &Context, EVT VT) const {
@@ -606,7 +606,7 @@
   /// of the specified type. This is used, for example, in situations where an
   /// array copy/move/set is  converted to a sequence of store operations. It's
   /// use helps to ensure that such replacements don't generate code that causes
-  /// an alignment error  (trap) on the target machine. 
+  /// an alignment error  (trap) on the target machine.
   /// @brief Determine if the target supports unaligned memory accesses.
   virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
     return false;
@@ -637,7 +637,7 @@
                                   MachineFunction &MF) const {
     return MVT::Other;
   }
-  
+
   /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
   /// to implement llvm.setjmp.
   bool usesUnderscoreSetJmp() const {
@@ -701,7 +701,7 @@
   unsigned getPrefLoopAlignment() const {
     return PrefLoopAlignment;
   }
-  
+
   /// getPreIndexedAddressParts - returns true by value, base pointer and
   /// offset pointer and addressing mode by reference if the node's address
   /// can be legally represented as pre-indexed load / store address.
@@ -711,7 +711,7 @@
                                          SelectionDAG &DAG) const {
     return false;
   }
-  
+
   /// getPostIndexedAddressParts - returns true by value, base pointer and
   /// offset pointer and addressing mode by reference if this node can be
   /// combined with a load / store to form a post-indexed load / store.
@@ -721,12 +721,12 @@
                                           SelectionDAG &DAG) const {
     return false;
   }
-  
+
   /// getJumpTableEncoding - Return the entry encoding for a jump table in the
   /// current function.  The returned value is a member of the
   /// MachineJumpTableInfo::JTEntryKind enum.
   virtual unsigned getJumpTableEncoding() const;
-  
+
   virtual const MCExpr *
   LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                             const MachineBasicBlock *MBB, unsigned uid,
@@ -734,7 +734,7 @@
     assert(0 && "Need to implement this hook if target has custom JTIs");
     return 0;
   }
-  
+
   /// getPICJumpTableRelocaBase - Returns relocation base for the given PIC
   /// jumptable.
   virtual SDValue getPICJumpTableRelocBase(SDValue Table,
@@ -746,7 +746,7 @@
   virtual const MCExpr *
   getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                unsigned JTI, MCContext &Ctx) const;
-  
+
   /// isOffsetFoldingLegal - Return true if folding a constant offset
   /// with the given GlobalAddress is legal.  It is frequently not legal in
   /// PIC relocation models.
@@ -758,10 +758,10 @@
   //===--------------------------------------------------------------------===//
   // TargetLowering Optimization Methods
   //
-  
+
   /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
   /// SDValues for returning information from TargetLowering to its clients
-  /// that want to combine 
+  /// that want to combine
   struct TargetLoweringOpt {
     SelectionDAG &DAG;
     bool LegalTys;
@@ -775,14 +775,14 @@
 
     bool LegalTypes() const { return LegalTys; }
     bool LegalOperations() const { return LegalOps; }
-    
-    bool CombineTo(SDValue O, SDValue N) { 
-      Old = O; 
-      New = N; 
+
+    bool CombineTo(SDValue O, SDValue N) {
+      Old = O;
+      New = N;
       return true;
     }
-    
-    /// ShrinkDemandedConstant - Check to see if the specified operand of the 
+
+    /// ShrinkDemandedConstant - Check to see if the specified operand of the
     /// specified instruction is a constant integer.  If so, check to see if
     /// there are any bits set in the constant that are not demanded.  If so,
     /// shrink the constant and return true.
@@ -795,25 +795,25 @@
     bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
                           DebugLoc dl);
   };
-                                                
+
   /// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
   /// DemandedMask bits of the result of Op are ever used downstream.  If we can
   /// use this information to simplify Op, create a new simplified DAG node and
-  /// return true, returning the original and new nodes in Old and New. 
-  /// Otherwise, analyze the expression and return a mask of KnownOne and 
-  /// KnownZero bits for the expression (used to simplify the caller).  
-  /// The KnownZero/One bits may only be accurate for those bits in the 
+  /// return true, returning the original and new nodes in Old and New.
+  /// Otherwise, analyze the expression and return a mask of KnownOne and
+  /// KnownZero bits for the expression (used to simplify the caller).
+  /// The KnownZero/One bits may only be accurate for those bits in the
   /// DemandedMask.
-  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask, 
+  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
                             APInt &KnownZero, APInt &KnownOne,
                             TargetLoweringOpt &TLO, unsigned Depth = 0) const;
-  
+
   /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
-  /// Mask are known to be either zero or one and return them in the 
+  /// Mask are known to be either zero or one and return them in the
   /// KnownZero/KnownOne bitsets.
   virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                               const APInt &Mask,
-                                              APInt &KnownZero, 
+                                              APInt &KnownZero,
                                               APInt &KnownOne,
                                               const SelectionDAG &DAG,
                                               unsigned Depth = 0) const;
@@ -823,7 +823,7 @@
   /// DAG Combiner.
   virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                    unsigned Depth = 0) const;
-  
+
   struct DAGCombinerInfo {
     void *DC;  // The DAG Combiner object.
     bool BeforeLegalize;
@@ -831,15 +831,15 @@
     bool CalledByLegalizer;
   public:
     SelectionDAG &DAG;
-    
+
     DAGCombinerInfo(SelectionDAG &dag, bool bl, bool blo, bool cl, void *dc)
       : DC(dc), BeforeLegalize(bl), BeforeLegalizeOps(blo),
         CalledByLegalizer(cl), DAG(dag) {}
-    
+
     bool isBeforeLegalize() const { return BeforeLegalize; }
     bool isBeforeLegalizeOps() const { return BeforeLegalizeOps; }
     bool isCalledByLegalizer() const { return CalledByLegalizer; }
-    
+
     void AddToWorklist(SDNode *N);
     SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
                       bool AddTo = true);
@@ -849,7 +849,7 @@
     void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
   };
 
-  /// SimplifySetCC - Try to simplify a setcc built with the specified operands 
+  /// SimplifySetCC - Try to simplify a setcc built with the specified operands
   /// and cc. If it is unable to simplify it, return a null SDValue.
   SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                           ISD::CondCode Cond, bool foldBooleans,
@@ -890,7 +890,7 @@
   virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
     return false;
   }
-  
+
   //===--------------------------------------------------------------------===//
   // TargetLowering Configuration Methods - These methods should be invoked by
   // the derived class constructor to configure this object for the target.
@@ -930,7 +930,7 @@
   void setStackPointerRegisterToSaveRestore(unsigned R) {
     StackPointerRegisterToSaveRestore = R;
   }
-  
+
   /// setExceptionPointerRegister - If set to a physical register, this sets
   /// the register that receives the exception address on entry to a landing
   /// pad.
@@ -953,12 +953,12 @@
   /// expensive, and if possible, should be replaced by an alternate sequence
   /// of instructions not containing an integer divide.
   void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
-  
+
   /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
   /// srl/add/sra for a signed divide by power of two, and let the target handle
   /// it.
   void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }
-  
+
   /// addRegisterClass - Add the specified register class as an available
   /// regclass for the specified value type.  This indicates the selector can
   /// handle values of that class natively.
@@ -981,7 +981,7 @@
     assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
     OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
   }
-  
+
   /// setLoadExtAction - Indicate that the specified load with extension does
   /// not work with the specified type and indicate what to do about it.
   void setLoadExtAction(unsigned ExtType, MVT VT,
@@ -991,7 +991,7 @@
            "Table isn't big enough!");
     LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
   }
-  
+
   /// setTruncStoreAction - Indicate that the specified truncating store does
   /// not work with the specified type and indicate what to do about it.
   void setTruncStoreAction(MVT ValVT, MVT MemVT,
@@ -1016,7 +1016,7 @@
     IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
     IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
   }
-  
+
   /// setIndexedStoreAction - Indicate that the specified indexed store does or
   /// does not work with the specified type and indicate what to do about
   /// it. NOTE: All indexed mode stores are initialized to Expand in
@@ -1031,7 +1031,7 @@
     IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
     IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
   }
-  
+
   /// setCondCodeAction - Indicate that the specified condition code is or isn't
   /// supported on the target and indicate what to do about it.
   void setCondCodeAction(ISD::CondCode CC, MVT VT,
@@ -1058,7 +1058,7 @@
     assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
     TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
   }
-  
+
   /// setJumpBufSize - Set the target's required jmp_buf buffer size (in
   /// bytes); default is 200
   void setJumpBufSize(unsigned Size) {
@@ -1076,7 +1076,7 @@
   void setIfCvtBlockSizeLimit(unsigned Limit) {
     IfCvtBlockSizeLimit = Limit;
   }
-  
+
   /// setIfCvtDupBlockSizeLimit - Set the target's block size limit (in number
   /// of instructions) to be considered for code duplication during
   /// if-conversion; default is 2.
@@ -1089,7 +1089,7 @@
   void setPrefLoopAlignment(unsigned Align) {
     PrefLoopAlignment = Align;
   }
-  
+
 public:
   //===--------------------------------------------------------------------===//
   // Lowering methods - These methods must be implemented by targets so that
@@ -1198,7 +1198,7 @@
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) const;
 
-  /// LowerOperation - This callback is invoked for operations that are 
+  /// LowerOperation - This callback is invoked for operations that are
   /// unsupported by the target, which are registered to use 'custom' lowering,
   /// and whose defined values are all legal.
   /// If the target has no operations that require custom lowering, it need not
@@ -1241,7 +1241,7 @@
   //===--------------------------------------------------------------------===//
   // Inline Asm Support hooks
   //
-  
+
   /// ExpandInlineAsm - This hook allows the target to expand an inline asm
   /// call to be explicit llvm code if it wants to.  This is useful for
   /// turning simple inline asms into LLVM intrinsics, which gives the
@@ -1249,7 +1249,7 @@
   virtual bool ExpandInlineAsm(CallInst *CI) const {
     return false;
   }
-  
+
   enum ConstraintType {
     C_Register,            // Constraint represents specific register(s).
     C_RegisterClass,       // Constraint represents any of register(s) in class.
@@ -1257,7 +1257,7 @@
     C_Other,               // Something else.
     C_Unknown              // Unsupported constraint.
   };
-  
+
   /// AsmOperandInfo - This contains information for each constraint that we are
   /// lowering.
   struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
@@ -1269,25 +1269,25 @@
     /// ConstraintType - Information about the constraint code, e.g. Register,
     /// RegisterClass, Memory, Other, Unknown.
     TargetLowering::ConstraintType ConstraintType;
-  
+
     /// CallOperandval - If this is the result output operand or a
     /// clobber, this is null, otherwise it is the incoming operand to the
     /// CallInst.  This gets modified as the asm is processed.
     Value *CallOperandVal;
-  
+
     /// ConstraintVT - The ValueType for the operand value.
     EVT ConstraintVT;
-    
+
     /// isMatchingInputConstraint - Return true of this is an input operand that
     /// is a matching constraint like "4".
     bool isMatchingInputConstraint() const;
-    
+
     /// getMatchedOperand - If this is an input matching constraint, this method
     /// returns the output operand it matches.
     unsigned getMatchedOperand() const;
-  
+
     AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
-      : InlineAsm::ConstraintInfo(info), 
+      : InlineAsm::ConstraintInfo(info),
         ConstraintType(TargetLowering::C_Unknown),
         CallOperandVal(0), ConstraintVT(MVT::Other) {
     }
@@ -1303,15 +1303,15 @@
                                       SDValue Op,
                                       bool hasMemory,
                                       SelectionDAG *DAG = 0) const;
-  
+
   /// getConstraintType - Given a constraint, return the type of constraint it
   /// is for this target.
   virtual ConstraintType getConstraintType(const std::string &Constraint) const;
-  
+
   /// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
   /// return a list of registers that can be used to satisfy the constraint.
   /// This should only be used for C_RegisterClass constraints.
-  virtual std::vector<unsigned> 
+  virtual std::vector<unsigned>
   getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                     EVT VT) const;
 
@@ -1325,16 +1325,16 @@
   ///
   /// This should only be used for C_Register constraints.  On error,
   /// this returns a register number of 0 and a null register class pointer..
-  virtual std::pair<unsigned, const TargetRegisterClass*> 
+  virtual std::pair<unsigned, const TargetRegisterClass*>
     getRegForInlineAsmConstraint(const std::string &Constraint,
                                  EVT VT) const;
-  
+
   /// LowerXConstraint - try to replace an X constraint, which matches anything,
   /// with another that has more specific requirements based on the type of the
   /// corresponding operand.  This returns null if there is no replacement to
   /// make.
   virtual const char *LowerXConstraint(EVT ConstraintVT) const;
-  
+
   /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
   /// vector.  If it is invalid, don't add anything to Ops. If hasMemory is true
   /// it means one of the asm constraint of the inline asm instruction being
@@ -1343,11 +1343,11 @@
                                             bool hasMemory,
                                             std::vector<SDValue> &Ops,
                                             SelectionDAG &DAG) const;
-  
+
   //===--------------------------------------------------------------------===//
   // Instruction Emitting Hooks
   //
-  
+
   // EmitInstrWithCustomInserter - This method should be implemented by targets
   // that mark instructions with the 'usesCustomInserter' flag.  These
   // instructions are special in various ways, which require special support to
@@ -1376,7 +1376,7 @@
     int64_t      Scale;
     AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
   };
-  
+
   /// isLegalAddressingMode - Return true if the addressing mode represented by
   /// AM is legal for this target, for a load/store of the specified type.
   /// The type may be VoidTy, in which case only return true if the addressing
@@ -1429,9 +1429,9 @@
   //===--------------------------------------------------------------------===//
   // Div utility functions
   //
-  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, 
+  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG,
                       std::vector<SDNode*>* Created) const;
-  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, 
+  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG,
                       std::vector<SDNode*>* Created) const;
 
 
@@ -1468,7 +1468,7 @@
   void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
     LibcallCallingConvs[Call] = CC;
   }
-  
+
   /// getLibcallCallingConv - Get the CallingConv that should be used for the
   /// specified libcall.
   CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
@@ -1497,12 +1497,12 @@
   /// a real cost model is in place.  If we ever optimize for size, this will be
   /// set to true unconditionally.
   bool IntDivIsCheap;
-  
+
   /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
   /// srl/add/sra for a signed divide by power of two, and let the target handle
   /// it.
   bool Pow2DivIsCheap;
-  
+
   /// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
   /// llvm.setjmp.  Defaults to false.
   bool UseUnderscoreSetJmp;
@@ -1522,10 +1522,10 @@
   /// SchedPreferenceInfo - The target scheduling preference: shortest possible
   /// total cycles or lowest register usage.
   Sched::Preference SchedPreferenceInfo;
-  
+
   /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
   unsigned JumpBufSize;
-  
+
   /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
   /// buffers
   unsigned JumpBufAlignment;
@@ -1533,7 +1533,7 @@
   /// IfCvtBlockSizeLimit - The maximum allowed size for a block to be
   /// if-converted.
   unsigned IfCvtBlockSizeLimit;
-  
+
   /// IfCvtDupBlockSizeLimit - The maximum allowed size for a block to be
   /// duplicated during if-conversion.
   unsigned IfCvtDupBlockSizeLimit;
@@ -1581,12 +1581,12 @@
   /// operations that are not should be described.  Note that operations on
   /// non-legal value types are not described here.
   uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
-  
+
   /// LoadExtActions - For each load extension type and each value type,
   /// keep a LegalizeAction that indicates how instruction selection should deal
   /// with a load of a specific value type and extension type.
   uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE];
-  
+
   /// TruncStoreActions - For each value type pair keep a LegalizeAction that
   /// indicates whether a truncating store of a specific value type and
   /// truncating type is legal.
@@ -1598,7 +1598,7 @@
   /// value_type for the reference. The second dimension represents the various
   /// modes for load store.
   uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
-  
+
   /// CondCodeActions - For each condition code (ISD::CondCode) keep a
   /// LegalizeAction that indicates how instruction selection should
   /// deal with the condition code.
@@ -1613,7 +1613,7 @@
   /// which sets a bit in this array.
   unsigned char
   TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
-  
+
   /// PromoteToType - For operations that must be promoted to a specific type,
   /// this holds the destination type.  This map should be sparse, so don't hold
   /// it as an array.

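The change itself is purely mechanical: every hunk above only drops spaces and tabs at end of line, with no code change. As a rough illustration (not how this commit was actually produced -- the log gives no tooling details), a small standalone C++ helper that performs the same cleanup on one file could look like the sketch below; the file name strip_trailing_ws.cpp and its command-line interface are invented for this example.

// strip_trailing_ws.cpp - Hypothetical standalone helper, not part of this
// commit; shown only to illustrate the mechanical nature of the cleanup.
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

int main(int argc, char **argv) {
  if (argc != 2) {
    std::cerr << "usage: strip_trailing_ws <file>\n";
    return 1;
  }

  std::ifstream In(argv[1]);
  if (!In) {
    std::cerr << "error: cannot open " << argv[1] << "\n";
    return 1;
  }

  // Read every line, dropping any trailing spaces and tabs.
  std::vector<std::string> Lines;
  std::string Line;
  while (std::getline(In, Line)) {
    std::string::size_type End = Line.find_last_not_of(" \t");
    if (End == std::string::npos)
      Line.clear();            // line was empty or all whitespace
    else
      Line.erase(End + 1);     // keep everything up to the last non-blank char
    Lines.push_back(Line);
  }
  In.close();

  // Rewrite the file in place with the cleaned lines.
  std::ofstream Out(argv[1]);
  for (std::vector<std::string>::const_iterator I = Lines.begin(),
                                                E = Lines.end(); I != E; ++I)
    Out << *I << '\n';
  return 0;
}

Run (hypothetically) as ./strip_trailing_ws include/llvm/Target/TargetLowering.h, this would yield the same kind of whitespace-only diff as shown above.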