[llvm] r201581 - Rename some member variables from TD to DL.

Rafael Espindola rafael.espindola at gmail.com
Tue Feb 18 07:33:13 PST 2014


Author: rafael
Date: Tue Feb 18 09:33:12 2014
New Revision: 201581

URL: http://llvm.org/viewvc/llvm-project?rev=201581&view=rev
Log:
Rename some member variables from TD to DL.

TargetData was renamed DataLayout back in r165242.
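
The change below is purely mechanical: each file swaps the member spelling from TD to DL while keeping the surrounding logic intact. As a rough illustration of the pattern (not code from this patch -- the class, the stand-in DataLayout, and the null-pointer guard here are hypothetical), a sketch of the renamed shape might look like this:

  #include <cstdint>
  #include <iostream>

  // Hypothetical stand-in for llvm::DataLayout, just enough for the sketch.
  struct DataLayout {
    uint64_t getTypeStoreSize(uint64_t SizeInBytes) const { return SizeInBytes; }
  };

  // Before this commit the pointer member was spelled TD (a leftover from the
  // old TargetData name); after it, the same member is spelled DL.
  class ExampleAnalysis {
    const DataLayout *DL;   // was: const DataLayout *TD;

  public:
    ExampleAnalysis() : DL(0) {}
    void setDataLayout(const DataLayout *Val) { DL = Val; }

    // Callers keep guarding against a missing DataLayout, exactly as before.
    uint64_t typeStoreSize(uint64_t SizeInBytes) const {
      return DL ? DL->getTypeStoreSize(SizeInBytes) : 0;
    }
  };

  int main() {
    DataLayout Layout;
    ExampleAnalysis EA;
    EA.setDataLayout(&Layout);
    std::cout << EA.typeStoreSize(8) << "\n";   // prints 8
    return 0;
  }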

Modified:
    llvm/trunk/include/llvm/Analysis/AliasAnalysis.h
    llvm/trunk/include/llvm/Analysis/IVUsers.h
    llvm/trunk/include/llvm/Analysis/InlineCost.h
    llvm/trunk/include/llvm/Analysis/LazyValueInfo.h
    llvm/trunk/include/llvm/Analysis/MemoryDependenceAnalysis.h
    llvm/trunk/include/llvm/Analysis/PHITransAddr.h
    llvm/trunk/include/llvm/Analysis/ScalarEvolution.h
    llvm/trunk/include/llvm/Analysis/ScalarEvolutionExpander.h
    llvm/trunk/include/llvm/CodeGen/IntrinsicLowering.h
    llvm/trunk/include/llvm/ExecutionEngine/ExecutionEngine.h
    llvm/trunk/include/llvm/Support/TargetFolder.h
    llvm/trunk/include/llvm/Target/TargetLowering.h
    llvm/trunk/include/llvm/Target/TargetSelectionDAGInfo.h
    llvm/trunk/include/llvm/Transforms/Utils/Cloning.h
    llvm/trunk/lib/Analysis/AliasAnalysis.cpp
    llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp
    llvm/trunk/lib/Analysis/IPA/InlineCost.cpp
    llvm/trunk/lib/Analysis/IVUsers.cpp
    llvm/trunk/lib/Analysis/LazyValueInfo.cpp
    llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp
    llvm/trunk/lib/Analysis/NoAliasAnalysis.cpp
    llvm/trunk/lib/Analysis/PHITransAddr.cpp
    llvm/trunk/lib/Analysis/ScalarEvolution.cpp
    llvm/trunk/lib/Analysis/ScalarEvolutionExpander.cpp
    llvm/trunk/lib/CodeGen/IntrinsicLowering.cpp
    llvm/trunk/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
    llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp
    llvm/trunk/lib/ExecutionEngine/ExecutionEngine.cpp
    llvm/trunk/lib/Transforms/Utils/InlineFunction.cpp

Modified: llvm/trunk/include/llvm/Analysis/AliasAnalysis.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/AliasAnalysis.h?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/AliasAnalysis.h (original)
+++ llvm/trunk/include/llvm/Analysis/AliasAnalysis.h Tue Feb 18 09:33:12 2014
@@ -55,7 +55,7 @@ class DominatorTree;
 
 class AliasAnalysis {
 protected:
-  const DataLayout *TD;
+  const DataLayout *DL;
   const TargetLibraryInfo *TLI;
 
 private:
@@ -75,7 +75,7 @@ protected:
 
 public:
   static char ID; // Class identification, replacement for typeinfo
-  AliasAnalysis() : TD(0), TLI(0), AA(0) {}
+  AliasAnalysis() : DL(0), TLI(0), AA(0) {}
   virtual ~AliasAnalysis();  // We want to be subclassed
 
   /// UnknownSize - This is a special value which can be used with the
@@ -86,7 +86,7 @@ public:
   /// getDataLayout - Return a pointer to the current DataLayout object, or
   /// null if no DataLayout object is available.
   ///
-  const DataLayout *getDataLayout() const { return TD; }
+  const DataLayout *getDataLayout() const { return DL; }
 
   /// getTargetLibraryInfo - Return a pointer to the current TargetLibraryInfo
   /// object, or null if no TargetLibraryInfo object is available.

Modified: llvm/trunk/include/llvm/Analysis/IVUsers.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/IVUsers.h?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/IVUsers.h (original)
+++ llvm/trunk/include/llvm/Analysis/IVUsers.h Tue Feb 18 09:33:12 2014
@@ -122,7 +122,7 @@ class IVUsers : public LoopPass {
   LoopInfo *LI;
   DominatorTree *DT;
   ScalarEvolution *SE;
-  DataLayout *TD;
+  DataLayout *DL;
   SmallPtrSet<Instruction*,16> Processed;
 
   /// IVUses - A list of all tracked IV uses of induction variable expressions

Modified: llvm/trunk/include/llvm/Analysis/InlineCost.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/InlineCost.h?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/InlineCost.h (original)
+++ llvm/trunk/include/llvm/Analysis/InlineCost.h Tue Feb 18 09:33:12 2014
@@ -99,7 +99,7 @@ public:
 
 /// \brief Cost analyzer used by inliner.
 class InlineCostAnalysis : public CallGraphSCCPass {
-  const DataLayout *TD;
+  const DataLayout *DL;
   const TargetTransformInfo *TTI;
 
 public:

Modified: llvm/trunk/include/llvm/Analysis/LazyValueInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/LazyValueInfo.h?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/LazyValueInfo.h (original)
+++ llvm/trunk/include/llvm/Analysis/LazyValueInfo.h Tue Feb 18 09:33:12 2014
@@ -26,7 +26,7 @@ namespace llvm {
 /// LazyValueInfo - This pass computes, caches, and vends lazy value constraint
 /// information.
 class LazyValueInfo : public FunctionPass {
-  class DataLayout *TD;
+  class DataLayout *DL;
   class TargetLibraryInfo *TLI;
   void *PImpl;
   LazyValueInfo(const LazyValueInfo&) LLVM_DELETED_FUNCTION;

Modified: llvm/trunk/include/llvm/Analysis/MemoryDependenceAnalysis.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/MemoryDependenceAnalysis.h?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/MemoryDependenceAnalysis.h (original)
+++ llvm/trunk/include/llvm/Analysis/MemoryDependenceAnalysis.h Tue Feb 18 09:33:12 2014
@@ -323,7 +323,7 @@ namespace llvm {
 
     /// Current AA implementation, just a cache.
     AliasAnalysis *AA;
-    DataLayout *TD;
+    DataLayout *DL;
     DominatorTree *DT;
     OwningPtr<PredIteratorCache> PredCache;
   public:
@@ -415,7 +415,7 @@ namespace llvm {
                                                     int64_t MemLocOffs,
                                                     unsigned MemLocSize,
                                                     const LoadInst *LI,
-                                                    const DataLayout &TD);
+                                                    const DataLayout &DL);
 
   private:
     MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall,

Modified: llvm/trunk/include/llvm/Analysis/PHITransAddr.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/PHITransAddr.h?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/PHITransAddr.h (original)
+++ llvm/trunk/include/llvm/Analysis/PHITransAddr.h Tue Feb 18 09:33:12 2014
@@ -36,8 +36,8 @@ class PHITransAddr {
   /// Addr - The actual address we're analyzing.
   Value *Addr;
   
-  /// TD - The target data we are playing with if known, otherwise null.
-  const DataLayout *TD;
+  /// The DataLayout we are playing with if known, otherwise null.
+  const DataLayout *DL;
 
   /// TLI - The target library info if known, otherwise null.
   const TargetLibraryInfo *TLI;
@@ -45,7 +45,7 @@ class PHITransAddr {
   /// InstInputs - The inputs for our symbolic address.
   SmallVector<Instruction*, 4> InstInputs;
 public:
-  PHITransAddr(Value *addr, const DataLayout *td) : Addr(addr), TD(td), TLI(0) {
+  PHITransAddr(Value *addr, const DataLayout *DL) : Addr(addr), DL(DL), TLI(0) {
     // If the address is an instruction, the whole thing is considered an input.
     if (Instruction *I = dyn_cast<Instruction>(Addr))
       InstInputs.push_back(I);

Modified: llvm/trunk/include/llvm/Analysis/ScalarEvolution.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/ScalarEvolution.h?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/ScalarEvolution.h (original)
+++ llvm/trunk/include/llvm/Analysis/ScalarEvolution.h Tue Feb 18 09:33:12 2014
@@ -225,9 +225,9 @@ namespace llvm {
     ///
     LoopInfo *LI;
 
-    /// TD - The target data information for the target we are targeting.
+    /// The DataLayout information for the target we are targeting.
     ///
-    DataLayout *TD;
+    DataLayout *DL;
 
     /// TLI - The target library information for the target we are targeting.
     ///

Modified: llvm/trunk/include/llvm/Analysis/ScalarEvolutionExpander.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/ScalarEvolutionExpander.h?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/ScalarEvolutionExpander.h (original)
+++ llvm/trunk/include/llvm/Analysis/ScalarEvolutionExpander.h Tue Feb 18 09:33:12 2014
@@ -94,7 +94,7 @@ namespace llvm {
     explicit SCEVExpander(ScalarEvolution &se, const char *name)
       : SE(se), IVName(name), IVIncInsertLoop(0), IVIncInsertPos(0),
         CanonicalMode(true), LSRMode(false),
-        Builder(se.getContext(), TargetFolder(se.TD)) {
+        Builder(se.getContext(), TargetFolder(se.DL)) {
 #ifndef NDEBUG
       DebugType = "";
 #endif

Modified: llvm/trunk/include/llvm/CodeGen/IntrinsicLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/IntrinsicLowering.h?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/IntrinsicLowering.h (original)
+++ llvm/trunk/include/llvm/CodeGen/IntrinsicLowering.h Tue Feb 18 09:33:12 2014
@@ -24,13 +24,13 @@ namespace llvm {
   class DataLayout;
 
   class IntrinsicLowering {
-    const DataLayout& TD;
+    const DataLayout& DL;
 
     
     bool Warned;
   public:
-    explicit IntrinsicLowering(const DataLayout &td) :
-      TD(td), Warned(false) {}
+    explicit IntrinsicLowering(const DataLayout &DL) :
+      DL(DL), Warned(false) {}
 
     /// AddPrototypes - This method, if called, causes all of the prototypes
     /// that might be needed by an intrinsic lowering implementation to be

Modified: llvm/trunk/include/llvm/ExecutionEngine/ExecutionEngine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/ExecutionEngine/ExecutionEngine.h?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/include/llvm/ExecutionEngine/ExecutionEngine.h (original)
+++ llvm/trunk/include/llvm/ExecutionEngine/ExecutionEngine.h Tue Feb 18 09:33:12 2014
@@ -111,7 +111,7 @@ class ExecutionEngine {
   ExecutionEngineState EEState;
 
   /// The target data for the platform for which execution is being performed.
-  const DataLayout *TD;
+  const DataLayout *DL;
 
   /// Whether lazy JIT compilation is enabled.
   bool CompilingLazily;
@@ -130,7 +130,7 @@ protected:
   /// optimize for the case where there is only one module.
   SmallVector<Module*, 1> Modules;
 
-  void setDataLayout(const DataLayout *td) { TD = td; }
+  void setDataLayout(const DataLayout *Val) { DL = Val; }
 
   /// getMemoryforGV - Allocate memory for a global variable.
   virtual char *getMemoryForGV(const GlobalVariable *GV);
@@ -238,7 +238,7 @@ public:
 
   //===--------------------------------------------------------------------===//
 
-  const DataLayout *getDataLayout() const { return TD; }
+  const DataLayout *getDataLayout() const { return DL; }
 
   /// removeModule - Remove a Module from the list of modules.  Returns true if
   /// M is found.

Modified: llvm/trunk/include/llvm/Support/TargetFolder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Support/TargetFolder.h?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Support/TargetFolder.h (original)
+++ llvm/trunk/include/llvm/Support/TargetFolder.h Tue Feb 18 09:33:12 2014
@@ -30,18 +30,18 @@ class DataLayout;
 
 /// TargetFolder - Create constants with target dependent folding.
 class TargetFolder {
-  const DataLayout *TD;
+  const DataLayout *DL;
 
   /// Fold - Fold the constant using target specific information.
   Constant *Fold(Constant *C) const {
     if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
-      if (Constant *CF = ConstantFoldConstantExpression(CE, TD))
+      if (Constant *CF = ConstantFoldConstantExpression(CE, DL))
         return CF;
     return C;
   }
 
 public:
-  explicit TargetFolder(const DataLayout *TheTD) : TD(TheTD) {}
+  explicit TargetFolder(const DataLayout *DL) : DL(DL) {}
 
   //===--------------------------------------------------------------------===//
   // Binary Operators

Modified: llvm/trunk/include/llvm/Target/TargetLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Target/TargetLowering.h?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Target/TargetLowering.h (original)
+++ llvm/trunk/include/llvm/Target/TargetLowering.h Tue Feb 18 09:33:12 2014
@@ -143,7 +143,7 @@ protected:
 
 public:
   const TargetMachine &getTargetMachine() const { return TM; }
-  const DataLayout *getDataLayout() const { return TD; }
+  const DataLayout *getDataLayout() const { return DL; }
   const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }
 
   bool isBigEndian() const { return !IsLittleEndian; }
@@ -1337,7 +1337,7 @@ public:
 
 private:
   const TargetMachine &TM;
-  const DataLayout *TD;
+  const DataLayout *DL;
   const TargetLoweringObjectFile &TLOF;
 
   /// True if this is a little endian target.

Modified: llvm/trunk/include/llvm/Target/TargetSelectionDAGInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Target/TargetSelectionDAGInfo.h?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Target/TargetSelectionDAGInfo.h (original)
+++ llvm/trunk/include/llvm/Target/TargetSelectionDAGInfo.h Tue Feb 18 09:33:12 2014
@@ -31,10 +31,10 @@ class TargetSelectionDAGInfo {
   TargetSelectionDAGInfo(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;
   void operator=(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;
 
-  const DataLayout *TD;
+  const DataLayout *DL;
 
 protected:
-  const DataLayout *getDataLayout() const { return TD; }
+  const DataLayout *getDataLayout() const { return DL; }
 
 public:
   explicit TargetSelectionDAGInfo(const TargetMachine &TM);

Modified: llvm/trunk/include/llvm/Transforms/Utils/Cloning.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Transforms/Utils/Cloning.h?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Transforms/Utils/Cloning.h (original)
+++ llvm/trunk/include/llvm/Transforms/Utils/Cloning.h Tue Feb 18 09:33:12 2014
@@ -151,7 +151,7 @@ void CloneAndPruneFunctionInto(Function
                                SmallVectorImpl<ReturnInst*> &Returns,
                                const char *NameSuffix = "", 
                                ClonedCodeInfo *CodeInfo = 0,
-                               const DataLayout *TD = 0,
+                               const DataLayout *DL = 0,
                                Instruction *TheCall = 0);
 
   
@@ -159,13 +159,13 @@ void CloneAndPruneFunctionInto(Function
 /// InlineFunction call, and records the auxiliary results produced by it. 
 class InlineFunctionInfo {
 public:
-  explicit InlineFunctionInfo(CallGraph *cg = 0, const DataLayout *td = 0)
-    : CG(cg), TD(td) {}
+  explicit InlineFunctionInfo(CallGraph *cg = 0, const DataLayout *DL = 0)
+    : CG(cg), DL(DL) {}
   
   /// CG - If non-null, InlineFunction will update the callgraph to reflect the
   /// changes it makes.
   CallGraph *CG;
-  const DataLayout *TD;
+  const DataLayout *DL;
 
   /// StaticAllocas - InlineFunction fills this in with all static allocas that
   /// get copied into the caller.

Modified: llvm/trunk/lib/Analysis/AliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/AliasAnalysis.cpp?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/AliasAnalysis.cpp (original)
+++ llvm/trunk/lib/Analysis/AliasAnalysis.cpp Tue Feb 18 09:33:12 2014
@@ -416,9 +416,9 @@ AliasAnalysis::ModRefResult
 AliasAnalysis::callCapturesBefore(const Instruction *I,
                                   const AliasAnalysis::Location &MemLoc,
                                   DominatorTree *DT) {
-  if (!DT || !TD) return AliasAnalysis::ModRef;
+  if (!DT || !DL) return AliasAnalysis::ModRef;
 
-  const Value *Object = GetUnderlyingObject(MemLoc.Ptr, TD);
+  const Value *Object = GetUnderlyingObject(MemLoc.Ptr, DL);
   if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
       isa<Constant>(Object))
     return AliasAnalysis::ModRef;
@@ -472,7 +472,7 @@ AliasAnalysis::~AliasAnalysis() {}
 /// AliasAnalysis interface before any other methods are called.
 ///
 void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
-  TD = P->getAnalysisIfAvailable<DataLayout>();
+  DL = P->getAnalysisIfAvailable<DataLayout>();
   TLI = P->getAnalysisIfAvailable<TargetLibraryInfo>();
   AA = &P->getAnalysis<AliasAnalysis>();
 }
@@ -487,7 +487,7 @@ void AliasAnalysis::getAnalysisUsage(Ana
 /// if known, or a conservative value otherwise.
 ///
 uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
-  return TD ? TD->getTypeStoreSize(Ty) : UnknownSize;
+  return DL ? DL->getTypeStoreSize(Ty) : UnknownSize;
 }
 
 /// canBasicBlockModify - Return true if it is possible for execution of the

Modified: llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp (original)
+++ llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp Tue Feb 18 09:33:12 2014
@@ -593,7 +593,7 @@ BasicAliasAnalysis::pointsToConstantMemo
   SmallVector<const Value *, 16> Worklist;
   Worklist.push_back(Loc.Ptr);
   do {
-    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), TD);
+    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
     if (!Visited.insert(V)) {
       Visited.clear();
       return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
@@ -698,7 +698,7 @@ BasicAliasAnalysis::getModRefInfo(Immuta
   assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
          "AliasAnalysis query involving multiple functions!");
 
-  const Value *Object = GetUnderlyingObject(Loc.Ptr, TD);
+  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);
 
   // If this is a tail call and Loc.Ptr points to a stack location, we know that
   // the tail call cannot access or modify the local stack.
@@ -805,7 +805,7 @@ BasicAliasAnalysis::getModRefInfo(Immuta
       // LLVM's vld1 and vst1 intrinsics currently only support a single
       // vector register.
       uint64_t Size =
-        TD ? TD->getTypeStoreSize(II->getType()) : UnknownSize;
+        DL ? DL->getTypeStoreSize(II->getType()) : UnknownSize;
       if (isNoAlias(Location(II->getArgOperand(0), Size,
                              II->getMetadata(LLVMContext::MD_tbaa)),
                     Loc))
@@ -814,7 +814,7 @@ BasicAliasAnalysis::getModRefInfo(Immuta
     }
     case Intrinsic::arm_neon_vst1: {
       uint64_t Size =
-        TD ? TD->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
+        DL ? DL->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
       if (isNoAlias(Location(II->getArgOperand(0), Size,
                              II->getMetadata(LLVMContext::MD_tbaa)),
                     Loc))
@@ -877,7 +877,7 @@ static bool areVarIndicesEqual(SmallVect
 
 /// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
 /// against another pointer.  We know that V1 is a GEP, but we don't know
-/// anything about V2.  UnderlyingV1 is GetUnderlyingObject(GEP1, TD),
+/// anything about V2.  UnderlyingV1 is GetUnderlyingObject(GEP1, DL),
 /// UnderlyingV2 is the same for V2.
 ///
 AliasAnalysis::AliasResult
@@ -911,13 +911,13 @@ BasicAliasAnalysis::aliasGEP(const GEPOp
         int64_t GEP2BaseOffset;
         SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
         const Value *GEP2BasePtr =
-          DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
+          DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, DL);
         const Value *GEP1BasePtr =
-          DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+          DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, DL);
         // DecomposeGEPExpression and GetUnderlyingObject should return the
         // same result except when DecomposeGEPExpression has no DataLayout.
         if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
-          assert(TD == 0 &&
+          assert(DL == 0 &&
              "DecomposeGEPExpression and GetUnderlyingObject disagree!");
           return MayAlias;
         }
@@ -937,17 +937,17 @@ BasicAliasAnalysis::aliasGEP(const GEPOp
     // exactly, see if the computed offset from the common pointer tells us
     // about the relation of the resulting pointer.
     const Value *GEP1BasePtr =
-      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, DL);
 
     int64_t GEP2BaseOffset;
     SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
     const Value *GEP2BasePtr =
-      DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
+      DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, DL);
 
     // DecomposeGEPExpression and GetUnderlyingObject should return the
     // same result except when DecomposeGEPExpression has no DataLayout.
     if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
-      assert(TD == 0 &&
+      assert(DL == 0 &&
              "DecomposeGEPExpression and GetUnderlyingObject disagree!");
       return MayAlias;
     }
@@ -977,12 +977,12 @@ BasicAliasAnalysis::aliasGEP(const GEPOp
       return R;
 
     const Value *GEP1BasePtr =
-      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, DL);
 
     // DecomposeGEPExpression and GetUnderlyingObject should return the
     // same result except when DecomposeGEPExpression has no DataLayout.
     if (GEP1BasePtr != UnderlyingV1) {
-      assert(TD == 0 &&
+      assert(DL == 0 &&
              "DecomposeGEPExpression and GetUnderlyingObject disagree!");
       return MayAlias;
     }
@@ -1215,8 +1215,8 @@ BasicAliasAnalysis::aliasCheck(const Val
     return NoAlias;  // Scalars cannot alias each other
 
   // Figure out what objects these things are pointing to if we can.
-  const Value *O1 = GetUnderlyingObject(V1, TD);
-  const Value *O2 = GetUnderlyingObject(V2, TD);
+  const Value *O1 = GetUnderlyingObject(V1, DL);
+  const Value *O2 = GetUnderlyingObject(V2, DL);
 
   // Null values in the default address space don't point to any object, so they
   // don't alias any other pointer.
@@ -1265,9 +1265,9 @@ BasicAliasAnalysis::aliasCheck(const Val
 
   // If the size of one access is larger than the entire object on the other
   // side, then we know such behavior is undefined and can assume no alias.
-  if (TD)
-    if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD, *TLI)) ||
-        (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD, *TLI)))
+  if (DL)
+    if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
+        (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
       return NoAlias;
 
   // Check the cache before climbing up use-def chains. This also terminates
@@ -1319,9 +1319,9 @@ BasicAliasAnalysis::aliasCheck(const Val
   // If both pointers are pointing into the same object and one of them
   // accesses is accessing the entire object, then the accesses must
   // overlap in some way.
-  if (TD && O1 == O2)
-    if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD, *TLI)) ||
-        (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD, *TLI)))
+  if (DL && O1 == O2)
+    if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *DL, *TLI)) ||
+        (V2Size != UnknownSize && isObjectSize(O2, V2Size, *DL, *TLI)))
       return AliasCache[Locs] = PartialAlias;
 
   AliasResult Result =

Modified: llvm/trunk/lib/Analysis/IPA/InlineCost.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/IPA/InlineCost.cpp?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/IPA/InlineCost.cpp (original)
+++ llvm/trunk/lib/Analysis/IPA/InlineCost.cpp Tue Feb 18 09:33:12 2014
@@ -1203,7 +1203,7 @@ INITIALIZE_PASS_END(InlineCostAnalysis,
 
 char InlineCostAnalysis::ID = 0;
 
-InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID), TD(0) {}
+InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID), DL(0) {}
 
 InlineCostAnalysis::~InlineCostAnalysis() {}
 
@@ -1214,7 +1214,7 @@ void InlineCostAnalysis::getAnalysisUsag
 }
 
 bool InlineCostAnalysis::runOnSCC(CallGraphSCC &SCC) {
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();
   TTI = &getAnalysis<TargetTransformInfo>();
   return false;
 }
@@ -1272,7 +1272,7 @@ InlineCost InlineCostAnalysis::getInline
   DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
         << "...\n");
 
-  CallAnalyzer CA(TD, *TTI, *Callee, Threshold);
+  CallAnalyzer CA(DL, *TTI, *Callee, Threshold);
   bool ShouldInline = CA.analyzeCall(CS);
 
   DEBUG(CA.dump());

Modified: llvm/trunk/lib/Analysis/IVUsers.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/IVUsers.cpp?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/IVUsers.cpp (original)
+++ llvm/trunk/lib/Analysis/IVUsers.cpp Tue Feb 18 09:33:12 2014
@@ -123,14 +123,14 @@ bool IVUsers::AddUsersImpl(Instruction *
   // IVUsers is used by LSR which assumes that all SCEV expressions are safe to
   // pass to SCEVExpander. Expressions are not safe to expand if they represent
   // operations that are not safe to speculate, namely integer division.
-  if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I, TD))
+  if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I, DL))
     return false;
 
   // LSR is not APInt clean, do not touch integers bigger than 64-bits.
   // Also avoid creating IVs of non-native types. For example, we don't want a
   // 64-bit IV in 32-bit code just because the loop has one 64-bit cast.
   uint64_t Width = SE->getTypeSizeInBits(I->getType());
-  if (Width > 64 || (TD && !TD->isLegalInteger(Width)))
+  if (Width > 64 || (DL && !DL->isLegalInteger(Width)))
     return false;
 
   // Get the symbolic expression for this instruction.
@@ -234,7 +234,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassM
   LI = &getAnalysis<LoopInfo>();
   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
   SE = &getAnalysis<ScalarEvolution>();
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();
 
   // Find all uses of induction variables in this loop, and categorize
   // them by stride.  Start by finding all of the PHI nodes in the header for

Modified: llvm/trunk/lib/Analysis/LazyValueInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/LazyValueInfo.cpp?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/LazyValueInfo.cpp (original)
+++ llvm/trunk/lib/Analysis/LazyValueInfo.cpp Tue Feb 18 09:33:12 2014
@@ -1013,7 +1013,7 @@ bool LazyValueInfo::runOnFunction(Functi
   if (PImpl)
     getCache(PImpl).clear();
 
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
 
   // Fully lazy.
@@ -1073,7 +1073,7 @@ LazyValueInfo::getPredicateOnEdge(unsign
   // If we know the value is a constant, evaluate the conditional.
   Constant *Res = 0;
   if (Result.isConstant()) {
-    Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, TD,
+    Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, DL,
                                           TLI);
     if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
       return ResCI->isZero() ? False : True;
@@ -1115,14 +1115,14 @@ LazyValueInfo::getPredicateOnEdge(unsign
     if (Pred == ICmpInst::ICMP_EQ) {
       // !C1 == C -> false iff C1 == C.
       Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
-                                            Result.getNotConstant(), C, TD,
+                                            Result.getNotConstant(), C, DL,
                                             TLI);
       if (Res->isNullValue())
         return False;
     } else if (Pred == ICmpInst::ICMP_NE) {
       // !C1 != C -> true iff C1 == C.
       Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
-                                            Result.getNotConstant(), C, TD,
+                                            Result.getNotConstant(), C, DL,
                                             TLI);
       if (Res->isNullValue())
         return True;

Modified: llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp (original)
+++ llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp Tue Feb 18 09:33:12 2014
@@ -87,7 +87,7 @@ void MemoryDependenceAnalysis::getAnalys
 
 bool MemoryDependenceAnalysis::runOnFunction(Function &) {
   AA = &getAnalysis<AliasAnalysis>();
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();
   DominatorTreeWrapperPass *DTWP =
       getAnalysisIfAvailable<DominatorTreeWrapperPass>();
   DT = DTWP ? &DTWP->getDomTree() : 0;
@@ -258,17 +258,17 @@ isLoadLoadClobberIfExtendedToFullWidth(c
                                        const Value *&MemLocBase,
                                        int64_t &MemLocOffs,
                                        const LoadInst *LI,
-                                       const DataLayout *TD) {
+                                       const DataLayout *DL) {
   // If we have no target data, we can't do this.
-  if (TD == 0) return false;
+  if (DL == 0) return false;
 
   // If we haven't already computed the base/offset of MemLoc, do so now.
   if (MemLocBase == 0)
-    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, TD);
+    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);
 
   unsigned Size = MemoryDependenceAnalysis::
     getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
-                                    LI, *TD);
+                                    LI, *DL);
   return Size != 0;
 }
 
@@ -282,7 +282,7 @@ isLoadLoadClobberIfExtendedToFullWidth(c
 unsigned MemoryDependenceAnalysis::
 getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
                                 unsigned MemLocSize, const LoadInst *LI,
-                                const DataLayout &TD) {
+                                const DataLayout &DL) {
   // We can only extend simple integer loads.
   if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;
 
@@ -295,7 +295,7 @@ getLoadLoadClobberFullWidthSize(const Va
   // Get the base of this load.
   int64_t LIOffs = 0;
   const Value *LIBase =
-    GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &TD);
+    GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &DL);
 
   // If the two pointers are not based on the same pointer, we can't tell that
   // they are related.
@@ -331,7 +331,7 @@ getLoadLoadClobberFullWidthSize(const Va
     // If this load size is bigger than our known alignment or would not fit
     // into a native integer register, then we fail.
     if (NewLoadByteSize > LoadAlign ||
-        !TD.fitsInLegalInteger(NewLoadByteSize*8))
+        !DL.fitsInLegalInteger(NewLoadByteSize*8))
       return 0;
 
     if (LIOffs+NewLoadByteSize > MemLocEnd &&
@@ -424,7 +424,7 @@ getPointerDependencyFrom(const AliasAnal
           if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
             if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
                 isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
-                                                       MemLocOffset, LI, TD))
+                                                       MemLocOffset, LI, DL))
               return MemDepResult::getClobber(Inst);
 
           continue;
@@ -500,7 +500,7 @@ getPointerDependencyFrom(const AliasAnal
     // need to continue scanning until the malloc call.
     const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo();
     if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
-      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);
+      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);
 
       if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
         return MemDepResult::getDef(Inst);
@@ -773,7 +773,7 @@ getNonLocalPointerDependency(const Alias
          "Can't get pointer deps of a non-pointer!");
   Result.clear();
 
-  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), TD);
+  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL);
 
   // This is the set of blocks we've inspected, and the pointer we consider in
   // each block.  Because of critical edges, we currently bail out if querying

Modified: llvm/trunk/lib/Analysis/NoAliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/NoAliasAnalysis.cpp?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/NoAliasAnalysis.cpp (original)
+++ llvm/trunk/lib/Analysis/NoAliasAnalysis.cpp Tue Feb 18 09:33:12 2014
@@ -36,7 +36,7 @@ namespace {
     virtual void initializePass() {
       // Note: NoAA does not call InitializeAliasAnalysis because it's
       // special and does not support chaining.
-      TD = getAnalysisIfAvailable<DataLayout>();
+      DL = getAnalysisIfAvailable<DataLayout>();
     }
 
     virtual AliasResult alias(const Location &LocA, const Location &LocB) {

Modified: llvm/trunk/lib/Analysis/PHITransAddr.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/PHITransAddr.cpp?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/PHITransAddr.cpp (original)
+++ llvm/trunk/lib/Analysis/PHITransAddr.cpp Tue Feb 18 09:33:12 2014
@@ -229,7 +229,7 @@ Value *PHITransAddr::PHITranslateSubExpr
       return GEP;
 
     // Simplify the GEP to handle 'gep x, 0' -> x etc.
-    if (Value *V = SimplifyGEPInst(GEPOps, TD, TLI, DT)) {
+    if (Value *V = SimplifyGEPInst(GEPOps, DL, TLI, DT)) {
       for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
         RemoveInstInputs(GEPOps[i], InstInputs);
 
@@ -285,7 +285,7 @@ Value *PHITransAddr::PHITranslateSubExpr
         }
 
     // See if the add simplifies away.
-    if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, TD, TLI, DT)) {
+    if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, DL, TLI, DT)) {
       // If we simplified the operands, the LHS is no longer an input, but Res
       // is.
       RemoveInstInputs(LHS, InstInputs);
@@ -372,7 +372,7 @@ InsertPHITranslatedSubExpr(Value *InVal,
                            SmallVectorImpl<Instruction*> &NewInsts) {
   // See if we have a version of this value already available and dominating
   // PredBB.  If so, there is no need to insert a new instance of it.
-  PHITransAddr Tmp(InVal, TD);
+  PHITransAddr Tmp(InVal, DL);
   if (!Tmp.PHITranslateValue(CurBB, PredBB, &DT))
     return Tmp.getAddr();
 

Modified: llvm/trunk/lib/Analysis/ScalarEvolution.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ScalarEvolution.cpp?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/ScalarEvolution.cpp (original)
+++ llvm/trunk/lib/Analysis/ScalarEvolution.cpp Tue Feb 18 09:33:12 2014
@@ -2663,12 +2663,12 @@ const SCEV *ScalarEvolution::getSizeOfEx
   // If we have DataLayout, we can bypass creating a target-independent
   // constant expression and then folding it back into a ConstantInt.
   // This is just a compile-time optimization.
-  if (TD)
-    return getConstant(IntTy, TD->getTypeAllocSize(AllocTy));
+  if (DL)
+    return getConstant(IntTy, DL->getTypeAllocSize(AllocTy));
 
   Constant *C = ConstantExpr::getSizeOf(AllocTy);
   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
-    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
+    if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
       C = Folded;
   Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
   assert(Ty == IntTy && "Effective SCEV type doesn't match");
@@ -2681,14 +2681,14 @@ const SCEV *ScalarEvolution::getOffsetOf
   // If we have DataLayout, we can bypass creating a target-independent
   // constant expression and then folding it back into a ConstantInt.
   // This is just a compile-time optimization.
-  if (TD) {
+  if (DL) {
     return getConstant(IntTy,
-                       TD->getStructLayout(STy)->getElementOffset(FieldNo));
+                       DL->getStructLayout(STy)->getElementOffset(FieldNo));
   }
 
   Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
-    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
+    if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
       C = Folded;
 
   Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
@@ -2736,8 +2736,8 @@ uint64_t ScalarEvolution::getTypeSizeInB
   assert(isSCEVable(Ty) && "Type is not SCEVable!");
 
   // If we have a DataLayout, use it!
-  if (TD)
-    return TD->getTypeSizeInBits(Ty);
+  if (DL)
+    return DL->getTypeSizeInBits(Ty);
 
   // Integer types have fixed sizes.
   if (Ty->isIntegerTy())
@@ -2763,8 +2763,8 @@ Type *ScalarEvolution::getEffectiveSCEVT
   // The only other support type is pointer.
   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
 
-  if (TD)
-    return TD->getIntPtrType(Ty);
+  if (DL)
+    return DL->getIntPtrType(Ty);
 
   // Without DataLayout, conservatively assume pointers are 64-bit.
   return Type::getInt64Ty(getContext());
@@ -3232,7 +3232,7 @@ const SCEV *ScalarEvolution::createNodeF
   // PHI's incoming blocks are in a different loop, in which case doing so
   // risks breaking LCSSA form. Instcombine would normally zap these, but
   // it doesn't have DominatorTree information, so it may miss cases.
-  if (Value *V = SimplifyInstruction(PN, TD, TLI, DT))
+  if (Value *V = SimplifyInstruction(PN, DL, TLI, DT))
     if (LI->replacementPreservesLCSSAForm(PN, V))
       return getSCEV(V);
 
@@ -3503,7 +3503,7 @@ ScalarEvolution::getUnsignedRange(const
   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
     // For a SCEVUnknown, ask ValueTracking.
     APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
-    ComputeMaskedBits(U->getValue(), Zeros, Ones, TD);
+    ComputeMaskedBits(U->getValue(), Zeros, Ones, DL);
     if (Ones == ~Zeros + 1)
       return setUnsignedRange(U, ConservativeResult);
     return setUnsignedRange(U,
@@ -3653,9 +3653,9 @@ ScalarEvolution::getSignedRange(const SC
 
   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
     // For a SCEVUnknown, ask ValueTracking.
-    if (!U->getValue()->getType()->isIntegerTy() && !TD)
+    if (!U->getValue()->getType()->isIntegerTy() && !DL)
       return setSignedRange(U, ConservativeResult);
-    unsigned NS = ComputeNumSignBits(U->getValue(), TD);
+    unsigned NS = ComputeNumSignBits(U->getValue(), DL);
     if (NS <= 1)
       return setSignedRange(U, ConservativeResult);
     return setSignedRange(U, ConservativeResult.intersectWith(
@@ -3762,7 +3762,7 @@ const SCEV *ScalarEvolution::createSCEV(
       unsigned TZ = A.countTrailingZeros();
       unsigned BitWidth = A.getBitWidth();
       APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
-      ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD);
+      ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, DL);
 
       APInt EffectiveMask =
           APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
@@ -4956,7 +4956,7 @@ static PHINode *getConstantEvolvingPHI(V
 /// reason, return null.
 static Constant *EvaluateExpression(Value *V, const Loop *L,
                                     DenseMap<Instruction *, Constant *> &Vals,
-                                    const DataLayout *TD,
+                                    const DataLayout *DL,
                                     const TargetLibraryInfo *TLI) {
   // Convenient constant check, but redundant for recursive calls.
   if (Constant *C = dyn_cast<Constant>(V)) return C;
@@ -4983,7 +4983,7 @@ static Constant *EvaluateExpression(Valu
       if (!Operands[i]) return 0;
       continue;
     }
-    Constant *C = EvaluateExpression(Operand, L, Vals, TD, TLI);
+    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
     Vals[Operand] = C;
     if (!C) return 0;
     Operands[i] = C;
@@ -4991,12 +4991,12 @@ static Constant *EvaluateExpression(Valu
 
   if (CmpInst *CI = dyn_cast<CmpInst>(I))
     return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
-                                           Operands[1], TD, TLI);
+                                           Operands[1], DL, TLI);
   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
     if (!LI->isVolatile())
-      return ConstantFoldLoadFromConstPtr(Operands[0], TD);
+      return ConstantFoldLoadFromConstPtr(Operands[0], DL);
   }
-  return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD,
+  return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, DL,
                                   TLI);
 }
 
@@ -5052,7 +5052,7 @@ ScalarEvolution::getConstantEvolutionLoo
     // Compute the value of the PHIs for the next iteration.
     // EvaluateExpression adds non-phi values to the CurrentIterVals map.
     DenseMap<Instruction *, Constant *> NextIterVals;
-    Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD,
+    Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL,
                                            TLI);
     if (NextPHI == 0)
       return 0;        // Couldn't evaluate!
@@ -5078,7 +5078,7 @@ ScalarEvolution::getConstantEvolutionLoo
       Constant *&NextPHI = NextIterVals[PHI];
       if (!NextPHI) {   // Not already computed.
         Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
-        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
+        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
       }
       if (NextPHI != I->second)
         StoppedEvolving = false;
@@ -5134,7 +5134,7 @@ const SCEV *ScalarEvolution::ComputeExit
   for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
     ConstantInt *CondVal =
       dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, CurrentIterVals,
-                                                       TD, TLI));
+                                                       DL, TLI));
 
     // Couldn't symbolically evaluate.
     if (!CondVal) return getCouldNotCompute();
@@ -5164,7 +5164,7 @@ const SCEV *ScalarEvolution::ComputeExit
       if (NextPHI) continue;    // Already computed!
 
       Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
-      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
+      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
     }
     CurrentIterVals.swap(NextIterVals);
   }
@@ -5369,14 +5369,14 @@ const SCEV *ScalarEvolution::computeSCEV
           Constant *C = 0;
           if (const CmpInst *CI = dyn_cast<CmpInst>(I))
             C = ConstantFoldCompareInstOperands(CI->getPredicate(),
-                                                Operands[0], Operands[1], TD,
+                                                Operands[0], Operands[1], DL,
                                                 TLI);
           else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
             if (!LI->isVolatile())
-              C = ConstantFoldLoadFromConstPtr(Operands[0], TD);
+              C = ConstantFoldLoadFromConstPtr(Operands[0], DL);
           } else
             C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
-                                         Operands, TD, TLI);
+                                         Operands, DL, TLI);
           if (!C) return V;
           return getSCEV(C);
         }
@@ -7385,7 +7385,7 @@ ScalarEvolution::ScalarEvolution()
 bool ScalarEvolution::runOnFunction(Function &F) {
   this->F = &F;
   LI = &getAnalysis<LoopInfo>();
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
   return false;

Modified: llvm/trunk/lib/Analysis/ScalarEvolutionExpander.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ScalarEvolutionExpander.cpp?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/ScalarEvolutionExpander.cpp (original)
+++ llvm/trunk/lib/Analysis/ScalarEvolutionExpander.cpp Tue Feb 18 09:33:12 2014
@@ -210,7 +210,7 @@ static bool FactorOutConstant(const SCEV
                               const SCEV *&Remainder,
                               const SCEV *Factor,
                               ScalarEvolution &SE,
-                              const DataLayout *TD) {
+                              const DataLayout *DL) {
   // Everything is divisible by one.
   if (Factor->isOne())
     return true;
@@ -250,7 +250,7 @@ static bool FactorOutConstant(const SCEV
   // In a Mul, check if there is a constant operand which is a multiple
   // of the given factor.
   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
-    if (TD) {
+    if (DL) {
       // With DataLayout, the size is known. Check if there is a constant
       // operand which is a multiple of the given factor. If so, we can
       // factor it.
@@ -270,7 +270,7 @@ static bool FactorOutConstant(const SCEV
       for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
         const SCEV *SOp = M->getOperand(i);
         const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
-        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
+        if (FactorOutConstant(SOp, Remainder, Factor, SE, DL) &&
             Remainder->isZero()) {
           SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
           NewMulOps[i] = SOp;
@@ -285,12 +285,12 @@ static bool FactorOutConstant(const SCEV
   if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
     const SCEV *Step = A->getStepRecurrence(SE);
     const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
-    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
+    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
       return false;
     if (!StepRem->isZero())
       return false;
     const SCEV *Start = A->getStart();
-    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
+    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
       return false;
     S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                          A->getNoWrapFlags(SCEV::FlagNW));
@@ -404,8 +404,8 @@ Value *SCEVExpander::expandAddToGEP(cons
   // without the other.
   SplitAddRecs(Ops, Ty, SE);
 
-  Type *IntPtrTy = SE.TD
-                 ? SE.TD->getIntPtrType(PTy)
+  Type *IntPtrTy = SE.DL
+                 ? SE.DL->getIntPtrType(PTy)
                  : Type::getInt64Ty(PTy->getContext());
 
   // Descend down the pointer's type and attempt to convert the other
@@ -424,7 +424,7 @@ Value *SCEVExpander::expandAddToGEP(cons
         for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
           const SCEV *Op = Ops[i];
           const SCEV *Remainder = SE.getConstant(Ty, 0);
-          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
+          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.DL)) {
             // Op now has ElSize factored out.
             ScaledOps.push_back(Op);
             if (!Remainder->isZero())
@@ -458,13 +458,13 @@ Value *SCEVExpander::expandAddToGEP(cons
       bool FoundFieldNo = false;
       // An empty struct has no fields.
       if (STy->getNumElements() == 0) break;
-      if (SE.TD) {
+      if (SE.DL) {
         // With DataLayout, field offsets are known. See if a constant offset
         // falls within any of the struct fields.
         if (Ops.empty()) break;
         if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
           if (SE.getTypeSizeInBits(C->getType()) <= 64) {
-            const StructLayout &SL = *SE.TD->getStructLayout(STy);
+            const StructLayout &SL = *SE.DL->getStructLayout(STy);
             uint64_t FullOffset = C->getValue()->getZExtValue();
             if (FullOffset < SL.getSizeInBytes()) {
               unsigned ElIdx = SL.getElementContainingOffset(FullOffset);

Modified: llvm/trunk/lib/CodeGen/IntrinsicLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/IntrinsicLowering.cpp?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/IntrinsicLowering.cpp (original)
+++ llvm/trunk/lib/CodeGen/IntrinsicLowering.cpp Tue Feb 18 09:33:12 2014
@@ -115,21 +115,21 @@ void IntrinsicLowering::AddPrototypes(Mo
           Type::getInt8PtrTy(Context),
                               Type::getInt8PtrTy(Context), 
                               Type::getInt8PtrTy(Context), 
-                              TD.getIntPtrType(Context), (Type *)0);
+                              DL.getIntPtrType(Context), (Type *)0);
         break;
       case Intrinsic::memmove:
         M.getOrInsertFunction("memmove",
           Type::getInt8PtrTy(Context),
                               Type::getInt8PtrTy(Context), 
                               Type::getInt8PtrTy(Context), 
-                              TD.getIntPtrType(Context), (Type *)0);
+                              DL.getIntPtrType(Context), (Type *)0);
         break;
       case Intrinsic::memset:
         M.getOrInsertFunction("memset",
           Type::getInt8PtrTy(Context),
                               Type::getInt8PtrTy(Context), 
                               Type::getInt32Ty(M.getContext()), 
-                              TD.getIntPtrType(Context), (Type *)0);
+                              DL.getIntPtrType(Context), (Type *)0);
         break;
       case Intrinsic::sqrt:
         EnsureFPIntrinsicsExist(M, I, "sqrtf", "sqrt", "sqrtl");
@@ -463,7 +463,7 @@ void IntrinsicLowering::LowerIntrinsicCa
     break;   // Strip out annotate intrinsic
     
   case Intrinsic::memcpy: {
-    Type *IntPtr = TD.getIntPtrType(Context);
+    Type *IntPtr = DL.getIntPtrType(Context);
     Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
                                         /* isSigned */ false);
     Value *Ops[3];
@@ -474,7 +474,7 @@ void IntrinsicLowering::LowerIntrinsicCa
     break;
   }
   case Intrinsic::memmove: {
-    Type *IntPtr = TD.getIntPtrType(Context);
+    Type *IntPtr = DL.getIntPtrType(Context);
     Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
                                         /* isSigned */ false);
     Value *Ops[3];
@@ -486,7 +486,7 @@ void IntrinsicLowering::LowerIntrinsicCa
   }
   case Intrinsic::memset: {
     Value *Op0 = CI->getArgOperand(0);
-    Type *IntPtr = TD.getIntPtrType(Op0->getType());
+    Type *IntPtr = DL.getIntPtrType(Op0->getType());
     Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
                                         /* isSigned */ false);
     Value *Ops[3];

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp Tue Feb 18 09:33:12 2014
@@ -16,7 +16,7 @@
 using namespace llvm;
 
 TargetSelectionDAGInfo::TargetSelectionDAGInfo(const TargetMachine &TM)
-  : TD(TM.getDataLayout()) {
+  : DL(TM.getDataLayout()) {
 }
 
 TargetSelectionDAGInfo::~TargetSelectionDAGInfo() {

Modified: llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp (original)
+++ llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp Tue Feb 18 09:33:12 2014
@@ -661,11 +661,11 @@ static void InitCmpLibcallCCs(ISD::CondC
 /// NOTE: The constructor takes ownership of TLOF.
 TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
                                        const TargetLoweringObjectFile *tlof)
-  : TM(tm), TD(TM.getDataLayout()), TLOF(*tlof) {
+  : TM(tm), DL(TM.getDataLayout()), TLOF(*tlof) {
   initActions();
 
   // Perform these initializations only once.
-  IsLittleEndian = TD->isLittleEndian();
+  IsLittleEndian = DL->isLittleEndian();
   MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8;
   MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize
     = MaxStoresPerMemmoveOptSize = 4;
@@ -802,7 +802,7 @@ MVT TargetLoweringBase::getPointerTy(uin
 }
 
 unsigned TargetLoweringBase::getPointerSizeInBits(uint32_t AS) const {
-  return TD->getPointerSizeInBits(AS);
+  return DL->getPointerSizeInBits(AS);
 }
 
 unsigned TargetLoweringBase::getPointerTypeSizeInBits(Type *Ty) const {
@@ -811,7 +811,7 @@ unsigned TargetLoweringBase::getPointerT
 }
 
 MVT TargetLoweringBase::getScalarShiftAmountTy(EVT LHSTy) const {
-  return MVT::getIntegerVT(8*TD->getPointerSize(0));
+  return MVT::getIntegerVT(8*DL->getPointerSize(0));
 }
 
 EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy) const {
@@ -1286,7 +1286,7 @@ void llvm::GetReturnInfo(Type* ReturnTyp
 /// function arguments in the caller parameter area.  This is the actual
 /// alignment, not its logarithm.
 unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty) const {
-  return TD->getABITypeAlignment(Ty);
+  return DL->getABITypeAlignment(Ty);
 }
 
 //===----------------------------------------------------------------------===//

Modified: llvm/trunk/lib/ExecutionEngine/ExecutionEngine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/ExecutionEngine/ExecutionEngine.cpp?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/lib/ExecutionEngine/ExecutionEngine.cpp (original)
+++ llvm/trunk/lib/ExecutionEngine/ExecutionEngine.cpp Tue Feb 18 09:33:12 2014
@@ -590,8 +590,8 @@ GenericValue ExecutionEngine::getConstan
     case Instruction::GetElementPtr: {
       // Compute the index
       GenericValue Result = getConstantValue(Op0);
-      APInt Offset(TD->getPointerSizeInBits(), 0);
-      cast<GEPOperator>(CE)->accumulateConstantOffset(*TD, Offset);
+      APInt Offset(DL->getPointerSizeInBits(), 0);
+      cast<GEPOperator>(CE)->accumulateConstantOffset(*DL, Offset);
 
       char* tmp = (char*) Result.PointerVal;
       Result = PTOGV(tmp + Offset.getSExtValue());
@@ -678,16 +678,16 @@ GenericValue ExecutionEngine::getConstan
     }
     case Instruction::PtrToInt: {
       GenericValue GV = getConstantValue(Op0);
-      uint32_t PtrWidth = TD->getTypeSizeInBits(Op0->getType());
+      uint32_t PtrWidth = DL->getTypeSizeInBits(Op0->getType());
       assert(PtrWidth <= 64 && "Bad pointer width");
       GV.IntVal = APInt(PtrWidth, uintptr_t(GV.PointerVal));
-      uint32_t IntWidth = TD->getTypeSizeInBits(CE->getType());
+      uint32_t IntWidth = DL->getTypeSizeInBits(CE->getType());
       GV.IntVal = GV.IntVal.zextOrTrunc(IntWidth);
       return GV;
     }
     case Instruction::IntToPtr: {
       GenericValue GV = getConstantValue(Op0);
-      uint32_t PtrWidth = TD->getTypeSizeInBits(CE->getType());
+      uint32_t PtrWidth = DL->getTypeSizeInBits(CE->getType());
       GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth);
       assert(GV.IntVal.getBitWidth() <= 64 && "Bad pointer width");
       GV.PointerVal = PointerTy(uintptr_t(GV.IntVal.getZExtValue()));

Modified: llvm/trunk/lib/Transforms/Utils/InlineFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/InlineFunction.cpp?rev=201581&r1=201580&r2=201581&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/InlineFunction.cpp (original)
+++ llvm/trunk/lib/Transforms/Utils/InlineFunction.cpp Tue Feb 18 09:33:12 2014
@@ -343,7 +343,7 @@ static Value *HandleByValArgument(Value
     // If the pointer is already known to be sufficiently aligned, or if we can
     // round it up to a larger alignment, then we don't need a temporary.
     if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
-                                   IFI.TD) >= ByValAlignment)
+                                   IFI.DL) >= ByValAlignment)
       return Arg;
     
     // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
@@ -356,8 +356,8 @@ static Value *HandleByValArgument(Value
   
   // Create the alloca.  If we have DataLayout, use nice alignment.
   unsigned Align = 1;
-  if (IFI.TD)
-    Align = IFI.TD->getPrefTypeAlignment(AggTy);
+  if (IFI.DL)
+    Align = IFI.DL->getPrefTypeAlignment(AggTy);
   
   // If the byval had an alignment specified, we *must* use at least that
   // alignment, as it is required by the byval argument (and uses of the
@@ -377,11 +377,11 @@ static Value *HandleByValArgument(Value
   Value *SrcCast = new BitCastInst(Arg, VoidPtrTy, "tmp", TheCall);
   
   Value *Size;
-  if (IFI.TD == 0)
+  if (IFI.DL == 0)
     Size = ConstantExpr::getSizeOf(AggTy);
   else
     Size = ConstantInt::get(Type::getInt64Ty(Context),
-                            IFI.TD->getTypeStoreSize(AggTy));
+                            IFI.DL->getTypeStoreSize(AggTy));
   
   // Always generate a memcpy of alignment 1 here because we don't know
   // the alignment of the src pointer.  Other optimizations can infer
@@ -599,7 +599,7 @@ bool llvm::InlineFunction(CallSite CS, I
     // happy with whatever the cloner can do.
     CloneAndPruneFunctionInto(Caller, CalledFunc, VMap, 
                               /*ModuleLevelChanges=*/false, Returns, ".i",
-                              &InlinedFunctionInfo, IFI.TD, TheCall);
+                              &InlinedFunctionInfo, IFI.DL, TheCall);
 
     // Remember the first block that is newly cloned over.
     FirstNewBlock = LastBlock; ++FirstNewBlock;
@@ -669,9 +669,9 @@ bool llvm::InlineFunction(CallSite CS, I
       ConstantInt *AllocaSize = 0;
       if (ConstantInt *AIArraySize =
           dyn_cast<ConstantInt>(AI->getArraySize())) {
-        if (IFI.TD) {
+        if (IFI.DL) {
           Type *AllocaType = AI->getAllocatedType();
-          uint64_t AllocaTypeSize = IFI.TD->getTypeAllocSize(AllocaType);
+          uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
           uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
           assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
           // Check that array size doesn't saturate uint64_t and doesn't
@@ -908,7 +908,7 @@ bool llvm::InlineFunction(CallSite CS, I
   // the entries are the same or undef).  If so, remove the PHI so it doesn't
   // block other optimizations.
   if (PHI) {
-    if (Value *V = SimplifyInstruction(PHI, IFI.TD)) {
+    if (Value *V = SimplifyInstruction(PHI, IFI.DL)) {
       PHI->replaceAllUsesWith(V);
       PHI->eraseFromParent();
     }




