Index: docs/GarbageCollection.html
===================================================================
--- docs/GarbageCollection.html	(revision 165037)
+++ docs/GarbageCollection.html	(working copy)
@@ -1252,8 +1252,8 @@
 #include "llvm/CodeGen/AsmPrinter.h"
 #include "llvm/Function.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetAsmInfo.h"
 
 void MyGCPrinter::beginAssembly(std::ostream &OS, AsmPrinter &AP,
Index: docs/tutorial/LangImpl4.html
===================================================================
--- docs/tutorial/LangImpl4.html	(revision 165037)
+++ docs/tutorial/LangImpl4.html	(working copy)
@@ -523,8 +523,8 @@
 #include "llvm/PassManager.h"
 #include "llvm/Analysis/Verifier.h"
 #include "llvm/Analysis/Passes.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Transforms/Scalar.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/TargetSelect.h"
 #include <cstdio>
 #include <string>
Index: docs/tutorial/LangImpl5.html
===================================================================
--- docs/tutorial/LangImpl5.html	(revision 165037)
+++ docs/tutorial/LangImpl5.html	(working copy)
@@ -901,8 +901,8 @@
 #include "llvm/PassManager.h"
 #include "llvm/Analysis/Verifier.h"
 #include "llvm/Analysis/Passes.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Transforms/Scalar.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/TargetSelect.h"
 #include <cstdio>
 #include <string>
Index: docs/tutorial/LangImpl6.html
===================================================================
--- docs/tutorial/LangImpl6.html	(revision 165037)
+++ docs/tutorial/LangImpl6.html	(working copy)
@@ -840,8 +840,8 @@
 #include "llvm/PassManager.h"
 #include "llvm/Analysis/Verifier.h"
 #include "llvm/Analysis/Passes.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Transforms/Scalar.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/TargetSelect.h"
 #include <cstdio>
 #include <string>
Index: docs/tutorial/LangImpl7.html
===================================================================
--- docs/tutorial/LangImpl7.html	(revision 165037)
+++ docs/tutorial/LangImpl7.html	(working copy)
@@ -1008,8 +1008,8 @@
 #include "llvm/PassManager.h"
 #include "llvm/Analysis/Verifier.h"
 #include "llvm/Analysis/Passes.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Transforms/Scalar.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/TargetSelect.h"
 #include <cstdio>
 #include <string>
Index: examples/ExceptionDemo/ExceptionDemo.cpp
===================================================================
--- examples/ExceptionDemo/ExceptionDemo.cpp	(revision 165037)
+++ examples/ExceptionDemo/ExceptionDemo.cpp	(working copy)
@@ -57,10 +57,10 @@
 #include "llvm/PassManager.h"
 #include "llvm/Intrinsics.h"
 #include "llvm/Analysis/Verifier.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Support/Dwarf.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/TargetSelect.h"
 
 // FIXME: Although all systems tested with (Linux, OS X), do not need this 
Index: examples/Kaleidoscope/Chapter4/toy.cpp
===================================================================
--- examples/Kaleidoscope/Chapter4/toy.cpp	(revision 165037)
+++ examples/Kaleidoscope/Chapter4/toy.cpp	(working copy)
@@ -7,7 +7,7 @@
 #include "llvm/PassManager.h"
 #include "llvm/Analysis/Verifier.h"
 #include "llvm/Analysis/Passes.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Support/TargetSelect.h"
 #include <cstdio>
@@ -584,7 +584,7 @@
 
   // Set up the optimizer pipeline.  Start with registering info about how the
   // target lays out data structures.
-  OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData()));
+  OurFPM.add(new DataLayout(*TheExecutionEngine->getDataLayout()));
   // Provide basic AliasAnalysis support for GVN.
   OurFPM.add(createBasicAliasAnalysisPass());
   // Do simple "peephole" optimizations and bit-twiddling optzns.
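For reference, the pass-manager setup these hunks produce looks roughly as follows. This is a minimal sketch assuming an existing Module *TheModule and ExecutionEngine *TheExecutionEngine as in the Kaleidoscope examples; SetupPassManager is a hypothetical helper, not part of the tutorial:

    #include "llvm/Module.h"
    #include "llvm/PassManager.h"
    #include "llvm/Analysis/Passes.h"
    #include "llvm/Transforms/Scalar.h"
    #include "llvm/Support/DataLayout.h"
    #include "llvm/ExecutionEngine/ExecutionEngine.h"

    static void SetupPassManager(llvm::Module *TheModule,
                                 llvm::ExecutionEngine *TheExecutionEngine) {
      llvm::FunctionPassManager OurFPM(TheModule);
      // Register how the target lays out data structures (formerly TargetData).
      OurFPM.add(new llvm::DataLayout(*TheExecutionEngine->getDataLayout()));
      OurFPM.add(llvm::createBasicAliasAnalysisPass());   // AA support for GVN
      OurFPM.add(llvm::createInstructionCombiningPass()); // peephole optzns
      OurFPM.add(llvm::createReassociatePass());
      OurFPM.add(llvm::createGVNPass());
      OurFPM.add(llvm::createCFGSimplificationPass());
      OurFPM.doInitialization();
    }

Apart from the header path and the class name, the sequence is identical to the pre-rename code; Chapters 5 through 7 below receive the same mechanical change.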
Index: examples/Kaleidoscope/Chapter5/toy.cpp
===================================================================
--- examples/Kaleidoscope/Chapter5/toy.cpp	(revision 165037)
+++ examples/Kaleidoscope/Chapter5/toy.cpp	(working copy)
@@ -7,7 +7,7 @@
 #include "llvm/PassManager.h"
 #include "llvm/Analysis/Verifier.h"
 #include "llvm/Analysis/Passes.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Support/TargetSelect.h"
 #include <cstdio>
@@ -829,7 +829,7 @@
 
   // Set up the optimizer pipeline.  Start with registering info about how the
   // target lays out data structures.
-  OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData()));
+  OurFPM.add(new DataLayout(*TheExecutionEngine->getDataLayout()));
   // Provide basic AliasAnalysis support for GVN.
   OurFPM.add(createBasicAliasAnalysisPass());
   // Do simple "peephole" optimizations and bit-twiddling optzns.
Index: examples/Kaleidoscope/Chapter6/toy.cpp
===================================================================
--- examples/Kaleidoscope/Chapter6/toy.cpp	(revision 165037)
+++ examples/Kaleidoscope/Chapter6/toy.cpp	(working copy)
@@ -7,7 +7,7 @@
 #include "llvm/PassManager.h"
 #include "llvm/Analysis/Verifier.h"
 #include "llvm/Analysis/Passes.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Support/TargetSelect.h"
 #include <cstdio>
@@ -947,7 +947,7 @@
 
   // Set up the optimizer pipeline.  Start with registering info about how the
   // target lays out data structures.
-  OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData()));
+  OurFPM.add(new DataLayout(*TheExecutionEngine->getDataLayout()));
   // Provide basic AliasAnalysis support for GVN.
   OurFPM.add(createBasicAliasAnalysisPass());
   // Do simple "peephole" optimizations and bit-twiddling optzns.
Index: examples/Kaleidoscope/Chapter7/toy.cpp
===================================================================
--- examples/Kaleidoscope/Chapter7/toy.cpp	(revision 165037)
+++ examples/Kaleidoscope/Chapter7/toy.cpp	(working copy)
@@ -7,7 +7,7 @@
 #include "llvm/PassManager.h"
 #include "llvm/Analysis/Verifier.h"
 #include "llvm/Analysis/Passes.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Support/TargetSelect.h"
 #include <cstdio>
@@ -1111,7 +1111,7 @@
 
   // Set up the optimizer pipeline.  Start with registering info about how the
   // target lays out data structures.
-  OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData()));
+  OurFPM.add(new DataLayout(*TheExecutionEngine->getDataLayout()));
   // Provide basic AliasAnalysis support for GVN.
   OurFPM.add(createBasicAliasAnalysisPass());
   // Promote allocas to registers.
Index: include/llvm-c/Target.h
===================================================================
--- include/llvm-c/Target.h	(revision 165037)
+++ include/llvm-c/Target.h	(working copy)
@@ -145,7 +145,7 @@
 /*===-- Target Data -------------------------------------------------------===*/
 
 /** Creates target data from a target layout string.
-    See the constructor llvm::TargetData::TargetData. */
+    See the constructor llvm::DataLayout::DataLayout. */
 LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep);
 
 /** Adds target data information to a pass manager. This does not take ownership
@@ -160,48 +160,48 @@
 
 /** Converts target data to a target layout string. The string must be disposed
     with LLVMDisposeMessage.
-    See the constructor llvm::TargetData::TargetData. */
+    See the constructor llvm::DataLayout::DataLayout. */
 char *LLVMCopyStringRepOfTargetData(LLVMTargetDataRef);
 
 /** Returns the byte order of a target, either LLVMBigEndian or
     LLVMLittleEndian.
-    See the method llvm::TargetData::isLittleEndian. */
+    See the method llvm::DataLayout::isLittleEndian. */
 enum LLVMByteOrdering LLVMByteOrder(LLVMTargetDataRef);
 
 /** Returns the pointer size in bytes for a target.
-    See the method llvm::TargetData::getPointerSize. */
+    See the method llvm::DataLayout::getPointerSize. */
 unsigned LLVMPointerSize(LLVMTargetDataRef);
 
 /** Returns the integer type that is the same size as a pointer on a target.
-    See the method llvm::TargetData::getIntPtrType. */
+    See the method llvm::DataLayout::getIntPtrType. */
 LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef);
 
 /** Computes the size of a type in bytes for a target.
-    See the method llvm::TargetData::getTypeSizeInBits. */
+    See the method llvm::DataLayout::getTypeSizeInBits. */
 unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef, LLVMTypeRef);
 
 /** Computes the storage size of a type in bytes for a target.
-    See the method llvm::TargetData::getTypeStoreSize. */
+    See the method llvm::DataLayout::getTypeStoreSize. */
 unsigned long long LLVMStoreSizeOfType(LLVMTargetDataRef, LLVMTypeRef);
 
 /** Computes the ABI size of a type in bytes for a target.
-    See the method llvm::TargetData::getTypeAllocSize. */
+    See the method llvm::DataLayout::getTypeAllocSize. */
 unsigned long long LLVMABISizeOfType(LLVMTargetDataRef, LLVMTypeRef);
 
 /** Computes the ABI alignment of a type in bytes for a target.
-    See the method llvm::TargetData::getTypeABISize. */
+    See the method llvm::DataLayout::getTypeABISize. */
 unsigned LLVMABIAlignmentOfType(LLVMTargetDataRef, LLVMTypeRef);
 
 /** Computes the call frame alignment of a type in bytes for a target.
-    See the method llvm::TargetData::getTypeABISize. */
+    See the method llvm::DataLayout::getTypeABISize. */
 unsigned LLVMCallFrameAlignmentOfType(LLVMTargetDataRef, LLVMTypeRef);
 
 /** Computes the preferred alignment of a type in bytes for a target.
-    See the method llvm::TargetData::getTypeABISize. */
+    See the method llvm::DataLayout::getTypeABISize. */
 unsigned LLVMPreferredAlignmentOfType(LLVMTargetDataRef, LLVMTypeRef);
 
 /** Computes the preferred alignment of a global variable in bytes for a target.
-    See the method llvm::TargetData::getPreferredAlignment. */
+    See the method llvm::DataLayout::getPreferredAlignment. */
 unsigned LLVMPreferredAlignmentOfGlobal(LLVMTargetDataRef,
                                         LLVMValueRef GlobalVar);
 
@@ -215,8 +215,8 @@
 unsigned long long LLVMOffsetOfElement(LLVMTargetDataRef, LLVMTypeRef StructTy,
                                        unsigned Element);
 
-/** Deallocates a TargetData.
-    See the destructor llvm::TargetData::~TargetData. */
+/** Deallocates a DataLayout.
+    See the destructor llvm::DataLayout::~DataLayout. */
 void LLVMDisposeTargetData(LLVMTargetDataRef);
 
 /**
@@ -227,15 +227,15 @@
 }
 
 namespace llvm {
-  class TargetData;
+  class DataLayout;
   class TargetLibraryInfo;
 
-  inline TargetData *unwrap(LLVMTargetDataRef P) {
-    return reinterpret_cast<TargetData*>(P);
+  inline DataLayout *unwrap(LLVMTargetDataRef P) {
+    return reinterpret_cast<DataLayout*>(P);
   }
   
-  inline LLVMTargetDataRef wrap(const TargetData *P) {
-    return reinterpret_cast<LLVMTargetDataRef>(const_cast<TargetData*>(P));
+  inline LLVMTargetDataRef wrap(const DataLayout *P) {
+    return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout*>(P));
   }
 
   inline TargetLibraryInfo *unwrap(LLVMTargetLibraryInfoRef P) {
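Note that the C bindings keep the LLVMTargetDataRef name; only the wrapped C++ type changes, so existing C API clients keep compiling. A rough usage sketch, with a purely illustrative layout string:

    #include <stdio.h>
    #include "llvm-c/Core.h"
    #include "llvm-c/Target.h"

    int main(void) {
      /* Parse a data layout specification string (illustrative only). */
      LLVMTargetDataRef TD = LLVMCreateTargetData("e-p:64:64:64-i64:64:64");
      printf("pointer size: %u bytes\n", LLVMPointerSize(TD));
      printf("little endian: %d\n", LLVMByteOrder(TD) == LLVMLittleEndian);
      char *Spec = LLVMCopyStringRepOfTargetData(TD);
      printf("layout: %s\n", Spec);
      LLVMDisposeMessage(Spec);  /* caller owns the copied string */
      LLVMDisposeTargetData(TD); /* frees the underlying DataLayout */
      return 0;
    }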
Index: include/llvm/Analysis/AliasAnalysis.h
===================================================================
--- include/llvm/Analysis/AliasAnalysis.h	(revision 165037)
+++ include/llvm/Analysis/AliasAnalysis.h	(working copy)
@@ -45,7 +45,7 @@
 class LoadInst;
 class StoreInst;
 class VAArgInst;
-class TargetData;
+class DataLayout;
 class TargetLibraryInfo;
 class Pass;
 class AnalysisUsage;
@@ -55,7 +55,7 @@
 
 class AliasAnalysis {
 protected:
-  const TargetData *TD;
+  const DataLayout *TD;
   const TargetLibraryInfo *TLI;
 
 private:
@@ -83,17 +83,17 @@
   /// know the sizes of the potential memory references.
   static uint64_t const UnknownSize = ~UINT64_C(0);
 
-  /// getTargetData - Return a pointer to the current TargetData object, or
-  /// null if no TargetData object is available.
+  /// getDataLayout - Return a pointer to the current DataLayout object, or
+  /// null if no DataLayout object is available.
   ///
-  const TargetData *getTargetData() const { return TD; }
+  const DataLayout *getDataLayout() const { return TD; }
 
   /// getTargetLibraryInfo - Return a pointer to the current TargetLibraryInfo
   /// object, or null if no TargetLibraryInfo object is available.
   ///
   const TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }
 
-  /// getTypeStoreSize - Return the TargetData store size for the given type,
+  /// getTypeStoreSize - Return the DataLayout store size for the given type,
   /// if known, or a conservative value otherwise.
   ///
   uint64_t getTypeStoreSize(Type *Ty);
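Because getDataLayout() may return null when no layout pass was registered, clients are expected to degrade gracefully. A small sketch of that pattern; storeSizeInBytes is a hypothetical helper that mirrors what getTypeStoreSize already does internally:

    #include "llvm/Type.h"
    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Support/DataLayout.h"

    static uint64_t storeSizeInBytes(llvm::AliasAnalysis &AA, llvm::Type *Ty) {
      if (const llvm::DataLayout *TD = AA.getDataLayout())
        return TD->getTypeStoreSize(Ty);        // precise answer
      return llvm::AliasAnalysis::UnknownSize;  // conservative fallback
    }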
Index: include/llvm/Analysis/CodeMetrics.h
===================================================================
--- include/llvm/Analysis/CodeMetrics.h	(revision 165037)
+++ include/llvm/Analysis/CodeMetrics.h	(working copy)
@@ -22,11 +22,11 @@
   class BasicBlock;
   class Function;
   class Instruction;
-  class TargetData;
+  class DataLayout;
   class Value;
 
   /// \brief Check whether an instruction is likely to be "free" when lowered.
-  bool isInstructionFree(const Instruction *I, const TargetData *TD = 0);
+  bool isInstructionFree(const Instruction *I, const DataLayout *TD = 0);
 
   /// \brief Check whether a call will lower to something small.
   ///
@@ -85,10 +85,10 @@
                     NumRets(0) {}
 
     /// \brief Add information about a block to the current state.
-    void analyzeBasicBlock(const BasicBlock *BB, const TargetData *TD = 0);
+    void analyzeBasicBlock(const BasicBlock *BB, const DataLayout *TD = 0);
 
     /// \brief Add information about a function to the current state.
-    void analyzeFunction(Function *F, const TargetData *TD = 0);
+    void analyzeFunction(Function *F, const DataLayout *TD = 0);
   };
 }
 
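CodeMetrics follows the same optional-layout convention. A short sketch of a size query; roughFunctionSize is a hypothetical helper, and TD may be null:

    #include "llvm/Analysis/CodeMetrics.h"
    #include "llvm/Support/DataLayout.h"

    static unsigned roughFunctionSize(llvm::Function *F,
                                      const llvm::DataLayout *TD) {
      llvm::CodeMetrics Metrics;
      // With a DataLayout, "free" instructions such as pointer-sized no-op
      // casts are recognized more precisely; without one the count is simply
      // more conservative.
      Metrics.analyzeFunction(F, TD);
      return Metrics.NumInsts;
    }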
Index: include/llvm/Analysis/ConstantFolding.h
===================================================================
--- include/llvm/Analysis/ConstantFolding.h	(revision 165037)
+++ include/llvm/Analysis/ConstantFolding.h	(working copy)
@@ -12,7 +12,7 @@
 //
 // Also, to supplement the basic VMCore ConstantExpr simplifications,
 // this file declares some additional folding routines that can make use of
-// TargetData information. These functions cannot go in VMCore due to library
+// DataLayout information. These functions cannot go in VMCore due to library
 // dependency issues.
 //
 //===----------------------------------------------------------------------===//
@@ -24,7 +24,7 @@
   class Constant;
   class ConstantExpr;
   class Instruction;
-  class TargetData;
+  class DataLayout;
   class TargetLibraryInfo;
   class Function;
   class Type;
@@ -36,14 +36,14 @@
 /// Note that this fails if not all of the operands are constant.  Otherwise,
 /// this function can only fail when attempting to fold instructions like loads
 /// and stores, which have no constant expression form.
-Constant *ConstantFoldInstruction(Instruction *I, const TargetData *TD = 0,
+Constant *ConstantFoldInstruction(Instruction *I, const DataLayout *TD = 0,
                                   const TargetLibraryInfo *TLI = 0);
 
 /// ConstantFoldConstantExpression - Attempt to fold the constant expression
-/// using the specified TargetData.  If successful, the constant result is
+/// using the specified DataLayout.  If successful, the constant
 /// result is returned, if not, null is returned.
 Constant *ConstantFoldConstantExpression(const ConstantExpr *CE,
-                                         const TargetData *TD = 0,
+                                         const DataLayout *TD = 0,
                                          const TargetLibraryInfo *TLI = 0);
 
 /// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
@@ -54,7 +54,7 @@
 ///
 Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
                                    ArrayRef<Constant *> Ops,
-                                   const TargetData *TD = 0,
+                                   const DataLayout *TD = 0,
                                    const TargetLibraryInfo *TLI = 0);
 
 /// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
@@ -63,7 +63,7 @@
 ///
 Constant *ConstantFoldCompareInstOperands(unsigned Predicate,
                                           Constant *LHS, Constant *RHS,
-                                          const TargetData *TD = 0,
+                                          const DataLayout *TD = 0,
                                           const TargetLibraryInfo *TLI = 0);
 
 /// ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue
@@ -75,7 +75,7 @@
 /// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
 /// produce if it is constant and determinable.  If this is not determinable,
 /// return null.
-Constant *ConstantFoldLoadFromConstPtr(Constant *C, const TargetData *TD = 0);
+Constant *ConstantFoldLoadFromConstPtr(Constant *C, const DataLayout *TD = 0);
 
 /// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
 /// getelementptr constantexpr, return the constant value being addressed by the
Index: include/llvm/Analysis/InlineCost.h
===================================================================
--- include/llvm/Analysis/InlineCost.h	(revision 165037)
+++ include/llvm/Analysis/InlineCost.h	(working copy)
@@ -26,7 +26,7 @@
 namespace llvm {
 
   class CallSite;
-  class TargetData;
+  class DataLayout;
 
   namespace InlineConstants {
     // Various magic constants used to adjust heuristics.
@@ -104,13 +104,13 @@
 
   /// InlineCostAnalyzer - Cost analyzer used by inliner.
   class InlineCostAnalyzer {
-    // TargetData if available, or null.
-    const TargetData *TD;
+    // DataLayout if available, or null.
+    const DataLayout *TD;
 
   public:
     InlineCostAnalyzer(): TD(0) {}
 
-    void setTargetData(const TargetData *TData) { TD = TData; }
+    void setDataLayout(const DataLayout *TData) { TD = TData; }
 
     /// \brief Get an InlineCost object representing the cost of inlining this
     /// callsite.
Index: include/llvm/Analysis/InstructionSimplify.h
===================================================================
--- include/llvm/Analysis/InstructionSimplify.h	(revision 165037)
+++ include/llvm/Analysis/InstructionSimplify.h	(working copy)
@@ -24,7 +24,7 @@
   class ArrayRef;
   class DominatorTree;
   class Instruction;
-  class TargetData;
+  class DataLayout;
   class TargetLibraryInfo;
   class Type;
   class Value;
@@ -32,122 +32,122 @@
   /// SimplifyAddInst - Given operands for an Add, see if we can
   /// fold the result.  If not, this returns null.
   Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
-                         const TargetData *TD = 0,
+                         const DataLayout *TD = 0,
                          const TargetLibraryInfo *TLI = 0,
                          const DominatorTree *DT = 0);
 
   /// SimplifySubInst - Given operands for a Sub, see if we can
   /// fold the result.  If not, this returns null.
   Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
-                         const TargetData *TD = 0,
+                         const DataLayout *TD = 0,
                          const TargetLibraryInfo *TLI = 0,
                          const DominatorTree *DT = 0);
 
   /// SimplifyMulInst - Given operands for a Mul, see if we can
   /// fold the result.  If not, this returns null.
-  Value *SimplifyMulInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+  Value *SimplifyMulInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
                          const TargetLibraryInfo *TLI = 0,
                          const DominatorTree *DT = 0);
 
   /// SimplifySDivInst - Given operands for an SDiv, see if we can
   /// fold the result.  If not, this returns null.
-  Value *SimplifySDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+  Value *SimplifySDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
                           const TargetLibraryInfo *TLI = 0,
                           const DominatorTree *DT = 0);
 
   /// SimplifyUDivInst - Given operands for a UDiv, see if we can
   /// fold the result.  If not, this returns null.
-  Value *SimplifyUDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0, 
+  Value *SimplifyUDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0, 
                           const TargetLibraryInfo *TLI = 0,
                           const DominatorTree *DT = 0);
 
   /// SimplifyFDivInst - Given operands for an FDiv, see if we can
   /// fold the result.  If not, this returns null.
-  Value *SimplifyFDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+  Value *SimplifyFDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
                           const TargetLibraryInfo *TLI = 0,
                           const DominatorTree *DT = 0);
 
   /// SimplifySRemInst - Given operands for an SRem, see if we can
   /// fold the result.  If not, this returns null.
-  Value *SimplifySRemInst(Value *LHS, Value *RHS, const TargetData *TD = 0, 
+  Value *SimplifySRemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0, 
                           const TargetLibraryInfo *TLI = 0,
                           const DominatorTree *DT = 0);
 
   /// SimplifyURemInst - Given operands for a URem, see if we can
   /// fold the result.  If not, this returns null.
-  Value *SimplifyURemInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+  Value *SimplifyURemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
                           const TargetLibraryInfo *TLI = 0,
                           const DominatorTree *DT = 0);
 
   /// SimplifyFRemInst - Given operands for an FRem, see if we can
   /// fold the result.  If not, this returns null.
-  Value *SimplifyFRemInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+  Value *SimplifyFRemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
                           const TargetLibraryInfo *TLI = 0,
                           const DominatorTree *DT = 0);
 
   /// SimplifyShlInst - Given operands for a Shl, see if we can
   /// fold the result.  If not, this returns null.
   Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
-                         const TargetData *TD = 0, 
+                         const DataLayout *TD = 0, 
                          const TargetLibraryInfo *TLI = 0,
                          const DominatorTree *DT = 0);
 
   /// SimplifyLShrInst - Given operands for a LShr, see if we can
   /// fold the result.  If not, this returns null.
   Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
-                          const TargetData *TD = 0,
+                          const DataLayout *TD = 0,
                           const TargetLibraryInfo *TLI = 0,
                           const DominatorTree *DT = 0);
 
   /// SimplifyAShrInst - Given operands for a AShr, see if we can
   /// fold the result.  If not, this returns null.
   Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
-                          const TargetData *TD = 0,
+                          const DataLayout *TD = 0,
                           const TargetLibraryInfo *TLI = 0,
                           const DominatorTree *DT = 0);
 
   /// SimplifyAndInst - Given operands for an And, see if we can
   /// fold the result.  If not, this returns null.
-  Value *SimplifyAndInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+  Value *SimplifyAndInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
                          const TargetLibraryInfo *TLI = 0,
                          const DominatorTree *DT = 0);
 
   /// SimplifyOrInst - Given operands for an Or, see if we can
   /// fold the result.  If not, this returns null.
-  Value *SimplifyOrInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+  Value *SimplifyOrInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
                         const TargetLibraryInfo *TLI = 0,
                         const DominatorTree *DT = 0);
 
   /// SimplifyXorInst - Given operands for a Xor, see if we can
   /// fold the result.  If not, this returns null.
-  Value *SimplifyXorInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+  Value *SimplifyXorInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
                          const TargetLibraryInfo *TLI = 0,
                          const DominatorTree *DT = 0);
 
   /// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
   /// fold the result.  If not, this returns null.
   Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
-                          const TargetData *TD = 0, 
+                          const DataLayout *TD = 0, 
                           const TargetLibraryInfo *TLI = 0,
                           const DominatorTree *DT = 0);
 
   /// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can
   /// fold the result.  If not, this returns null.
   Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
-                          const TargetData *TD = 0, 
+                          const DataLayout *TD = 0, 
                           const TargetLibraryInfo *TLI = 0,
                           const DominatorTree *DT = 0);
 
   /// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
   /// the result.  If not, this returns null.
   Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
-                            const TargetData *TD = 0,
+                            const DataLayout *TD = 0,
                             const TargetLibraryInfo *TLI = 0,
                             const DominatorTree *DT = 0);
 
   /// SimplifyGEPInst - Given operands for an GetElementPtrInst, see if we can
   /// fold the result.  If not, this returns null.
-  Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const TargetData *TD = 0,
+  Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD = 0,
                          const TargetLibraryInfo *TLI = 0,
                          const DominatorTree *DT = 0);
 
@@ -155,13 +155,13 @@
   /// can fold the result.  If not, this returns null.
   Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
                                  ArrayRef<unsigned> Idxs,
-                                 const TargetData *TD = 0,
+                                 const DataLayout *TD = 0,
                                  const TargetLibraryInfo *TLI = 0,
                                  const DominatorTree *DT = 0);
 
   /// SimplifyTruncInst - Given operands for an TruncInst, see if we can fold
   /// the result.  If not, this returns null.
-  Value *SimplifyTruncInst(Value *Op, Type *Ty, const TargetData *TD = 0,
+  Value *SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD = 0,
                            const TargetLibraryInfo *TLI = 0,
                            const DominatorTree *DT = 0);
 
@@ -171,20 +171,20 @@
   /// SimplifyCmpInst - Given operands for a CmpInst, see if we can
   /// fold the result.  If not, this returns null.
   Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
-                         const TargetData *TD = 0,
+                         const DataLayout *TD = 0,
                          const TargetLibraryInfo *TLI = 0,
                          const DominatorTree *DT = 0);
 
   /// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
   /// fold the result.  If not, this returns null.
   Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
-                       const TargetData *TD = 0, 
+                       const DataLayout *TD = 0, 
                        const TargetLibraryInfo *TLI = 0,
                        const DominatorTree *DT = 0);
 
   /// SimplifyInstruction - See if we can compute a simplified version of this
   /// instruction.  If not, this returns null.
-  Value *SimplifyInstruction(Instruction *I, const TargetData *TD = 0,
+  Value *SimplifyInstruction(Instruction *I, const DataLayout *TD = 0,
                              const TargetLibraryInfo *TLI = 0,
                              const DominatorTree *DT = 0);
 
@@ -198,7 +198,7 @@
   ///
   /// The function returns true if any simplifications were performed.
   bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
-                                     const TargetData *TD = 0,
+                                     const DataLayout *TD = 0,
                                      const TargetLibraryInfo *TLI = 0,
                                      const DominatorTree *DT = 0);
 
@@ -209,7 +209,7 @@
   /// of the users impacted. It returns true if any simplifications were
   /// performed.
   bool recursivelySimplifyInstruction(Instruction *I,
-                                      const TargetData *TD = 0,
+                                      const DataLayout *TD = 0,
                                       const TargetLibraryInfo *TLI = 0,
                                       const DominatorTree *DT = 0);
 } // end namespace llvm
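All of these entry points treat the DataLayout as an optional hint: passing a null TD simply disables the layout-dependent folds. A minimal sketch of the usual driver loop, assuming TD, TLI and DT were obtained (or left null) by the enclosing pass:

    #include "llvm/BasicBlock.h"
    #include "llvm/Instruction.h"
    #include "llvm/Analysis/InstructionSimplify.h"
    #include "llvm/Support/DataLayout.h"

    static bool simplifyBlock(llvm::BasicBlock &BB, const llvm::DataLayout *TD,
                              const llvm::TargetLibraryInfo *TLI,
                              const llvm::DominatorTree *DT) {
      bool Changed = false;
      for (llvm::BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
        if (llvm::Value *V = llvm::SimplifyInstruction(&*I, TD, TLI, DT)) {
          I->replaceAllUsesWith(V);  // callers typically erase I afterwards
          Changed = true;
        }
      return Changed;
    }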
Index: include/llvm/Analysis/IVUsers.h
===================================================================
--- include/llvm/Analysis/IVUsers.h	(revision 165037)
+++ include/llvm/Analysis/IVUsers.h	(working copy)
@@ -28,7 +28,7 @@
 class ScalarEvolution;
 class SCEV;
 class IVUsers;
-class TargetData;
+class DataLayout;
 
 /// IVStrideUse - Keep track of one use of a strided induction variable.
 /// The Expr member keeps track of the expression, User is the actual user
@@ -123,7 +123,7 @@
   LoopInfo *LI;
   DominatorTree *DT;
   ScalarEvolution *SE;
-  TargetData *TD;
+  DataLayout *TD;
   SmallPtrSet<Instruction*, 16> Processed;
 
   /// IVUses - A list of all tracked IV uses of induction variable expressions
Index: include/llvm/Analysis/LazyValueInfo.h
===================================================================
--- include/llvm/Analysis/LazyValueInfo.h	(revision 165037)
+++ include/llvm/Analysis/LazyValueInfo.h	(working copy)
@@ -19,14 +19,14 @@
 
 namespace llvm {
   class Constant;
-  class TargetData;
+  class DataLayout;
   class TargetLibraryInfo;
   class Value;
   
 /// LazyValueInfo - This pass computes, caches, and vends lazy value constraint
 /// information.
 class LazyValueInfo : public FunctionPass {
-  class TargetData *TD;
+  class DataLayout *TD;
   class TargetLibraryInfo *TLI;
   void *PImpl;
   LazyValueInfo(const LazyValueInfo&) LLVM_DELETED_FUNCTION;
Index: include/llvm/Analysis/Loads.h
===================================================================
--- include/llvm/Analysis/Loads.h	(revision 165037)
+++ include/llvm/Analysis/Loads.h	(working copy)
@@ -19,7 +19,7 @@
 namespace llvm {
 
 class AliasAnalysis;
-class TargetData;
+class DataLayout;
 class MDNode;
 
 /// isSafeToLoadUnconditionally - Return true if we know that executing a load
@@ -27,7 +27,7 @@
 /// specified pointer, we do a quick local scan of the basic block containing
 /// ScanFrom, to determine if the address is already accessed.
 bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
-                                 unsigned Align, const TargetData *TD = 0);
+                                 unsigned Align, const DataLayout *TD = 0);
 
 /// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at
 /// the instruction before ScanFrom) checking to see if we have the value at
Index: include/llvm/Analysis/MemoryBuiltins.h
===================================================================
--- include/llvm/Analysis/MemoryBuiltins.h	(revision 165037)
+++ include/llvm/Analysis/MemoryBuiltins.h	(working copy)
@@ -27,7 +27,7 @@
 namespace llvm {
 class CallInst;
 class PointerType;
-class TargetData;
+class DataLayout;
 class TargetLibraryInfo;
 class Type;
 class Value;
@@ -81,7 +81,7 @@
 /// isArrayMalloc - Returns the corresponding CallInst if the instruction 
 /// is a call to malloc whose array size can be determined and the array size
 /// is not constant 1.  Otherwise, return NULL.
-const CallInst *isArrayMalloc(const Value *I, const TargetData *TD,
+const CallInst *isArrayMalloc(const Value *I, const DataLayout *TD,
                               const TargetLibraryInfo *TLI);
 
 /// getMallocType - Returns the PointerType resulting from the malloc call.
@@ -103,7 +103,7 @@
 /// then return that multiple.  For non-array mallocs, the multiple is
 /// constant 1.  Otherwise, return NULL for mallocs whose array size cannot be
 /// determined.
-Value *getMallocArraySize(CallInst *CI, const TargetData *TD,
+Value *getMallocArraySize(CallInst *CI, const DataLayout *TD,
                           const TargetLibraryInfo *TLI,
                           bool LookThroughSExt = false);
 
@@ -141,7 +141,7 @@
 /// object size in Size if successful, and false otherwise.
 /// If RoundToAlign is true, then Size is rounded up to the aligment of allocas,
 /// byval arguments, and global variables.
-bool getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD,
+bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *TD,
                    const TargetLibraryInfo *TLI, bool RoundToAlign = false);
 
 
@@ -153,7 +153,7 @@
 class ObjectSizeOffsetVisitor
   : public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {
 
-  const TargetData *TD;
+  const DataLayout *TD;
   const TargetLibraryInfo *TLI;
   bool RoundToAlign;
   unsigned IntTyBits;
@@ -167,7 +167,7 @@
   }
 
 public:
-  ObjectSizeOffsetVisitor(const TargetData *TD, const TargetLibraryInfo *TLI,
+  ObjectSizeOffsetVisitor(const DataLayout *TD, const TargetLibraryInfo *TLI,
                           LLVMContext &Context, bool RoundToAlign = false);
 
   SizeOffsetType compute(Value *V);
@@ -213,7 +213,7 @@
   typedef DenseMap<const Value*, WeakEvalType> CacheMapTy;
   typedef SmallPtrSet<const Value*, 8> PtrSetTy;
 
-  const TargetData *TD;
+  const DataLayout *TD;
   const TargetLibraryInfo *TLI;
   LLVMContext &Context;
   BuilderTy Builder;
@@ -228,7 +228,7 @@
   SizeOffsetEvalType compute_(Value *V);
 
 public:
-  ObjectSizeOffsetEvaluator(const TargetData *TD, const TargetLibraryInfo *TLI,
+  ObjectSizeOffsetEvaluator(const DataLayout *TD, const TargetLibraryInfo *TLI,
                             LLVMContext &Context);
   SizeOffsetEvalType compute(Value *V);
 
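getObjectSize keeps its shape and simply takes the renamed type. A minimal sketch of a query; knownObjectSize is a hypothetical wrapper, with TD and TLI supplied by the enclosing pass:

    #include "llvm/Analysis/MemoryBuiltins.h"
    #include "llvm/Support/DataLayout.h"

    static bool knownObjectSize(const llvm::Value *Ptr,
                                const llvm::DataLayout *TD,
                                const llvm::TargetLibraryInfo *TLI,
                                uint64_t &Size) {
      // Returns false when the size cannot be determined, for example when
      // no DataLayout is available.
      return llvm::getObjectSize(Ptr, Size, TD, TLI, /*RoundToAlign=*/false);
    }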
Index: include/llvm/Analysis/MemoryDependenceAnalysis.h
===================================================================
--- include/llvm/Analysis/MemoryDependenceAnalysis.h	(revision 165037)
+++ include/llvm/Analysis/MemoryDependenceAnalysis.h	(working copy)
@@ -29,7 +29,7 @@
   class Instruction;
   class CallSite;
   class AliasAnalysis;
-  class TargetData;
+  class DataLayout;
   class MemoryDependenceAnalysis;
   class PredIteratorCache;
   class DominatorTree;
@@ -323,7 +323,7 @@
     
     /// Current AA implementation, just a cache.
     AliasAnalysis *AA;
-    TargetData *TD;
+    DataLayout *TD;
     DominatorTree *DT;
     OwningPtr<PredIteratorCache> PredCache;
   public:
@@ -412,7 +412,7 @@
                                                     int64_t MemLocOffs,
                                                     unsigned MemLocSize,
                                                     const LoadInst *LI,
-                                                    const TargetData &TD);
+                                                    const DataLayout &TD);
     
   private:
     MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall,
Index: include/llvm/Analysis/PHITransAddr.h
===================================================================
--- include/llvm/Analysis/PHITransAddr.h	(revision 165037)
+++ include/llvm/Analysis/PHITransAddr.h	(working copy)
@@ -19,7 +19,7 @@
 
 namespace llvm {
   class DominatorTree;
-  class TargetData;
+  class DataLayout;
   class TargetLibraryInfo;
 
 /// PHITransAddr - An address value which tracks and handles phi translation.
@@ -37,7 +37,7 @@
   Value *Addr;
   
   /// TD - The target data we are playing with if known, otherwise null.
-  const TargetData *TD;
+  const DataLayout *TD;
 
   /// TLI - The target library info if known, otherwise null.
   const TargetLibraryInfo *TLI;
@@ -45,7 +45,7 @@
   /// InstInputs - The inputs for our symbolic address.
   SmallVector<Instruction*, 4> InstInputs;
 public:
-  PHITransAddr(Value *addr, const TargetData *td) : Addr(addr), TD(td), TLI(0) {
+  PHITransAddr(Value *addr, const DataLayout *td) : Addr(addr), TD(td), TLI(0) {
     // If the address is an instruction, the whole thing is considered an input.
     if (Instruction *I = dyn_cast<Instruction>(Addr))
       InstInputs.push_back(I);
Index: include/llvm/Analysis/ScalarEvolution.h
===================================================================
--- include/llvm/Analysis/ScalarEvolution.h	(revision 165037)
+++ include/llvm/Analysis/ScalarEvolution.h	(working copy)
@@ -40,7 +40,7 @@
   class DominatorTree;
   class Type;
   class ScalarEvolution;
-  class TargetData;
+  class DataLayout;
   class TargetLibraryInfo;
   class LLVMContext;
   class Loop;
@@ -227,7 +227,7 @@
 
     /// TD - The target data information for the target we are targeting.
     ///
-    TargetData *TD;
+    DataLayout *TD;
 
     /// TLI - The target library information for the target we are targeting.
     ///
Index: include/llvm/Analysis/ValueTracking.h
===================================================================
--- include/llvm/Analysis/ValueTracking.h	(revision 165037)
+++ include/llvm/Analysis/ValueTracking.h	(working copy)
@@ -22,7 +22,7 @@
   class Value;
   class Instruction;
   class APInt;
-  class TargetData;
+  class DataLayout;
   class StringRef;
   class MDNode;
 
@@ -37,27 +37,27 @@
   /// same width as the vector element, and the bit is set only if it is true
   /// for all of the elements in the vector.
   void ComputeMaskedBits(Value *V,  APInt &KnownZero, APInt &KnownOne,
-                         const TargetData *TD = 0, unsigned Depth = 0);
+                         const DataLayout *TD = 0, unsigned Depth = 0);
   void computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero);
 
   /// ComputeSignBit - Determine whether the sign bit is known to be zero or
   /// one.  Convenience wrapper around ComputeMaskedBits.
   void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
-                      const TargetData *TD = 0, unsigned Depth = 0);
+                      const DataLayout *TD = 0, unsigned Depth = 0);
 
   /// isPowerOfTwo - Return true if the given value is known to have exactly one
   /// bit set when defined. For vectors return true if every element is known to
   /// be a power of two when defined.  Supports values with integer or pointer
   /// type and vectors of integers.  If 'OrZero' is set then returns true if the
   /// given value is either a power of two or zero.
-  bool isPowerOfTwo(Value *V, const TargetData *TD = 0, bool OrZero = false,
+  bool isPowerOfTwo(Value *V, const DataLayout *TD = 0, bool OrZero = false,
                     unsigned Depth = 0);
 
   /// isKnownNonZero - Return true if the given value is known to be non-zero
   /// when defined.  For vectors return true if every element is known to be
   /// non-zero when defined.  Supports values with integer or pointer type and
   /// vectors of integers.
-  bool isKnownNonZero(Value *V, const TargetData *TD = 0, unsigned Depth = 0);
+  bool isKnownNonZero(Value *V, const DataLayout *TD = 0, unsigned Depth = 0);
 
   /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
   /// this predicate to simplify operations downstream.  Mask is known to be
@@ -69,7 +69,7 @@
   /// same width as the vector element, and the bit is set only if it is true
   /// for all of the elements in the vector.
   bool MaskedValueIsZero(Value *V, const APInt &Mask, 
-                         const TargetData *TD = 0, unsigned Depth = 0);
+                         const DataLayout *TD = 0, unsigned Depth = 0);
 
   
   /// ComputeNumSignBits - Return the number of times the sign bit of the
@@ -80,7 +80,7 @@
   ///
   /// 'Op' must have a scalar integer type.
   ///
-  unsigned ComputeNumSignBits(Value *Op, const TargetData *TD = 0,
+  unsigned ComputeNumSignBits(Value *Op, const DataLayout *TD = 0,
                               unsigned Depth = 0);
 
   /// ComputeMultiple - This function computes the integer multiple of Base that
@@ -118,10 +118,10 @@
   /// it can be expressed as a base pointer plus a constant offset.  Return the
   /// base and offset to the caller.
   Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
-                                          const TargetData &TD);
+                                          const DataLayout &TD);
   static inline const Value *
   GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
-                                   const TargetData &TD) {
+                                   const DataLayout &TD) {
     return GetPointerBaseWithConstantOffset(const_cast<Value*>(Ptr), Offset,TD);
   }
   
@@ -143,10 +143,10 @@
   /// being addressed.  Note that the returned value has pointer type if the
   /// specified value does.  If the MaxLookup value is non-zero, it limits the
   /// number of instructions to be stripped off.
-  Value *GetUnderlyingObject(Value *V, const TargetData *TD = 0,
+  Value *GetUnderlyingObject(Value *V, const DataLayout *TD = 0,
                              unsigned MaxLookup = 6);
   static inline const Value *
-  GetUnderlyingObject(const Value *V, const TargetData *TD = 0,
+  GetUnderlyingObject(const Value *V, const DataLayout *TD = 0,
                       unsigned MaxLookup = 6) {
     return GetUnderlyingObject(const_cast<Value*>(V), TD, MaxLookup);
   }
@@ -156,7 +156,7 @@
   /// multiple objects.
   void GetUnderlyingObjects(Value *V,
                             SmallVectorImpl<Value *> &Objects,
-                            const TargetData *TD = 0,
+                            const DataLayout *TD = 0,
                             unsigned MaxLookup = 6);
 
   /// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
@@ -182,7 +182,7 @@
   /// However, this method can return true for instructions that read memory;
   /// for such instructions, moving them may change the resulting value.
   bool isSafeToSpeculativelyExecute(const Value *V,
-                                    const TargetData *TD = 0);
+                                    const DataLayout *TD = 0);
 
 } // end namespace llvm
 
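The ValueTracking helpers follow the same convention, using the DataLayout only to sharpen their answers. A short sketch of a known-bits query, assuming V has integer type and TD may be null; lowBitKnownZero is a hypothetical helper:

    #include "llvm/Value.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/ADT/APInt.h"
    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/Support/DataLayout.h"

    static bool lowBitKnownZero(llvm::Value *V, const llvm::DataLayout *TD) {
      unsigned Bits = llvm::cast<llvm::IntegerType>(V->getType())->getBitWidth();
      llvm::APInt KnownZero(Bits, 0), KnownOne(Bits, 0);
      // A null TD is fine; the analysis just has less information to work with.
      llvm::ComputeMaskedBits(V, KnownZero, KnownOne, TD);
      return KnownZero[0];  // true if bit 0 is known to be zero
    }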
Index: include/llvm/CodeGen/AsmPrinter.h
===================================================================
--- include/llvm/CodeGen/AsmPrinter.h	(revision 165037)
+++ include/llvm/CodeGen/AsmPrinter.h	(working copy)
@@ -48,7 +48,7 @@
   class DwarfException;
   class Mangler;
   class TargetLoweringObjectFile;
-  class TargetData;
+  class DataLayout;
   class TargetMachine;
 
   /// AsmPrinter - This class is intended to be used as a driving class for all
@@ -131,8 +131,8 @@
     /// getObjFileLowering - Return information about object file lowering.
     const TargetLoweringObjectFile &getObjFileLowering() const;
 
-    /// getTargetData - Return information about data layout.
-    const TargetData &getTargetData() const;
+    /// getDataLayout - Return information about data layout.
+    const DataLayout &getDataLayout() const;
 
     /// getCurrentSection() - Return the current section we are emitting to.
     const MCSection *getCurrentSection() const;
Index: include/llvm/CodeGen/FastISel.h
===================================================================
--- include/llvm/CodeGen/FastISel.h	(revision 165037)
+++ include/llvm/CodeGen/FastISel.h	(working copy)
@@ -32,7 +32,7 @@
 class MachineInstr;
 class MachineFrameInfo;
 class MachineRegisterInfo;
-class TargetData;
+class DataLayout;
 class TargetInstrInfo;
 class TargetLibraryInfo;
 class TargetLowering;
@@ -54,7 +54,7 @@
   MachineConstantPool &MCP;
   DebugLoc DL;
   const TargetMachine &TM;
-  const TargetData &TD;
+  const DataLayout &TD;
   const TargetInstrInfo &TII;
   const TargetLowering &TLI;
   const TargetRegisterInfo &TRI;
Index: include/llvm/CodeGen/IntrinsicLowering.h
===================================================================
--- include/llvm/CodeGen/IntrinsicLowering.h	(revision 165037)
+++ include/llvm/CodeGen/IntrinsicLowering.h	(working copy)
@@ -21,15 +21,15 @@
 namespace llvm {
   class CallInst;
   class Module;
-  class TargetData;
+  class DataLayout;
 
   class IntrinsicLowering {
-    const TargetData& TD;
+    const DataLayout& TD;
 
     
     bool Warned;
   public:
-    explicit IntrinsicLowering(const TargetData &td) :
+    explicit IntrinsicLowering(const DataLayout &td) :
       TD(td), Warned(false) {}
 
     /// AddPrototypes - This method, if called, causes all of the prototypes
Index: include/llvm/CodeGen/MachineConstantPool.h
===================================================================
--- include/llvm/CodeGen/MachineConstantPool.h	(revision 165037)
+++ include/llvm/CodeGen/MachineConstantPool.h	(working copy)
@@ -25,7 +25,7 @@
 
 class Constant;
 class FoldingSetNodeID;
-class TargetData;
+class DataLayout;
 class TargetMachine;
 class Type;
 class MachineConstantPool;
@@ -132,14 +132,14 @@
 /// address of the function constant pool values.
 /// @brief The machine constant pool.
 class MachineConstantPool {
-  const TargetData *TD;   ///< The machine's TargetData.
+  const DataLayout *TD;   ///< The machine's DataLayout.
   unsigned PoolAlignment; ///< The alignment for the pool.
   std::vector<MachineConstantPoolEntry> Constants; ///< The pool of constants.
   /// MachineConstantPoolValues that use an existing MachineConstantPoolEntry.
   DenseSet<MachineConstantPoolValue*> MachineCPVsSharingEntries;
 public:
   /// @brief The only constructor.
-  explicit MachineConstantPool(const TargetData *td)
+  explicit MachineConstantPool(const DataLayout *td)
     : TD(td), PoolAlignment(1) {}
   ~MachineConstantPool();
     
Index: include/llvm/CodeGen/MachineFrameInfo.h
===================================================================
--- include/llvm/CodeGen/MachineFrameInfo.h	(revision 165037)
+++ include/llvm/CodeGen/MachineFrameInfo.h	(working copy)
@@ -21,7 +21,7 @@
 
 namespace llvm {
 class raw_ostream;
-class TargetData;
+class DataLayout;
 class TargetRegisterClass;
 class Type;
 class MachineFunction;
Index: include/llvm/CodeGen/MachineJumpTableInfo.h
===================================================================
--- include/llvm/CodeGen/MachineJumpTableInfo.h	(revision 165037)
+++ include/llvm/CodeGen/MachineJumpTableInfo.h	(working copy)
@@ -26,7 +26,7 @@
 namespace llvm {
 
 class MachineBasicBlock;
-class TargetData;
+class DataLayout;
 class raw_ostream;
 
 /// MachineJumpTableEntry - One jump table in the jump table info.
@@ -84,9 +84,9 @@
   JTEntryKind getEntryKind() const { return EntryKind; }
 
   /// getEntrySize - Return the size of each entry in the jump table.
-  unsigned getEntrySize(const TargetData &TD) const;
+  unsigned getEntrySize(const DataLayout &TD) const;
   /// getEntryAlignment - Return the alignment of each entry in the jump table.
-  unsigned getEntryAlignment(const TargetData &TD) const;
+  unsigned getEntryAlignment(const DataLayout &TD) const;
 
   /// createJumpTableIndex - Create a new jump table.
   ///
Index: include/llvm/DerivedTypes.h
===================================================================
--- include/llvm/DerivedTypes.h	(revision 165037)
+++ include/llvm/DerivedTypes.h	(working copy)
@@ -184,7 +184,7 @@
 /// Independent of what kind of struct you have, the body of a struct type are
 /// laid out in memory consequtively with the elements directly one after the
 /// other (if the struct is packed) or (if not packed) with padding between the
-/// elements as defined by TargetData (which is required to match what the code
+/// elements as defined by DataLayout (which is required to match what the code
 /// generator for a target expects).
 ///
 class StructType : public CompositeType {
Index: include/llvm/ExecutionEngine/ExecutionEngine.h
===================================================================
--- include/llvm/ExecutionEngine/ExecutionEngine.h	(revision 165037)
+++ include/llvm/ExecutionEngine/ExecutionEngine.h	(working copy)
@@ -42,7 +42,7 @@
 class MachineCodeInfo;
 class Module;
 class MutexGuard;
-class TargetData;
+class DataLayout;
 class Triple;
 class Type;
 
@@ -104,7 +104,7 @@
   ExecutionEngineState EEState;
 
   /// The target data for the platform for which execution is being performed.
-  const TargetData *TD;
+  const DataLayout *TD;
 
   /// Whether lazy JIT compilation is enabled.
   bool CompilingLazily;
@@ -123,7 +123,7 @@
   /// optimize for the case where there is only one module.
   SmallVector<Module*, 1> Modules;
 
-  void setTargetData(const TargetData *td) { TD = td; }
+  void setDataLayout(const DataLayout *td) { TD = td; }
 
   /// getMemoryforGV - Allocate memory for a global variable.
   virtual char *getMemoryForGV(const GlobalVariable *GV);
@@ -213,7 +213,7 @@
 
   //===--------------------------------------------------------------------===//
 
-  const TargetData *getTargetData() const { return TD; }
+  const DataLayout *getDataLayout() const { return TD; }
 
   /// removeModule - Remove a Module from the list of modules.  Returns true if
   /// M is found.
Index: include/llvm/InitializePasses.h
===================================================================
--- include/llvm/InitializePasses.h	(revision 165037)
+++ include/llvm/InitializePasses.h	(working copy)
@@ -246,7 +246,7 @@
 void initializeTailCallElimPass(PassRegistry&);
 void initializeTailDuplicatePassPass(PassRegistry&);
 void initializeTargetPassConfigPass(PassRegistry&);
-void initializeTargetDataPass(PassRegistry&);
+void initializeDataLayoutPass(PassRegistry&);
 void initializeTargetLibraryInfoPass(PassRegistry&);
 void initializeTwoAddressInstructionPassPass(PassRegistry&);
 void initializeTypeBasedAliasAnalysisPass(PassRegistry&);
Index: include/llvm/InstrTypes.h
===================================================================
--- include/llvm/InstrTypes.h	(revision 165037)
+++ include/llvm/InstrTypes.h	(working copy)
@@ -563,7 +563,7 @@
   /// IntPtrTy argument is used to make accurate determinations for casts
   /// involving Integer and Pointer types. They are no-op casts if the integer
   /// is the same size as the pointer. However, pointer size varies with
-  /// platform. Generally, the result of TargetData::getIntPtrType() should be
+  /// platform. Generally, the result of DataLayout::getIntPtrType() should be
   /// passed in. If that's not available, use Type::Int64Ty, which will make
   /// the isNoopCast call conservative.
   /// @brief Determine if the described cast is a no-op cast.
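Concretely, callers hand isNoopCast the layout's pointer-sized integer when one is available and fall back to i64 otherwise. A hedged sketch; castIsNoop is a hypothetical helper:

    #include "llvm/InstrTypes.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/Support/DataLayout.h"

    static bool castIsNoop(const llvm::CastInst *CI, const llvm::DataLayout *TD) {
      llvm::LLVMContext &Ctx = CI->getContext();
      llvm::Type *IntPtrTy = TD ? TD->getIntPtrType(Ctx)
                                : llvm::Type::getInt64Ty(Ctx);
      return CI->isNoopCast(IntPtrTy);
    }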
Index: include/llvm/Support/DataLayout.h
===================================================================
--- include/llvm/Support/DataLayout.h	(revision 0)
+++ include/llvm/Support/DataLayout.h	(working copy)
@@ -0,0 +1,430 @@
+//===- llvm/Support/DataLayout.h - Data size & alignment info --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// This file defines target properties related to datatype size/offset/alignment
+// information.  It uses lazy annotations to cache information about how
+// structure types are laid out and used.
+//
+// This structure should be created once, filled in if the defaults are not
+// correct and then passed around by const&.  None of the member functions
+// require modification to the object.
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_DATALAYOUT_H
+#define LLVM_TARGET_DATALAYOUT_H
+
+#include "llvm/Pass.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class Value;
+class Type;
+class IntegerType;
+class StructType;
+class StructLayout;
+class GlobalVariable;
+class LLVMContext;
+template<typename T>
+class ArrayRef;
+
+/// Enum used to categorize the alignment types stored by TargetAlignElem
+enum AlignTypeEnum {
+  INTEGER_ALIGN = 'i',               ///< Integer type alignment
+  VECTOR_ALIGN = 'v',                ///< Vector type alignment
+  FLOAT_ALIGN = 'f',                 ///< Floating point type alignment
+  AGGREGATE_ALIGN = 'a',             ///< Aggregate alignment
+  STACK_ALIGN = 's'                  ///< Stack objects alignment
+};
+
+/// Target alignment element.
+///
+/// Stores the alignment data associated with a given alignment type (integer,
+/// vector, float) and type bit width.
+///
+/// @note The unusual order of elements in the structure attempts to reduce
+/// padding and make the structure slightly more cache friendly.
+struct TargetAlignElem {
+  AlignTypeEnum       AlignType : 8;  ///< Alignment type (AlignTypeEnum)
+  unsigned            ABIAlign;       ///< ABI alignment for this type/bitw
+  unsigned            PrefAlign;      ///< Pref. alignment for this type/bitw
+  uint32_t            TypeBitWidth;   ///< Type bit width
+
+  /// Initializer
+  static TargetAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
+                             unsigned pref_align, uint32_t bit_width);
+  /// Equality predicate
+  bool operator==(const TargetAlignElem &rhs) const;
+};
+
+/// Target pointer alignment element.
+///
+/// Stores the alignment data associated with a given pointer and address space.
+///
+/// @note The unusual order of elements in the structure attempts to reduce
+/// padding and make the structure slightly more cache friendly.
+struct PointerAlignElem {
+  unsigned            ABIAlign;       ///< ABI alignment for this type/bitw
+  unsigned            PrefAlign;      ///< Pref. alignment for this type/bitw
+  uint32_t            TypeBitWidth;   ///< Type bit width
+  uint32_t            AddressSpace;   ///< Address space for the pointer type
+
+  /// Initializer
+  static PointerAlignElem get(uint32_t addr_space, unsigned abi_align,
+                             unsigned pref_align, uint32_t bit_width);
+  /// Equality predicate
+  bool operator==(const PointerAlignElem &rhs) const;
+};
+
+
+/// DataLayout - This class holds a parsed version of the target data layout
+/// string in a module and provides methods for querying it.  The target data
+/// layout string is specified *by the target* - a frontend generating LLVM IR
+/// is required to generate the right target data for the target being codegen'd
+/// to.  If some measure of portability is desired, an empty string may be
+/// specified in the module.
+class DataLayout : public ImmutablePass {
+private:
+  bool          LittleEndian;          ///< Defaults to false
+  unsigned      StackNaturalAlign;     ///< Stack natural alignment
+
+  SmallVector<unsigned char, 8> LegalIntWidths; ///< Legal Integers.
+
+  /// Alignments- Where the primitive type alignment data is stored.
+  ///
+  /// @sa init().
+  /// @note Could support multiple size pointer alignments, e.g., 32-bit
+  /// pointers vs. 64-bit pointers by extending TargetAlignment, but for now,
+  /// we don't.
+  SmallVector<TargetAlignElem, 16> Alignments;
+  DenseMap<unsigned, PointerAlignElem> Pointers;
+
+  /// InvalidAlignmentElem - This member is a signal that a requested alignment
+  /// type and bit width were not found in the SmallVector.
+  static const TargetAlignElem InvalidAlignmentElem;
+
+  /// InvalidPointerElem - This member is a signal that a requested pointer
+  /// type and bit width were not found in the DenseSet.
+  static const PointerAlignElem InvalidPointerElem;
+
+  // The StructType -> StructLayout map.
+  mutable void *LayoutMap;
+
+  //! Set/initialize target alignments
+  void setAlignment(AlignTypeEnum align_type, unsigned abi_align,
+                    unsigned pref_align, uint32_t bit_width);
+  unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
+                            bool ABIAlign, Type *Ty) const;
+
+  //! Set/initialize pointer alignments
+  void setPointerAlignment(uint32_t addr_space, unsigned abi_align,
+      unsigned pref_align, uint32_t bit_width);
+
+  //! Internal helper method that returns requested alignment for type.
+  unsigned getAlignment(Type *Ty, bool abi_or_pref) const;
+
+  /// Valid alignment predicate.
+  ///
+  /// Predicate that tests a TargetAlignElem reference returned by get() against
+  /// InvalidAlignmentElem.
+  bool validAlignment(const TargetAlignElem &align) const {
+    return &align != &InvalidAlignmentElem;
+  }
+
+  /// Valid pointer predicate.
+  ///
+  /// Predicate that tests a PointerAlignElem reference returned by get() against
+  /// InvalidPointerElem.
+  bool validPointer(const PointerAlignElem &align) const {
+    return &align != &InvalidPointerElem;
+  }
+
+  /// Initialise a DataLayout object with default values, ensure that the
+  /// target data pass is registered.
+  void init();
+
+public:
+  /// Default ctor.
+  ///
+  /// @note This has to exist, because this is a pass, but it should never be
+  /// used.
+  DataLayout();
+
+  /// Constructs a DataLayout from a specification string. See init().
+  explicit DataLayout(StringRef TargetDescription)
+    : ImmutablePass(ID) {
+    std::string errMsg = parseSpecifier(TargetDescription, this);
+    assert(errMsg == "" && "Invalid target data layout string.");
+    (void)errMsg;
+  }
+
+  /// Parses a target data specification string. Returns an error message
+  /// if the string is malformed, or the empty string on success. Optionally
+  /// initialises a DataLayout object if passed a non-null pointer.
+  static std::string parseSpecifier(StringRef TargetDescription, DataLayout* td = 0);
+
+  /// Initialize target data from properties stored in the module.
+  explicit DataLayout(const Module *M);
+
+  DataLayout(const DataLayout &TD) :
+    ImmutablePass(ID),
+    LittleEndian(TD.isLittleEndian()),
+    LegalIntWidths(TD.LegalIntWidths),
+    Alignments(TD.Alignments),
+    Pointers(TD.Pointers),
+    LayoutMap(0)
+  { }
+
+  ~DataLayout();  // Not virtual, do not subclass this class
+
+  /// Target endianness...
+  bool isLittleEndian() const { return LittleEndian; }
+  bool isBigEndian() const { return !LittleEndian; }
+
+  /// getStringRepresentation - Return the string representation of the
+  /// DataLayout.  This representation is in the same format accepted by the
+  /// string constructor above.
+  std::string getStringRepresentation() const;
+
+  /// isLegalInteger - This function returns true if the specified type is
+  /// known to be a native integer type supported by the CPU.  For example,
+  /// i64 is not native on most 32-bit CPUs and i37 is not native on any known
+  /// one.  This returns false if the integer width is not legal.
+  ///
+  /// The width is specified in bits.
+  ///
+  bool isLegalInteger(unsigned Width) const {
+    for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i)
+      if (LegalIntWidths[i] == Width)
+        return true;
+    return false;
+  }
+
+  bool isIllegalInteger(unsigned Width) const {
+    return !isLegalInteger(Width);
+  }
+
+  /// Returns true if the given alignment exceeds the natural stack alignment.
+  bool exceedsNaturalStackAlignment(unsigned Align) const {
+    return (StackNaturalAlign != 0) && (Align > StackNaturalAlign);
+  }
+
+  /// fitsInLegalInteger - This function returns true if the specified type fits
+  /// in a native integer type supported by the CPU.  For example, if the CPU
+  /// only supports i32 as a native integer type, then i27 fits in a legal
+  // integer type but i45 does not.
+  bool fitsInLegalInteger(unsigned Width) const {
+    for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i)
+      if (Width <= LegalIntWidths[i])
+        return true;
+    return false;
+  }
+
+  /// Target pointer alignment
+  /// FIXME: The defaults need to be removed once all of
+  /// the backends/clients are updated.
+  unsigned getPointerABIAlignment(unsigned AS = 0)  const {
+    DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS);
+    if (val == Pointers.end()) {
+      val = Pointers.find(0);
+    }
+    return val->second.ABIAlign;
+  }
+  /// Return target's alignment for stack-based pointers
+  /// FIXME: The defaults need to be removed once all of
+  /// the backends/clients are updated.
+  unsigned getPointerPrefAlignment(unsigned AS = 0) const {
+    DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS);
+    if (val == Pointers.end()) {
+      val = Pointers.find(0);
+    }
+    return val->second.PrefAlign;
+  }
+  /// Target pointer size
+  /// FIXME: The defaults need to be removed once all of
+  /// the backends/clients are updated.
+  unsigned getPointerSize(unsigned AS = 0)          const {
+    DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS);
+    if (val == Pointers.end()) {
+      val = Pointers.find(0);
+    }
+    return val->second.TypeBitWidth;
+  }
+  /// Target pointer size, in bits
+  /// FIXME: The defaults need to be removed once all of
+  /// the backends/clients are updated.
+  unsigned getPointerSizeInBits(unsigned AS = 0)    const {
+    DenseMap<unsigned, PointerAlignElem>::const_iterator val = Pointers.find(AS);
+    if (val == Pointers.end()) {
+      val = Pointers.find(0);
+    }
+    return 8*val->second.TypeBitWidth;
+  }
+
+  /// Size examples:
+  ///
+  /// Type        SizeInBits  StoreSizeInBits  AllocSizeInBits[*]
+  /// ----        ----------  ---------------  ---------------
+  ///  i1            1           8                8
+  ///  i8            8           8                8
+  ///  i19          19          24               32
+  ///  i32          32          32               32
+  ///  i100        100         104              128
+  ///  i128        128         128              128
+  ///  Float        32          32               32
+  ///  Double       64          64               64
+  ///  X86_FP80     80          80               96
+  ///
+  /// [*] The alloc size depends on the alignment, and thus on the target.
+  ///     These values are for x86-32 linux.
+
+  /// getTypeSizeInBits - Return the number of bits necessary to hold the
+  /// specified type.  For example, returns 36 for i36 and 80 for x86_fp80.
+  uint64_t getTypeSizeInBits(Type* Ty) const;
+
+  /// getTypeStoreSize - Return the maximum number of bytes that may be
+  /// overwritten by storing the specified type.  For example, returns 5
+  /// for i36 and 10 for x86_fp80.
+  uint64_t getTypeStoreSize(Type *Ty) const {
+    return (getTypeSizeInBits(Ty)+7)/8;
+  }
+
+  /// getTypeStoreSizeInBits - Return the maximum number of bits that may be
+  /// overwritten by storing the specified type; always a multiple of 8.  For
+  /// example, returns 40 for i36 and 80 for x86_fp80.
+  uint64_t getTypeStoreSizeInBits(Type *Ty) const {
+    return 8*getTypeStoreSize(Ty);
+  }
+
+  /// getTypeAllocSize - Return the offset in bytes between successive objects
+  /// of the specified type, including alignment padding.  This is the amount
+  /// that alloca reserves for this type.  For example, returns 12 or 16 for
+  /// x86_fp80, depending on alignment.
+  uint64_t getTypeAllocSize(Type* Ty) const {
+    // Round up to the next alignment boundary.
+    return RoundUpAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
+  }
+
+  /// getTypeAllocSizeInBits - Return the offset in bits between successive
+  /// objects of the specified type, including alignment padding; always a
+  /// multiple of 8.  This is the amount that alloca reserves for this type.
+  /// For example, returns 96 or 128 for x86_fp80, depending on alignment.
+  uint64_t getTypeAllocSizeInBits(Type* Ty) const {
+    return 8*getTypeAllocSize(Ty);
+  }
+
+  /// getABITypeAlignment - Return the minimum ABI-required alignment for the
+  /// specified type.
+  unsigned getABITypeAlignment(Type *Ty) const;
+
+  /// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
+  /// an integer type of the specified bitwidth.
+  unsigned getABIIntegerTypeAlignment(unsigned BitWidth) const;
+
+
+  /// getCallFrameTypeAlignment - Return the minimum ABI-required alignment
+  /// for the specified type when it is part of a call frame.
+  unsigned getCallFrameTypeAlignment(Type *Ty) const;
+
+
+  /// getPrefTypeAlignment - Return the preferred stack/global alignment for
+  /// the specified type.  This is always at least as good as the ABI alignment.
+  unsigned getPrefTypeAlignment(Type *Ty) const;
+
+  /// getPreferredTypeAlignmentShift - Return the preferred alignment for the
+  /// specified type, returned as log2 of the value (a shift amount).
+  ///
+  unsigned getPreferredTypeAlignmentShift(Type *Ty) const;
+
+  /// getIntPtrType - Return an unsigned integer type that is the same size or
+  /// greater to the host pointer size.
+  /// FIXME: Need to remove the default argument when the rest of the LLVM code
+  /// base has been updated.
+  IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;
+
+  /// getIndexedOffset - return the offset from the beginning of the type for
+  /// the specified indices.  This is used to implement getelementptr.
+  ///
+  uint64_t getIndexedOffset(Type *Ty, ArrayRef<Value *> Indices) const;
+
+  /// getStructLayout - Return a StructLayout object, indicating the alignment
+  /// of the struct, its size, and the offsets of its fields.  Note that this
+  /// information is lazily cached.
+  const StructLayout *getStructLayout(StructType *Ty) const;
+
+  /// getPreferredAlignment - Return the preferred alignment of the specified
+  /// global.  This includes an explicitly requested alignment (if the global
+  /// has one).
+  unsigned getPreferredAlignment(const GlobalVariable *GV) const;
+
+  /// getPreferredAlignmentLog - Return the preferred alignment of the
+  /// specified global, returned in log form.  This includes an explicitly
+  /// requested alignment (if the global has one).
+  unsigned getPreferredAlignmentLog(const GlobalVariable *GV) const;
+
+  /// RoundUpAlignment - Round the specified value up to the next alignment
+  /// boundary specified by Alignment.  For example, 7 rounded up to an
+  /// alignment boundary of 4 is 8.  8 rounded up to the alignment boundary of 4
+  /// is 8 because it is already aligned.
+  template <typename UIntTy>
+  static UIntTy RoundUpAlignment(UIntTy Val, unsigned Alignment) {
+    assert((Alignment & (Alignment-1)) == 0 && "Alignment must be power of 2!");
+    return (Val + (Alignment-1)) & ~UIntTy(Alignment-1);
+  }
+
+  static char ID; // Pass identification, replacement for typeid
+};
+
+/// StructLayout - used to lazily calculate structure layout information for a
+/// target machine, based on the DataLayout structure.
+///
+class StructLayout {
+  uint64_t StructSize;
+  unsigned StructAlignment;
+  unsigned NumElements;
+  uint64_t MemberOffsets[1];  // variable sized array!
+public:
+
+  uint64_t getSizeInBytes() const {
+    return StructSize;
+  }
+
+  uint64_t getSizeInBits() const {
+    return 8*StructSize;
+  }
+
+  unsigned getAlignment() const {
+    return StructAlignment;
+  }
+
+  /// getElementContainingOffset - Given a valid byte offset into the structure,
+  /// return the structure index that contains it.
+  ///
+  unsigned getElementContainingOffset(uint64_t Offset) const;
+
+  uint64_t getElementOffset(unsigned Idx) const {
+    assert(Idx < NumElements && "Invalid element idx!");
+    return MemberOffsets[Idx];
+  }
+
+  uint64_t getElementOffsetInBits(unsigned Idx) const {
+    return getElementOffset(Idx)*8;
+  }
+
+private:
+  friend class DataLayout;   // Only DataLayout can create this class
+  StructLayout(StructType *ST, const DataLayout &TD);
+};
+
+} // End llvm namespace
+
+#endif
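
For reviewers skimming the new header above, the following is a minimal, hypothetical usage sketch; it is not part of this patch, the data layout string is an arbitrary example, and only methods shown in the header above (the Module constructor, getTypeAllocSize, getPointerSize) are used.

// Hypothetical sketch: querying the renamed DataLayout class. Not part of the patch.
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Type.h"
#include "llvm/Support/DataLayout.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("example", Ctx);
  // Example layout string: little-endian, 64-bit pointers, 64-bit-aligned i64.
  M.setDataLayout("e-p:64:64:64-i64:64:64");

  DataLayout TD(&M);                 // same Module-based ctor as the old TargetData
  Type *I64 = Type::getInt64Ty(Ctx);
  outs() << "i64 alloc size: " << TD.getTypeAllocSize(I64) << "\n";
  outs() << "pointer size:   " << TD.getPointerSize(0) << "\n";
  return 0;
}
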
Index: include/llvm/Support/TargetFolder.h
===================================================================
--- include/llvm/Support/TargetFolder.h	(revision 165037)
+++ include/llvm/Support/TargetFolder.h	(working copy)
@@ -26,11 +26,11 @@
 
 namespace llvm {
 
-class TargetData;
+class DataLayout;
 
 /// TargetFolder - Create constants with target dependent folding.
 class TargetFolder {
-  const TargetData *TD;
+  const DataLayout *TD;
 
   /// Fold - Fold the constant using target specific information.
   Constant *Fold(Constant *C) const {
@@ -41,7 +41,7 @@
   }
 
 public:
-  explicit TargetFolder(const TargetData *TheTD) : TD(TheTD) {}
+  explicit TargetFolder(const DataLayout *TheTD) : TD(TheTD) {}
 
   //===--------------------------------------------------------------------===//
   // Binary Operators
Index: include/llvm/Target/Mangler.h
===================================================================
--- include/llvm/Target/Mangler.h	(revision 165037)
+++ include/llvm/Target/Mangler.h	(working copy)
@@ -22,7 +22,7 @@
 template <typename T> class SmallVectorImpl;
 class MCContext;
 class MCSymbol;
-class TargetData;
+class DataLayout;
 
 class Mangler {
 public:
@@ -34,7 +34,7 @@
 
 private:
   MCContext &Context;
-  const TargetData &TD;
+  const DataLayout &TD;
 
   /// AnonGlobalIDs - We need to give global values the same name every time
   /// they are mangled.  This keeps track of the number we give to anonymous
@@ -47,7 +47,7 @@
   unsigned NextAnonGlobalID;
 
 public:
-  Mangler(MCContext &context, const TargetData &td)
+  Mangler(MCContext &context, const DataLayout &td)
     : Context(context), TD(td), NextAnonGlobalID(1) {}
 
   /// getSymbol - Return the MCSymbol for the specified global value.  This
Index: include/llvm/Target/TargetData.h
===================================================================
--- include/llvm/Target/TargetData.h	(revision 165037)
+++ include/llvm/Target/TargetData.h	(working copy)
@@ -1,363 +0,0 @@
-//===-- llvm/Target/TargetData.h - Data size & alignment info ---*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines target properties related to datatype size/offset/alignment
-// information.  It uses lazy annotations to cache information about how
-// structure types are laid out and used.
-//
-// This structure should be created once, filled in if the defaults are not
-// correct and then passed around by const&.  None of the members functions
-// require modification to the object.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_TARGETDATA_H
-#define LLVM_TARGET_TARGETDATA_H
-
-#include "llvm/Pass.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/DataTypes.h"
-
-namespace llvm {
-
-class Value;
-class Type;
-class IntegerType;
-class StructType;
-class StructLayout;
-class GlobalVariable;
-class LLVMContext;
-template<typename T>
-class ArrayRef;
-
-/// Enum used to categorize the alignment types stored by TargetAlignElem
-enum AlignTypeEnum {
-  INTEGER_ALIGN = 'i',               ///< Integer type alignment
-  VECTOR_ALIGN = 'v',                ///< Vector type alignment
-  FLOAT_ALIGN = 'f',                 ///< Floating point type alignment
-  AGGREGATE_ALIGN = 'a',             ///< Aggregate alignment
-  STACK_ALIGN = 's'                  ///< Stack objects alignment
-};
-
-/// Target alignment element.
-///
-/// Stores the alignment data associated with a given alignment type (pointer,
-/// integer, vector, float) and type bit width.
-///
-/// @note The unusual order of elements in the structure attempts to reduce
-/// padding and make the structure slightly more cache friendly.
-struct TargetAlignElem {
-  unsigned AlignType    : 8;  ///< Alignment type (AlignTypeEnum)
-  unsigned TypeBitWidth : 24; ///< Type bit width
-  unsigned ABIAlign     : 16; ///< ABI alignment for this type/bitw
-  unsigned PrefAlign    : 16; ///< Pref. alignment for this type/bitw
-
-  /// Initializer
-  static TargetAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
-                             unsigned pref_align, uint32_t bit_width);
-  /// Equality predicate
-  bool operator==(const TargetAlignElem &rhs) const;
-};
-
-/// TargetData - This class holds a parsed version of the target data layout
-/// string in a module and provides methods for querying it.  The target data
-/// layout string is specified *by the target* - a frontend generating LLVM IR
-/// is required to generate the right target data for the target being codegen'd
-/// to.  If some measure of portability is desired, an empty string may be
-/// specified in the module.
-class TargetData : public ImmutablePass {
-private:
-  bool          LittleEndian;          ///< Defaults to false
-  unsigned      PointerMemSize;        ///< Pointer size in bytes
-  unsigned      PointerABIAlign;       ///< Pointer ABI alignment
-  unsigned      PointerPrefAlign;      ///< Pointer preferred alignment
-  unsigned      StackNaturalAlign;     ///< Stack natural alignment
-
-  SmallVector<unsigned char, 8> LegalIntWidths; ///< Legal Integers.
-
-  /// Alignments- Where the primitive type alignment data is stored.
-  ///
-  /// @sa init().
-  /// @note Could support multiple size pointer alignments, e.g., 32-bit
-  /// pointers vs. 64-bit pointers by extending TargetAlignment, but for now,
-  /// we don't.
-  SmallVector<TargetAlignElem, 16> Alignments;
-
-  /// InvalidAlignmentElem - This member is a signal that a requested alignment
-  /// type and bit width were not found in the SmallVector.
-  static const TargetAlignElem InvalidAlignmentElem;
-
-  // The StructType -> StructLayout map.
-  mutable void *LayoutMap;
-
-  //! Set/initialize target alignments
-  void setAlignment(AlignTypeEnum align_type, unsigned abi_align,
-                    unsigned pref_align, uint32_t bit_width);
-  unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
-                            bool ABIAlign, Type *Ty) const;
-  //! Internal helper method that returns requested alignment for type.
-  unsigned getAlignment(Type *Ty, bool abi_or_pref) const;
-
-  /// Valid alignment predicate.
-  ///
-  /// Predicate that tests a TargetAlignElem reference returned by get() against
-  /// InvalidAlignmentElem.
-  bool validAlignment(const TargetAlignElem &align) const {
-    return &align != &InvalidAlignmentElem;
-  }
-
-  /// Initialise a TargetData object with default values, ensure that the
-  /// target data pass is registered.
-  void init();
-
-public:
-  /// Default ctor.
-  ///
-  /// @note This has to exist, because this is a pass, but it should never be
-  /// used.
-  TargetData();
-
-  /// Constructs a TargetData from a specification string. See init().
-  explicit TargetData(StringRef TargetDescription)
-    : ImmutablePass(ID) {
-    std::string errMsg = parseSpecifier(TargetDescription, this);
-    assert(errMsg == "" && "Invalid target data layout string.");
-    (void)errMsg;
-  }
-
-  /// Parses a target data specification string. Returns an error message
-  /// if the string is malformed, or the empty string on success. Optionally
-  /// initialises a TargetData object if passed a non-null pointer.
-  static std::string parseSpecifier(StringRef TargetDescription, TargetData* td = 0);
-
-  /// Initialize target data from properties stored in the module.
-  explicit TargetData(const Module *M);
-
-  TargetData(const TargetData &TD) :
-    ImmutablePass(ID),
-    LittleEndian(TD.isLittleEndian()),
-    PointerMemSize(TD.PointerMemSize),
-    PointerABIAlign(TD.PointerABIAlign),
-    PointerPrefAlign(TD.PointerPrefAlign),
-    LegalIntWidths(TD.LegalIntWidths),
-    Alignments(TD.Alignments),
-    LayoutMap(0)
-  { }
-
-  ~TargetData();  // Not virtual, do not subclass this class
-
-  /// Target endianness...
-  bool isLittleEndian() const { return LittleEndian; }
-  bool isBigEndian() const { return !LittleEndian; }
-
-  /// getStringRepresentation - Return the string representation of the
-  /// TargetData.  This representation is in the same format accepted by the
-  /// string constructor above.
-  std::string getStringRepresentation() const;
-
-  /// isLegalInteger - This function returns true if the specified type is
-  /// known to be a native integer type supported by the CPU.  For example,
-  /// i64 is not native on most 32-bit CPUs and i37 is not native on any known
-  /// one.  This returns false if the integer width is not legal.
-  ///
-  /// The width is specified in bits.
-  ///
-  bool isLegalInteger(unsigned Width) const {
-    for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i)
-      if (LegalIntWidths[i] == Width)
-        return true;
-    return false;
-  }
-
-  bool isIllegalInteger(unsigned Width) const {
-    return !isLegalInteger(Width);
-  }
-
-  /// Returns true if the given alignment exceeds the natural stack alignment.
-  bool exceedsNaturalStackAlignment(unsigned Align) const {
-    return (StackNaturalAlign != 0) && (Align > StackNaturalAlign);
-  }
-
-  /// fitsInLegalInteger - This function returns true if the specified type fits
-  /// in a native integer type supported by the CPU.  For example, if the CPU
-  /// only supports i32 as a native integer type, then i27 fits in a legal
-  // integer type but i45 does not.
-  bool fitsInLegalInteger(unsigned Width) const {
-    for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i)
-      if (Width <= LegalIntWidths[i])
-        return true;
-    return false;
-  }
-
-  /// Target pointer alignment
-  unsigned getPointerABIAlignment() const { return PointerABIAlign; }
-  /// Return target's alignment for stack-based pointers
-  unsigned getPointerPrefAlignment() const { return PointerPrefAlign; }
-  /// Target pointer size
-  unsigned getPointerSize()         const { return PointerMemSize; }
-  /// Target pointer size, in bits
-  unsigned getPointerSizeInBits()   const { return 8*PointerMemSize; }
-
-  /// Size examples:
-  ///
-  /// Type        SizeInBits  StoreSizeInBits  AllocSizeInBits[*]
-  /// ----        ----------  ---------------  ---------------
-  ///  i1            1           8                8
-  ///  i8            8           8                8
-  ///  i19          19          24               32
-  ///  i32          32          32               32
-  ///  i100        100         104              128
-  ///  i128        128         128              128
-  ///  Float        32          32               32
-  ///  Double       64          64               64
-  ///  X86_FP80     80          80               96
-  ///
-  /// [*] The alloc size depends on the alignment, and thus on the target.
-  ///     These values are for x86-32 linux.
-
-  /// getTypeSizeInBits - Return the number of bits necessary to hold the
-  /// specified type.  For example, returns 36 for i36 and 80 for x86_fp80.
-  uint64_t getTypeSizeInBits(Type* Ty) const;
-
-  /// getTypeStoreSize - Return the maximum number of bytes that may be
-  /// overwritten by storing the specified type.  For example, returns 5
-  /// for i36 and 10 for x86_fp80.
-  uint64_t getTypeStoreSize(Type *Ty) const {
-    return (getTypeSizeInBits(Ty)+7)/8;
-  }
-
-  /// getTypeStoreSizeInBits - Return the maximum number of bits that may be
-  /// overwritten by storing the specified type; always a multiple of 8.  For
-  /// example, returns 40 for i36 and 80 for x86_fp80.
-  uint64_t getTypeStoreSizeInBits(Type *Ty) const {
-    return 8*getTypeStoreSize(Ty);
-  }
-
-  /// getTypeAllocSize - Return the offset in bytes between successive objects
-  /// of the specified type, including alignment padding.  This is the amount
-  /// that alloca reserves for this type.  For example, returns 12 or 16 for
-  /// x86_fp80, depending on alignment.
-  uint64_t getTypeAllocSize(Type* Ty) const {
-    // Round up to the next alignment boundary.
-    return RoundUpAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
-  }
-
-  /// getTypeAllocSizeInBits - Return the offset in bits between successive
-  /// objects of the specified type, including alignment padding; always a
-  /// multiple of 8.  This is the amount that alloca reserves for this type.
-  /// For example, returns 96 or 128 for x86_fp80, depending on alignment.
-  uint64_t getTypeAllocSizeInBits(Type* Ty) const {
-    return 8*getTypeAllocSize(Ty);
-  }
-
-  /// getABITypeAlignment - Return the minimum ABI-required alignment for the
-  /// specified type.
-  unsigned getABITypeAlignment(Type *Ty) const;
-
-  /// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
-  /// an integer type of the specified bitwidth.
-  unsigned getABIIntegerTypeAlignment(unsigned BitWidth) const;
-
-
-  /// getCallFrameTypeAlignment - Return the minimum ABI-required alignment
-  /// for the specified type when it is part of a call frame.
-  unsigned getCallFrameTypeAlignment(Type *Ty) const;
-
-
-  /// getPrefTypeAlignment - Return the preferred stack/global alignment for
-  /// the specified type.  This is always at least as good as the ABI alignment.
-  unsigned getPrefTypeAlignment(Type *Ty) const;
-
-  /// getPreferredTypeAlignmentShift - Return the preferred alignment for the
-  /// specified type, returned as log2 of the value (a shift amount).
-  ///
-  unsigned getPreferredTypeAlignmentShift(Type *Ty) const;
-
-  /// getIntPtrType - Return an unsigned integer type that is the same size or
-  /// greater to the host pointer size.
-  ///
-  IntegerType *getIntPtrType(LLVMContext &C) const;
-
-  /// getIndexedOffset - return the offset from the beginning of the type for
-  /// the specified indices.  This is used to implement getelementptr.
-  ///
-  uint64_t getIndexedOffset(Type *Ty, ArrayRef<Value *> Indices) const;
-
-  /// getStructLayout - Return a StructLayout object, indicating the alignment
-  /// of the struct, its size, and the offsets of its fields.  Note that this
-  /// information is lazily cached.
-  const StructLayout *getStructLayout(StructType *Ty) const;
-
-  /// getPreferredAlignment - Return the preferred alignment of the specified
-  /// global.  This includes an explicitly requested alignment (if the global
-  /// has one).
-  unsigned getPreferredAlignment(const GlobalVariable *GV) const;
-
-  /// getPreferredAlignmentLog - Return the preferred alignment of the
-  /// specified global, returned in log form.  This includes an explicitly
-  /// requested alignment (if the global has one).
-  unsigned getPreferredAlignmentLog(const GlobalVariable *GV) const;
-
-  /// RoundUpAlignment - Round the specified value up to the next alignment
-  /// boundary specified by Alignment.  For example, 7 rounded up to an
-  /// alignment boundary of 4 is 8.  8 rounded up to the alignment boundary of 4
-  /// is 8 because it is already aligned.
-  template <typename UIntTy>
-  static UIntTy RoundUpAlignment(UIntTy Val, unsigned Alignment) {
-    assert((Alignment & (Alignment-1)) == 0 && "Alignment must be power of 2!");
-    return (Val + (Alignment-1)) & ~UIntTy(Alignment-1);
-  }
-
-  static char ID; // Pass identification, replacement for typeid
-};
-
-/// StructLayout - used to lazily calculate structure layout information for a
-/// target machine, based on the TargetData structure.
-///
-class StructLayout {
-  uint64_t StructSize;
-  unsigned StructAlignment;
-  unsigned NumElements;
-  uint64_t MemberOffsets[1];  // variable sized array!
-public:
-
-  uint64_t getSizeInBytes() const {
-    return StructSize;
-  }
-
-  uint64_t getSizeInBits() const {
-    return 8*StructSize;
-  }
-
-  unsigned getAlignment() const {
-    return StructAlignment;
-  }
-
-  /// getElementContainingOffset - Given a valid byte offset into the structure,
-  /// return the structure index that contains it.
-  ///
-  unsigned getElementContainingOffset(uint64_t Offset) const;
-
-  uint64_t getElementOffset(unsigned Idx) const {
-    assert(Idx < NumElements && "Invalid element idx!");
-    return MemberOffsets[Idx];
-  }
-
-  uint64_t getElementOffsetInBits(unsigned Idx) const {
-    return getElementOffset(Idx)*8;
-  }
-
-private:
-  friend class TargetData;   // Only TargetData can create this class
-  StructLayout(StructType *ST, const TargetData &TD);
-};
-
-} // End llvm namespace
-
-#endif
Index: include/llvm/Target/TargetLowering.h
===================================================================
--- include/llvm/Target/TargetLowering.h	(revision 165037)
+++ include/llvm/Target/TargetLowering.h	(working copy)
@@ -50,7 +50,7 @@
   class MCContext;
   class MCExpr;
   template<typename T> class SmallVectorImpl;
-  class TargetData;
+  class DataLayout;
   class TargetRegisterClass;
   class TargetLibraryInfo;
   class TargetLoweringObjectFile;
@@ -137,7 +137,7 @@
   virtual ~TargetLowering();
 
   const TargetMachine &getTargetMachine() const { return TM; }
-  const TargetData *getTargetData() const { return TD; }
+  const DataLayout *getDataLayout() const { return TD; }
   const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }
 
   bool isBigEndian() const { return !IsLittleEndian; }
@@ -1789,7 +1789,7 @@
 
 private:
   const TargetMachine &TM;
-  const TargetData *TD;
+  const DataLayout *TD;
   const TargetLoweringObjectFile &TLOF;
 
   /// PointerTy - The type to use for pointers, usually i32 or i64.
Index: include/llvm/Target/TargetMachine.h
===================================================================
--- include/llvm/Target/TargetMachine.h	(revision 165037)
+++ include/llvm/Target/TargetMachine.h	(working copy)
@@ -31,7 +31,7 @@
 class MCContext;
 class PassManagerBase;
 class Target;
-class TargetData;
+class DataLayout;
 class TargetELFWriterInfo;
 class TargetFrameLowering;
 class TargetInstrInfo;
@@ -106,7 +106,7 @@
   virtual const TargetFrameLowering *getFrameLowering() const { return 0; }
   virtual const TargetLowering    *getTargetLowering() const { return 0; }
   virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const{ return 0; }
-  virtual const TargetData             *getTargetData() const { return 0; }
+  virtual const DataLayout             *getDataLayout() const { return 0; }
 
   /// getMCAsmInfo - Return target specific asm information.
   ///
Index: include/llvm/Target/TargetSelectionDAGInfo.h
===================================================================
--- include/llvm/Target/TargetSelectionDAGInfo.h	(revision 165037)
+++ include/llvm/Target/TargetSelectionDAGInfo.h	(working copy)
@@ -20,7 +20,7 @@
 
 namespace llvm {
 
-class TargetData;
+class DataLayout;
 class TargetMachine;
 
 //===----------------------------------------------------------------------===//
@@ -31,10 +31,10 @@
   TargetSelectionDAGInfo(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;
   void operator=(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;
 
-  const TargetData *TD;
+  const DataLayout *TD;
 
 protected:
-  const TargetData *getTargetData() const { return TD; }
+  const DataLayout *getDataLayout() const { return TD; }
 
 public:
   explicit TargetSelectionDAGInfo(const TargetMachine &TM);
Index: include/llvm/Transforms/IPO/InlinerPass.h
===================================================================
--- include/llvm/Transforms/IPO/InlinerPass.h	(revision 165037)
+++ include/llvm/Transforms/IPO/InlinerPass.h	(working copy)
@@ -21,7 +21,7 @@
 
 namespace llvm {
   class CallSite;
-  class TargetData;
+  class DataLayout;
   class InlineCost;
   template<class PtrType, unsigned SmallSize>
   class SmallPtrSet;
Index: include/llvm/Transforms/Utils/BuildLibCalls.h
===================================================================
--- include/llvm/Transforms/Utils/BuildLibCalls.h	(revision 165037)
+++ include/llvm/Transforms/Utils/BuildLibCalls.h	(working copy)
@@ -19,7 +19,7 @@
 
 namespace llvm {
   class Value;
-  class TargetData;
+  class DataLayout;
   class TargetLibraryInfo;
   
   /// CastToCStr - Return V if it is an i8*, otherwise cast it to i8*.
@@ -28,52 +28,52 @@
   /// EmitStrLen - Emit a call to the strlen function to the builder, for the
   /// specified pointer.  Ptr is required to be some pointer type, and the
   /// return value has 'intptr_t' type.
-  Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD,
+  Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
                     const TargetLibraryInfo *TLI);
 
   /// EmitStrNLen - Emit a call to the strnlen function to the builder, for the
   /// specified pointer.  Ptr is required to be some pointer type, MaxLen must
   /// be of size_t type, and the return value has 'intptr_t' type.
   Value *EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
-                     const TargetData *TD, const TargetLibraryInfo *TLI);
+                     const DataLayout *TD, const TargetLibraryInfo *TLI);
 
   /// EmitStrChr - Emit a call to the strchr function to the builder, for the
   /// specified pointer and character.  Ptr is required to be some pointer type,
   /// and the return value has 'i8*' type.
-  Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const TargetData *TD,
+  Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const DataLayout *TD,
                     const TargetLibraryInfo *TLI);
 
   /// EmitStrNCmp - Emit a call to the strncmp function to the builder.
   Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
-                     const TargetData *TD, const TargetLibraryInfo *TLI);
+                     const DataLayout *TD, const TargetLibraryInfo *TLI);
 
   /// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
   /// specified pointer arguments.
   Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
-                    const TargetData *TD, const TargetLibraryInfo *TLI,
+                    const DataLayout *TD, const TargetLibraryInfo *TLI,
                     StringRef Name = "strcpy");
 
   /// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the
   /// specified pointer arguments and length.
   Value *EmitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
-                     const TargetData *TD, const TargetLibraryInfo *TLI,
+                     const DataLayout *TD, const TargetLibraryInfo *TLI,
                      StringRef Name = "strncpy");
 
   /// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder.
   /// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
   /// are pointers.
   Value *EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
-                       IRBuilder<> &B, const TargetData *TD,
+                       IRBuilder<> &B, const DataLayout *TD,
                        const TargetLibraryInfo *TLI);
 
   /// EmitMemChr - Emit a call to the memchr function.  This assumes that Ptr is
   /// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
   Value *EmitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
-                    const TargetData *TD, const TargetLibraryInfo *TLI);
+                    const DataLayout *TD, const TargetLibraryInfo *TLI);
 
   /// EmitMemCmp - Emit a call to the memcmp function.
   Value *EmitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
-                    const TargetData *TD, const TargetLibraryInfo *TLI);
+                    const DataLayout *TD, const TargetLibraryInfo *TLI);
 
   /// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name'
   /// (e.g.  'floor').  This function is known to take a single of type matching
@@ -85,28 +85,28 @@
 
   /// EmitPutChar - Emit a call to the putchar function.  This assumes that Char
   /// is an integer.
-  Value *EmitPutChar(Value *Char, IRBuilder<> &B, const TargetData *TD,
+  Value *EmitPutChar(Value *Char, IRBuilder<> &B, const DataLayout *TD,
                      const TargetLibraryInfo *TLI);
 
   /// EmitPutS - Emit a call to the puts function.  This assumes that Str is
   /// some pointer.
-  Value *EmitPutS(Value *Str, IRBuilder<> &B, const TargetData *TD,
+  Value *EmitPutS(Value *Str, IRBuilder<> &B, const DataLayout *TD,
                   const TargetLibraryInfo *TLI);
 
   /// EmitFPutC - Emit a call to the fputc function.  This assumes that Char is
   /// an i32, and File is a pointer to FILE.
   Value *EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
-                   const TargetData *TD, const TargetLibraryInfo *TLI);
+                   const DataLayout *TD, const TargetLibraryInfo *TLI);
 
   /// EmitFPutS - Emit a call to the puts function.  Str is required to be a
   /// pointer and File is a pointer to FILE.
-  Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const TargetData *TD,
+  Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const DataLayout *TD,
                    const TargetLibraryInfo *TLI);
 
   /// EmitFWrite - Emit a call to the fwrite function.  This assumes that Ptr is
   /// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
   Value *EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
-                    const TargetData *TD, const TargetLibraryInfo *TLI);
+                    const DataLayout *TD, const TargetLibraryInfo *TLI);
 
   /// SimplifyFortifiedLibCalls - Helper class for folding checked library
   /// calls (e.g. __strcpy_chk) into their unchecked counterparts.
@@ -118,7 +118,7 @@
                             bool isString) const = 0;
   public:
     virtual ~SimplifyFortifiedLibCalls();
-    bool fold(CallInst *CI, const TargetData *TD, const TargetLibraryInfo *TLI);
+    bool fold(CallInst *CI, const DataLayout *TD, const TargetLibraryInfo *TLI);
   };
 }
 
Index: include/llvm/Transforms/Utils/Cloning.h
===================================================================
--- include/llvm/Transforms/Utils/Cloning.h	(revision 165037)
+++ include/llvm/Transforms/Utils/Cloning.h	(working copy)
@@ -39,7 +39,7 @@
 class CallSite;
 class Trace;
 class CallGraph;
-class TargetData;
+class DataLayout;
 class Loop;
 class LoopInfo;
 class AllocaInst;
@@ -150,7 +150,7 @@
                                SmallVectorImpl<ReturnInst*> &Returns,
                                const char *NameSuffix = "", 
                                ClonedCodeInfo *CodeInfo = 0,
-                               const TargetData *TD = 0,
+                               const DataLayout *TD = 0,
                                Instruction *TheCall = 0);
 
   
@@ -158,13 +158,13 @@
 /// InlineFunction call, and records the auxiliary results produced by it. 
 class InlineFunctionInfo {
 public:
-  explicit InlineFunctionInfo(CallGraph *cg = 0, const TargetData *td = 0)
+  explicit InlineFunctionInfo(CallGraph *cg = 0, const DataLayout *td = 0)
     : CG(cg), TD(td) {}
   
   /// CG - If non-null, InlineFunction will update the callgraph to reflect the
   /// changes it makes.
   CallGraph *CG;
-  const TargetData *TD;
+  const DataLayout *TD;
 
   /// StaticAllocas - InlineFunction fills this in with all static allocas that
   /// get copied into the caller.
Index: include/llvm/Transforms/Utils/Local.h
===================================================================
--- include/llvm/Transforms/Utils/Local.h	(revision 165037)
+++ include/llvm/Transforms/Utils/Local.h	(working copy)
@@ -18,7 +18,7 @@
 #include "llvm/IRBuilder.h"
 #include "llvm/Operator.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 
 namespace llvm {
 
@@ -35,7 +35,7 @@
 class PHINode;
 class AllocaInst;
 class ConstantExpr;
-class TargetData;
+class DataLayout;
 class TargetLibraryInfo;
 class DIBuilder;
 
@@ -84,7 +84,7 @@
 ///
 /// This returns true if it changed the code, note that it can delete
 /// instructions in other blocks as well in this block.
-bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD = 0,
+bool SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD = 0,
                                  const TargetLibraryInfo *TLI = 0);
     
 //===----------------------------------------------------------------------===//
@@ -103,7 +103,7 @@
 /// .. and delete the predecessor corresponding to the '1', this will attempt to
 /// recursively fold the 'and' to 0.
 void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
-                                  TargetData *TD = 0);
+                                  DataLayout *TD = 0);
     
   
 /// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its
@@ -134,7 +134,7 @@
 /// of the CFG.  It returns true if a modification was made, possibly deleting
 /// the basic block that was pointed to.
 ///
-bool SimplifyCFG(BasicBlock *BB, const TargetData *TD = 0);
+bool SimplifyCFG(BasicBlock *BB, const DataLayout *TD = 0);
 
 /// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch,
 /// and if a predecessor branches to us and one of our successors, fold the
@@ -162,10 +162,10 @@
 /// and it is more than the alignment of the ultimate object, see if we can
 /// increase the alignment of the ultimate object, making this check succeed.
 unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
-                                    const TargetData *TD = 0);
+                                    const DataLayout *TD = 0);
 
 /// getKnownAlignment - Try to infer an alignment for the specified pointer.
-static inline unsigned getKnownAlignment(Value *V, const TargetData *TD = 0) {
+static inline unsigned getKnownAlignment(Value *V, const DataLayout *TD = 0) {
   return getOrEnforceKnownAlignment(V, 0, TD);
 }
 
@@ -175,7 +175,7 @@
 /// When NoAssumptions is true, no assumptions about index computation not
 /// overflowing is made.
 template<typename IRBuilderTy>
-Value *EmitGEPOffset(IRBuilderTy *Builder, const TargetData &TD, User *GEP,
+Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
                      bool NoAssumptions = false) {
   gep_type_iterator GTI = gep_type_begin(GEP);
   Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
Index: include/llvm/Type.h
===================================================================
--- include/llvm/Type.h	(revision 165037)
+++ include/llvm/Type.h	(working copy)
@@ -252,7 +252,7 @@
 
   /// isSized - Return true if it makes sense to take the size of this type.  To
   /// get the actual size for a particular target, it is reasonable to use the
-  /// TargetData subsystem to do this.
+  /// DataLayout subsystem to do this.
   ///
   bool isSized() const {
     // If it's a primitive, it is always sized.
@@ -276,7 +276,7 @@
   ///
   /// Note that this may not reflect the size of memory allocated for an
   /// instance of the type or the number of bytes that are written when an
-  /// instance of the type is stored to memory. The TargetData class provides
+  /// instance of the type is stored to memory. The DataLayout class provides
   /// additional query functions to provide this information.
   ///
   unsigned getPrimitiveSizeInBits() const;
Index: lib/Analysis/AliasAnalysis.cpp
===================================================================
--- lib/Analysis/AliasAnalysis.cpp	(revision 165037)
+++ lib/Analysis/AliasAnalysis.cpp	(working copy)
@@ -35,7 +35,7 @@
 #include "llvm/Instructions.h"
 #include "llvm/LLVMContext.h"
 #include "llvm/Type.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 using namespace llvm;
 
@@ -452,7 +452,7 @@
 /// AliasAnalysis interface before any other methods are called.
 ///
 void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
-  TD = P->getAnalysisIfAvailable<TargetData>();
+  TD = P->getAnalysisIfAvailable<DataLayout>();
   TLI = P->getAnalysisIfAvailable<TargetLibraryInfo>();
   AA = &P->getAnalysis<AliasAnalysis>();
 }
@@ -463,7 +463,7 @@
   AU.addRequired<AliasAnalysis>();         // All AA's chain
 }
 
-/// getTypeStoreSize - Return the TargetData store size for the given type,
+/// getTypeStoreSize - Return the DataLayout store size for the given type,
 /// if known, or a conservative value otherwise.
 ///
 uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
Index: lib/Analysis/AliasSetTracker.cpp
===================================================================
--- lib/Analysis/AliasSetTracker.cpp	(revision 165037)
+++ lib/Analysis/AliasSetTracker.cpp	(working copy)
@@ -18,12 +18,12 @@
 #include "llvm/LLVMContext.h"
 #include "llvm/Pass.h"
 #include "llvm/Type.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Assembly/Writer.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/InstIterator.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/DataLayout.h"
 using namespace llvm;
 
 /// mergeSetIn - Merge the specified alias set into this alias set.
Index: lib/Analysis/BasicAliasAnalysis.cpp
===================================================================
--- lib/Analysis/BasicAliasAnalysis.cpp	(revision 165037)
+++ lib/Analysis/BasicAliasAnalysis.cpp	(working copy)
@@ -29,12 +29,12 @@
 #include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/Support/DataLayout.h"
 #include <algorithm>
 using namespace llvm;
 
@@ -84,7 +84,7 @@
 
 /// getObjectSize - Return the size of the object specified by V, or
 /// UnknownSize if unknown.
-static uint64_t getObjectSize(const Value *V, const TargetData &TD,
+static uint64_t getObjectSize(const Value *V, const DataLayout &TD,
                               const TargetLibraryInfo &TLI,
                               bool RoundToAlign = false) {
   uint64_t Size;
@@ -96,7 +96,7 @@
 /// isObjectSmallerThan - Return true if we can prove that the object specified
 /// by V is smaller than Size.
 static bool isObjectSmallerThan(const Value *V, uint64_t Size,
-                                const TargetData &TD,
+                                const DataLayout &TD,
                                 const TargetLibraryInfo &TLI) {
   // This function needs to use the aligned object size because we allow
   // reads a bit past the end given sufficient alignment.
@@ -108,7 +108,7 @@
 /// isObjectSize - Return true if we can prove that the object specified
 /// by V has size Size.
 static bool isObjectSize(const Value *V, uint64_t Size,
-                         const TargetData &TD, const TargetLibraryInfo &TLI) {
+                         const DataLayout &TD, const TargetLibraryInfo &TLI) {
   uint64_t ObjectSize = getObjectSize(V, TD, TLI);
   return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
 }
@@ -151,7 +151,7 @@
 /// represented in the result.
 static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
                                   ExtensionKind &Extension,
-                                  const TargetData &TD, unsigned Depth) {
+                                  const DataLayout &TD, unsigned Depth) {
   assert(V->getType()->isIntegerTy() && "Not an integer value");
 
   // Limit our recursion depth.
@@ -226,14 +226,14 @@
 /// specified amount, but which may have other unrepresented high bits. As such,
 /// the gep cannot necessarily be reconstructed from its decomposed form.
 ///
-/// When TargetData is around, this function is capable of analyzing everything
+/// When DataLayout is around, this function is capable of analyzing everything
 /// that GetUnderlyingObject can look through.  When not, it just looks
 /// through pointer casts.
 ///
 static const Value *
 DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
                        SmallVectorImpl<VariableGEPIndex> &VarIndices,
-                       const TargetData *TD) {
+                       const DataLayout *TD) {
   // Limit recursion depth to limit compile time in crazy cases.
   unsigned MaxLookup = 6;
   
@@ -277,7 +277,7 @@
         ->getElementType()->isSized())
       return V;
     
-    // If we are lacking TargetData information, we can't compute the offets of
+    // If we are lacking DataLayout information, we can't compute the offsets of
     // elements computed by GEPs.  However, we can handle bitcast equivalent
     // GEPs.
     if (TD == 0) {
@@ -868,7 +868,7 @@
         const Value *GEP1BasePtr =
           DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
         // DecomposeGEPExpression and GetUnderlyingObject should return the
-        // same result except when DecomposeGEPExpression has no TargetData.
+        // same result except when DecomposeGEPExpression has no DataLayout.
         if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
           assert(TD == 0 &&
              "DecomposeGEPExpression and GetUnderlyingObject disagree!");
@@ -902,7 +902,7 @@
       DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
     
     // DecomposeGEPExpression and GetUnderlyingObject should return the
-    // same result except when DecomposeGEPExpression has no TargetData.
+    // same result except when DecomposeGEPExpression has no DataLayout.
     if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
       assert(TD == 0 &&
              "DecomposeGEPExpression and GetUnderlyingObject disagree!");
@@ -937,7 +937,7 @@
       DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
     
     // DecomposeGEPExpression and GetUnderlyingObject should return the
-    // same result except when DecomposeGEPExpression has no TargetData.
+    // same result except when DecomposeGEPExpression has no DataLayout.
     if (GEP1BasePtr != UnderlyingV1) {
       assert(TD == 0 &&
              "DecomposeGEPExpression and GetUnderlyingObject disagree!");
Index: lib/Analysis/CodeMetrics.cpp
===================================================================
--- lib/Analysis/CodeMetrics.cpp	(revision 165037)
+++ lib/Analysis/CodeMetrics.cpp	(working copy)
@@ -13,9 +13,9 @@
 
 #include "llvm/Analysis/CodeMetrics.h"
 #include "llvm/Function.h"
+#include "llvm/IntrinsicInst.h"
 #include "llvm/Support/CallSite.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 
 using namespace llvm;
 
@@ -54,7 +54,7 @@
   return false;
 }
 
-bool llvm::isInstructionFree(const Instruction *I, const TargetData *TD) {
+bool llvm::isInstructionFree(const Instruction *I, const DataLayout *TD) {
   if (isa<PHINode>(I))
     return true;
 
@@ -119,7 +119,7 @@
 /// analyzeBasicBlock - Fill in the current structure with information gleaned
 /// from the specified block.
 void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
-                                    const TargetData *TD) {
+                                    const DataLayout *TD) {
   ++NumBlocks;
   unsigned NumInstsBeforeThisBB = NumInsts;
   for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
@@ -189,7 +189,7 @@
   NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
 }
 
-void CodeMetrics::analyzeFunction(Function *F, const TargetData *TD) {
+void CodeMetrics::analyzeFunction(Function *F, const DataLayout *TD) {
   // If this function contains a call that "returns twice" (e.g., setjmp or
   // _setjmp) and it isn't marked with "returns twice" itself, never inline it.
   // This is a hack because we depend on the user marking their local variables
Index: lib/Analysis/ConstantFolding.cpp
===================================================================
--- lib/Analysis/ConstantFolding.cpp	(revision 165037)
+++ lib/Analysis/ConstantFolding.cpp	(working copy)
@@ -11,7 +11,7 @@
 //
 // Also, to supplement the basic VMCore ConstantExpr simplifications,
 // this file defines some additional folding routines that can make use of
-// TargetData information. These functions cannot go in VMCore due to library
+// DataLayout information. These functions cannot go in VMCore due to library
 // dependency issues.
 //
 //===----------------------------------------------------------------------===//
@@ -25,7 +25,6 @@
 #include "llvm/Intrinsics.h"
 #include "llvm/Operator.h"
 #include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringMap.h"
@@ -33,6 +32,7 @@
 #include "llvm/Support/GetElementPtrTypeIterator.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/FEnv.h"
+#include "llvm/Support/DataLayout.h"
 #include <cerrno>
 #include <cmath>
 using namespace llvm;
@@ -42,10 +42,10 @@
 //===----------------------------------------------------------------------===//
 
 /// FoldBitCast - Constant fold bitcast, symbolically evaluating it with 
-/// TargetData.  This always returns a non-null constant, but it may be a
+/// DataLayout.  This always returns a non-null constant, but it may be a
 /// ConstantExpr if unfoldable.
 static Constant *FoldBitCast(Constant *C, Type *DestTy,
-                             const TargetData &TD) {
+                             const DataLayout &TD) {
   // Catch the obvious splat cases.
   if (C->isNullValue() && !DestTy->isX86_MMXTy())
     return Constant::getNullValue(DestTy);
@@ -218,7 +218,7 @@
 /// from a global, return the global and the constant.  Because of
 /// constantexprs, this function is recursive.
 static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
-                                       int64_t &Offset, const TargetData &TD) {
+                                       int64_t &Offset, const DataLayout &TD) {
   // Trivial case, constant is the global.
   if ((GV = dyn_cast<GlobalValue>(C))) {
     Offset = 0;
@@ -274,7 +274,7 @@
 /// the CurPtr buffer.  TD is the target data.
 static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
                                unsigned char *CurPtr, unsigned BytesLeft,
-                               const TargetData &TD) {
+                               const DataLayout &TD) {
   assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
          "Out of range access");
   
@@ -388,7 +388,7 @@
 }
 
 static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
-                                                 const TargetData &TD) {
+                                                 const DataLayout &TD) {
   Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
   IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
   
@@ -455,7 +455,7 @@
 /// produce if it is constant and determinable.  If this is not determinable,
 /// return null.
 Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
-                                             const TargetData *TD) {
+                                             const DataLayout *TD) {
   // First, try the easy cases:
   if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
     if (GV->isConstant() && GV->hasDefinitiveInitializer())
@@ -529,7 +529,7 @@
   return 0;
 }
 
-static Constant *ConstantFoldLoadInst(const LoadInst *LI, const TargetData *TD){
+static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
   if (LI->isVolatile()) return 0;
   
   if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
@@ -543,7 +543,7 @@
 /// these together.  If target data info is available, it is provided as TD, 
 /// otherwise TD is null.
 static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
-                                           Constant *Op1, const TargetData *TD){
+                                           Constant *Op1, const DataLayout *TD){
   // SROA
   
   // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
@@ -572,7 +572,7 @@
 /// explicitly cast them so that they aren't implicitly casted by the
 /// getelementptr.
 static Constant *CastGEPIndices(ArrayRef Ops,
-                                Type *ResultTy, const TargetData *TD,
+                                Type *ResultTy, const DataLayout *TD,
                                 const TargetLibraryInfo *TLI) {
   if (!TD) return 0;
   Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
@@ -622,7 +622,7 @@
 /// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
 /// constant expression, do so.
 static Constant *SymbolicallyEvaluateGEP(ArrayRef Ops,
-                                         Type *ResultTy, const TargetData *TD,
+                                         Type *ResultTy, const DataLayout *TD,
                                          const TargetLibraryInfo *TLI) {
   Constant *Ptr = Ops[0];
   if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
@@ -786,7 +786,7 @@
 /// this function can only fail when attempting to fold instructions like loads
 /// and stores, which have no constant expression form.
 Constant *llvm::ConstantFoldInstruction(Instruction *I,
-                                        const TargetData *TD,
+                                        const DataLayout *TD,
                                         const TargetLibraryInfo *TLI) {
   // Handle PHI nodes quickly here...
   if (PHINode *PN = dyn_cast<PHINode>(I)) {
@@ -856,10 +856,10 @@
 }
 
 /// ConstantFoldConstantExpression - Attempt to fold the constant expression
-/// using the specified TargetData.  If successful, the constant result is
+/// using the specified DataLayout.  If successful, the constant result is
 /// result is returned, if not, null is returned.
 Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
-                                               const TargetData *TD,
+                                               const DataLayout *TD,
                                                const TargetLibraryInfo *TLI) {
   SmallVector<Constant *, 8> Ops;
   for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end();
@@ -889,7 +889,7 @@
 ///
 Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, 
                                          ArrayRef<Constant *> Ops,
-                                         const TargetData *TD,
+                                         const DataLayout *TD,
                                          const TargetLibraryInfo *TLI) {                                         
   // Handle easy binops first.
   if (Instruction::isBinaryOp(Opcode)) {
@@ -976,7 +976,7 @@
 ///
 Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                 Constant *Ops0, Constant *Ops1, 
-                                                const TargetData *TD,
+                                                const DataLayout *TD,
                                                 const TargetLibraryInfo *TLI) {
   // fold: icmp (inttoptr x), null         -> icmp x, 0
   // fold: icmp (ptrtoint x), 0            -> icmp x, null
Index: lib/Analysis/InlineCost.cpp
===================================================================
--- lib/Analysis/InlineCost.cpp	(revision 165037)
+++ lib/Analysis/InlineCost.cpp	(working copy)
@@ -20,11 +20,11 @@
 #include "llvm/Support/InstVisitor.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/CallingConv.h"
 #include "llvm/IntrinsicInst.h"
 #include "llvm/Operator.h"
 #include "llvm/GlobalAlias.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallVector.h"
@@ -41,8 +41,8 @@
   typedef InstVisitor<CallAnalyzer, bool> Base;
   friend class InstVisitor<CallAnalyzer, bool>;
 
-  // TargetData if available, or null.
-  const TargetData *const TD;
+  // DataLayout if available, or null.
+  const DataLayout *const TD;
 
   // The called function.
   Function &F;
@@ -126,7 +126,7 @@
   bool visitCallSite(CallSite CS);
 
 public:
-  CallAnalyzer(const TargetData *TD, Function &Callee, int Threshold)
+  CallAnalyzer(const DataLayout *TD, Function &Callee, int Threshold)
     : TD(TD), F(Callee), Threshold(Threshold), Cost(0),
       AlwaysInline(F.getFnAttributes().hasAlwaysInlineAttr()),
       IsCallerRecursive(false), IsRecursiveCall(false),
@@ -832,7 +832,7 @@
         // one load and one store per word copied.
         // FIXME: The maxStoresPerMemcpy setting from the target should be used
         // here instead of a magic number of 8, but it's not available via
-        // TargetData.
+        // DataLayout.
         NumStores = std::min(NumStores, 8U);
 
         Cost -= 2 * NumStores * InlineConstants::InstrCost;
Index: lib/Analysis/InstructionSimplify.cpp
===================================================================
--- lib/Analysis/InstructionSimplify.cpp	(revision 165037)
+++ lib/Analysis/InstructionSimplify.cpp	(working copy)
@@ -31,7 +31,7 @@
 #include "llvm/Support/GetElementPtrTypeIterator.h"
 #include "llvm/Support/PatternMatch.h"
 #include "llvm/Support/ValueHandle.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 using namespace llvm;
 using namespace llvm::PatternMatch;
 
@@ -42,11 +42,11 @@
 STATISTIC(NumReassoc, "Number of reassociations");
 
 struct Query {
-  const TargetData *TD;
+  const DataLayout *TD;
   const TargetLibraryInfo *TLI;
   const DominatorTree *DT;
 
-  Query(const TargetData *td, const TargetLibraryInfo *tli,
+  Query(const DataLayout *td, const TargetLibraryInfo *tli,
         const DominatorTree *dt) : TD(td), TLI(tli), DT(dt) {}
 };
 
@@ -651,7 +651,7 @@
 }
 
 Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
-                             const TargetData *TD, const TargetLibraryInfo *TLI,
+                             const DataLayout *TD, const TargetLibraryInfo *TLI,
                              const DominatorTree *DT) {
   return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
                            RecursionLimit);
@@ -664,7 +664,7 @@
 /// if the GEP has all-constant indices. Returns false if any non-constant
 /// index is encountered leaving the 'Offset' in an undefined state. The
 /// 'Offset' APInt must be the bitwidth of the target's pointer size.
-static bool accumulateGEPOffset(const TargetData &TD, GEPOperator *GEP,
+static bool accumulateGEPOffset(const DataLayout &TD, GEPOperator *GEP,
                                 APInt &Offset) {
   unsigned IntPtrWidth = TD.getPointerSizeInBits();
   assert(IntPtrWidth == Offset.getBitWidth());
@@ -696,7 +696,7 @@
 /// accumulates the total constant offset applied in the returned constant. It
 /// returns 0 if V is not a pointer, and returns the constant '0' if there are
 /// no constant offsets applied.
-static Constant *stripAndComputeConstantOffsets(const TargetData &TD,
+static Constant *stripAndComputeConstantOffsets(const DataLayout &TD,
                                                 Value *&V) {
   if (!V->getType()->isPointerTy())
     return 0;
@@ -731,7 +731,7 @@
 
 /// \brief Compute the constant difference between two pointer values.
 /// If the difference is not a constant, returns zero.
-static Constant *computePointerDifference(const TargetData &TD,
+static Constant *computePointerDifference(const DataLayout &TD,
                                           Value *LHS, Value *RHS) {
   Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS);
   if (!LHSOffset)
@@ -880,7 +880,7 @@
 }
 
 Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
-                             const TargetData *TD, const TargetLibraryInfo *TLI,
+                             const DataLayout *TD, const TargetLibraryInfo *TLI,
                              const DominatorTree *DT) {
   return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
                            RecursionLimit);
@@ -951,7 +951,7 @@
   return 0;
 }
 
-Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *TD,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT) {
   return ::SimplifyMulInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1039,7 +1039,7 @@
   return 0;
 }
 
-Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
                               const TargetLibraryInfo *TLI,
                               const DominatorTree *DT) {
   return ::SimplifySDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1055,7 +1055,7 @@
   return 0;
 }
 
-Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
                               const TargetLibraryInfo *TLI,
                               const DominatorTree *DT) {
   return ::SimplifyUDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1074,7 +1074,7 @@
   return 0;
 }
 
-Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
                               const TargetLibraryInfo *TLI,
                               const DominatorTree *DT) {
   return ::SimplifyFDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1144,7 +1144,7 @@
   return 0;
 }
 
-Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
                               const TargetLibraryInfo *TLI,
                               const DominatorTree *DT) {
   return ::SimplifySRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1160,7 +1160,7 @@
   return 0;
 }
 
-Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *TD,
                               const TargetLibraryInfo *TLI,
                               const DominatorTree *DT) {
   return ::SimplifyURemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1179,7 +1179,7 @@
   return 0;
 }
 
-Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
                               const TargetLibraryInfo *TLI,
                               const DominatorTree *DT) {
   return ::SimplifyFRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1248,7 +1248,7 @@
 }
 
 Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
-                             const TargetData *TD, const TargetLibraryInfo *TLI,
+                             const DataLayout *TD, const TargetLibraryInfo *TLI,
                              const DominatorTree *DT) {
   return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
                            RecursionLimit);
@@ -1275,7 +1275,7 @@
 }
 
 Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
-                              const TargetData *TD,
+                              const DataLayout *TD,
                               const TargetLibraryInfo *TLI,
                               const DominatorTree *DT) {
   return ::SimplifyLShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
@@ -1307,7 +1307,7 @@
 }
 
 Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
-                              const TargetData *TD,
+                              const DataLayout *TD,
                               const TargetLibraryInfo *TLI,
                               const DominatorTree *DT) {
   return ::SimplifyAShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
@@ -1407,7 +1407,7 @@
   return 0;
 }
 
-Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *TD,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT) {
   return ::SimplifyAndInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1501,7 +1501,7 @@
   return 0;
 }
 
-Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *TD,
                             const TargetLibraryInfo *TLI,
                             const DominatorTree *DT) {
   return ::SimplifyOrInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1561,7 +1561,7 @@
   return 0;
 }
 
-Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *TD,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT) {
   return ::SimplifyXorInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1591,7 +1591,7 @@
   return 0;
 }
 
-static Constant *computePointerICmp(const TargetData &TD,
+static Constant *computePointerICmp(const DataLayout &TD,
                                     CmpInst::Predicate Pred,
                                     Value *LHS, Value *RHS) {
   // We can only fold certain predicates on pointer comparisons.
@@ -2399,7 +2399,7 @@
 }
 
 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
-                              const TargetData *TD,
+                              const DataLayout *TD,
                               const TargetLibraryInfo *TLI,
                               const DominatorTree *DT) {
   return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
@@ -2496,7 +2496,7 @@
 }
 
 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
-                              const TargetData *TD,
+                              const DataLayout *TD,
                               const TargetLibraryInfo *TLI,
                               const DominatorTree *DT) {
   return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
@@ -2531,7 +2531,7 @@
 }
 
 Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
-                                const TargetData *TD,
+                                const DataLayout *TD,
                                 const TargetLibraryInfo *TLI,
                                 const DominatorTree *DT) {
   return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (TD, TLI, DT),
@@ -2579,7 +2579,7 @@
   return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1));
 }
 
-Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const TargetData *TD,
+Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT) {
   return ::SimplifyGEPInst(Ops, Query (TD, TLI, DT), RecursionLimit);
@@ -2616,7 +2616,7 @@
 
 Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
                                      ArrayRef<unsigned> Idxs,
-                                     const TargetData *TD,
+                                     const DataLayout *TD,
                                      const TargetLibraryInfo *TLI,
                                      const DominatorTree *DT) {
   return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (TD, TLI, DT),
@@ -2664,7 +2664,7 @@
   return 0;
 }
 
-Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const TargetData *TD,
+Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD,
                                const TargetLibraryInfo *TLI,
                                const DominatorTree *DT) {
   return ::SimplifyTruncInst(Op, Ty, Query (TD, TLI, DT), RecursionLimit);
@@ -2730,7 +2730,7 @@
 }
 
 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
-                           const TargetData *TD, const TargetLibraryInfo *TLI,
+                           const DataLayout *TD, const TargetLibraryInfo *TLI,
                            const DominatorTree *DT) {
   return ::SimplifyBinOp(Opcode, LHS, RHS, Query (TD, TLI, DT), RecursionLimit);
 }
@@ -2745,7 +2745,7 @@
 }
 
 Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
-                             const TargetData *TD, const TargetLibraryInfo *TLI,
+                             const DataLayout *TD, const TargetLibraryInfo *TLI,
                              const DominatorTree *DT) {
   return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
                            RecursionLimit);
@@ -2761,7 +2761,7 @@
 
 /// SimplifyInstruction - See if we can compute a simplified version of this
 /// instruction.  If not, this returns null.
-Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD,
+Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *TD,
                                  const TargetLibraryInfo *TLI,
                                  const DominatorTree *DT) {
   Value *Result;
@@ -2881,7 +2881,7 @@
 /// This routine returns 'true' only when *it* simplifies something. The passed
 /// in simplified value does not count toward this.
 static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
-                                              const TargetData *TD,
+                                              const DataLayout *TD,
                                               const TargetLibraryInfo *TLI,
                                               const DominatorTree *DT) {
   bool Simplified = false;
@@ -2936,14 +2936,14 @@
 }
 
 bool llvm::recursivelySimplifyInstruction(Instruction *I,
-                                          const TargetData *TD,
+                                          const DataLayout *TD,
                                           const TargetLibraryInfo *TLI,
                                           const DominatorTree *DT) {
   return replaceAndRecursivelySimplifyImpl(I, 0, TD, TLI, DT);
 }
 
 bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
-                                         const TargetData *TD,
+                                         const DataLayout *TD,
                                          const TargetLibraryInfo *TLI,
                                          const DominatorTree *DT) {
   assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
Index: lib/Analysis/IVUsers.cpp
===================================================================
--- lib/Analysis/IVUsers.cpp	(revision 165037)
+++ lib/Analysis/IVUsers.cpp	(working copy)
@@ -22,11 +22,11 @@
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
 #include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Assembly/Writer.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/DataLayout.h"
 #include 
 using namespace llvm;
 
@@ -235,7 +235,7 @@
   LI = &getAnalysis<LoopInfo>();
   DT = &getAnalysis<DominatorTree>();
   SE = &getAnalysis<ScalarEvolution>();
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
 
   // Find all uses of induction variables in this loop, and categorize
   // them by stride.  Start by finding all of the PHI nodes in the header for
Index: lib/Analysis/LazyValueInfo.cpp
===================================================================
--- lib/Analysis/LazyValueInfo.cpp	(revision 165037)
+++ lib/Analysis/LazyValueInfo.cpp	(working copy)
@@ -19,7 +19,6 @@
 #include "llvm/Instructions.h"
 #include "llvm/IntrinsicInst.h"
 #include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Support/CFG.h"
 #include "llvm/Support/ConstantRange.h"
@@ -27,6 +26,7 @@
 #include "llvm/Support/PatternMatch.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/ADT/STLExtras.h"
 #include 
@@ -212,7 +212,7 @@
 
         // Unless we can prove that the two Constants are different, we must
         // move to overdefined.
-        // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding.
+        // FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding.
         if (ConstantInt *Res = dyn_cast<ConstantInt>(
                 ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
                                                 getConstant(),
@@ -238,7 +238,7 @@
 
         // Unless we can prove that the two Constants are different, we must
         // move to overdefined.
-        // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding.
+        // FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding.
         if (ConstantInt *Res = dyn_cast<ConstantInt>(
                 ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
                                                 getNotConstant(),
@@ -1009,7 +1009,7 @@
   if (PImpl)
     getCache(PImpl).clear();
 
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
 
   // Fully lazy.
Index: lib/Analysis/Lint.cpp
===================================================================
--- lib/Analysis/Lint.cpp	(revision 165037)
+++ lib/Analysis/Lint.cpp	(working copy)
@@ -43,7 +43,6 @@
 #include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Pass.h"
 #include "llvm/PassManager.h"
@@ -53,6 +52,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/InstVisitor.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/ADT/STLExtras.h"
 using namespace llvm;
 
@@ -103,7 +103,7 @@
     Module *Mod;
     AliasAnalysis *AA;
     DominatorTree *DT;
-    TargetData *TD;
+    DataLayout *TD;
     TargetLibraryInfo *TLI;
 
     std::string Messages;
@@ -177,7 +177,7 @@
   Mod = F.getParent();
   AA = &getAnalysis<AliasAnalysis>();
   DT = &getAnalysis<DominatorTree>();
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
   visit(F);
   dbgs() << MessagesStr.str();
@@ -506,7 +506,7 @@
             "Undefined result: Shift count out of range", &I);
 }
 
-static bool isZero(Value *V, TargetData *TD) {
+static bool isZero(Value *V, DataLayout *TD) {
   // Assume undef could be zero.
   if (isa<UndefValue>(V)) return true;
 
Index: lib/Analysis/Loads.cpp
===================================================================
--- lib/Analysis/Loads.cpp	(revision 165037)
+++ lib/Analysis/Loads.cpp	(working copy)
@@ -13,12 +13,12 @@
 
 #include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/GlobalAlias.h"
 #include "llvm/GlobalVariable.h"
 #include "llvm/IntrinsicInst.h"
 #include "llvm/LLVMContext.h"
 #include "llvm/Operator.h"
+#include "llvm/Support/DataLayout.h"
 using namespace llvm;
 
 /// AreEquivalentAddressValues - Test if A and B will obviously have the same
@@ -52,8 +52,8 @@
 /// bitcasts to get back to the underlying object being addressed, keeping
 /// track of the offset in bytes from the GEPs relative to the result.
 /// This is closely related to GetUnderlyingObject but is located
-/// here to avoid making VMCore depend on TargetData.
-static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD,
+/// here to avoid making VMCore depend on DataLayout.
+static Value *getUnderlyingObjectWithOffset(Value *V, const DataLayout *TD,
                                             uint64_t &ByteOffset,
                                             unsigned MaxLookup = 6) {
   if (!V->getType()->isPointerTy())
@@ -85,7 +85,7 @@
 /// specified pointer, we do a quick local scan of the basic block containing
 /// ScanFrom, to determine if the address is already accessed.
 bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
-                                       unsigned Align, const TargetData *TD) {
+                                       unsigned Align, const DataLayout *TD) {
   uint64_t ByteOffset = 0;
   Value *Base = V;
   if (TD)
Index: lib/Analysis/LoopDependenceAnalysis.cpp
===================================================================
--- lib/Analysis/LoopDependenceAnalysis.cpp	(revision 165037)
+++ lib/Analysis/LoopDependenceAnalysis.cpp	(working copy)
@@ -35,7 +35,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 using namespace llvm;
 
 STATISTIC(NumAnswered,    "Number of dependence queries answered");
Index: lib/Analysis/MemoryBuiltins.cpp
===================================================================
--- lib/Analysis/MemoryBuiltins.cpp	(revision 165037)
+++ lib/Analysis/MemoryBuiltins.cpp	(working copy)
@@ -25,7 +25,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Transforms/Utils/Local.h"
 using namespace llvm;
@@ -190,7 +190,7 @@
   return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : 0;
 }
 
-static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
+static Value *computeArraySize(const CallInst *CI, const DataLayout *TD,
                                const TargetLibraryInfo *TLI,
                                bool LookThroughSExt = false) {
   if (!CI)
@@ -220,7 +220,7 @@
 /// is a call to malloc whose array size can be determined and the array size
 /// is not constant 1.  Otherwise, return NULL.
 const CallInst *llvm::isArrayMalloc(const Value *I,
-                                    const TargetData *TD,
+                                    const DataLayout *TD,
                                     const TargetLibraryInfo *TLI) {
   const CallInst *CI = extractMallocCall(I, TLI);
   Value *ArraySize = computeArraySize(CI, TD, TLI);
@@ -281,7 +281,7 @@
 /// then return that multiple.  For non-array mallocs, the multiple is
 /// constant 1.  Otherwise, return NULL for mallocs whose array size cannot be
 /// determined.
-Value *llvm::getMallocArraySize(CallInst *CI, const TargetData *TD,
+Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout *TD,
                                 const TargetLibraryInfo *TLI,
                                 bool LookThroughSExt) {
   assert(isMallocLikeFn(CI, TLI) && "getMallocArraySize and not malloc call");
@@ -341,7 +341,7 @@
 /// object size in Size if successful, and false otherwise.
 /// If RoundToAlign is true, then Size is rounded up to the alignment of allocas,
 /// byval arguments, and global variables.
-bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD,
+bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *TD,
                          const TargetLibraryInfo *TLI, bool RoundToAlign) {
   if (!TD)
     return false;
@@ -373,7 +373,7 @@
   return Size;
 }
 
-ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const TargetData *TD,
+ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *TD,
                                                  const TargetLibraryInfo *TLI,
                                                  LLVMContext &Context,
                                                  bool RoundToAlign)
@@ -559,7 +559,7 @@
 }
 
 
-ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const TargetData *TD,
+ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const DataLayout *TD,
                                                    const TargetLibraryInfo *TLI,
                                                      LLVMContext &Context)
 : TD(TD), TLI(TLI), Context(Context), Builder(Context, TargetFolder(TD)) {
Index: lib/Analysis/MemoryDependenceAnalysis.cpp
===================================================================
--- lib/Analysis/MemoryDependenceAnalysis.cpp	(revision 165037)
+++ lib/Analysis/MemoryDependenceAnalysis.cpp	(working copy)
@@ -30,7 +30,7 @@
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Support/PredIteratorCache.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 using namespace llvm;
 
 STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
@@ -89,7 +89,7 @@
 
 bool MemoryDependenceAnalysis::runOnFunction(Function &) {
   AA = &getAnalysis<AliasAnalysis>();
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   DT = getAnalysisIfAvailable<DominatorTree>();
   if (PredCache == 0)
     PredCache.reset(new PredIteratorCache());
@@ -256,7 +256,7 @@
                                        const Value *&MemLocBase,
                                        int64_t &MemLocOffs,
                                        const LoadInst *LI,
-                                       const TargetData *TD) {
+                                       const DataLayout *TD) {
   // If we have no target data, we can't do this.
   if (TD == 0) return false;
 
@@ -280,7 +280,7 @@
 unsigned MemoryDependenceAnalysis::
 getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
                                 unsigned MemLocSize, const LoadInst *LI,
-                                const TargetData &TD) {
+                                const DataLayout &TD) {
   // We can only extend simple integer loads.
   if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;
   
Index: lib/Analysis/NoAliasAnalysis.cpp
===================================================================
--- lib/Analysis/NoAliasAnalysis.cpp	(revision 165037)
+++ lib/Analysis/NoAliasAnalysis.cpp	(working copy)
@@ -15,7 +15,7 @@
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/Passes.h"
 #include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 using namespace llvm;
 
 namespace {
@@ -36,7 +36,7 @@
     virtual void initializePass() {
       // Note: NoAA does not call InitializeAliasAnalysis because it's
       // special and does not support chaining.
-      TD = getAnalysisIfAvailable<TargetData>();
+      TD = getAnalysisIfAvailable<DataLayout>();
     }
 
     virtual AliasResult alias(const Location &LocA, const Location &LocB) {
Index: lib/Analysis/ScalarEvolution.cpp
===================================================================
--- lib/Analysis/ScalarEvolution.cpp	(revision 165037)
+++ lib/Analysis/ScalarEvolution.cpp	(working copy)
@@ -73,7 +73,6 @@
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ConstantRange.h"
@@ -83,6 +82,7 @@
 #include "llvm/Support/InstIterator.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallPtrSet.h"
@@ -2582,7 +2582,7 @@
 }
 
 const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
-  // If we have TargetData, we can bypass creating a target-independent
+  // If we have DataLayout, we can bypass creating a target-independent
   // constant expression and then folding it back into a ConstantInt.
   // This is just a compile-time optimization.
   if (TD)
@@ -2608,7 +2608,7 @@
 
 const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
                                              unsigned FieldNo) {
-  // If we have TargetData, we can bypass creating a target-independent
+  // If we have DataLayout, we can bypass creating a target-independent
   // constant expression and then folding it back into a ConstantInt.
   // This is just a compile-time optimization.
   if (TD)
@@ -2673,7 +2673,7 @@
 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
   assert(isSCEVable(Ty) && "Type is not SCEVable!");
 
-  // If we have a TargetData, use it!
+  // If we have a DataLayout, use it!
   if (TD)
     return TD->getTypeSizeInBits(Ty);
 
@@ -2681,7 +2681,7 @@
   if (Ty->isIntegerTy())
     return Ty->getPrimitiveSizeInBits();
 
-  // The only other support type is pointer. Without TargetData, conservatively
+  // The only other support type is pointer. Without DataLayout, conservatively
   // assume pointers are 64-bit.
   assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
   return 64;
@@ -2701,7 +2701,7 @@
   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
   if (TD) return TD->getIntPtrType(getContext());
 
-  // Without TargetData, conservatively assume pointers are 64-bit.
+  // Without DataLayout, conservatively assume pointers are 64-bit.
   return Type::getInt64Ty(getContext());
 }
 
@@ -4751,7 +4751,7 @@
 /// reason, return null.
 static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
-                                    const TargetData *TD,
+                                    const DataLayout *TD,
                                     const TargetLibraryInfo *TLI) {
   // Convenient constant check, but redundant for recursive calls.
   if (Constant *C = dyn_cast<Constant>(V)) return C;
@@ -6590,7 +6590,7 @@
 bool ScalarEvolution::runOnFunction(Function &F) {
   this->F = &F;
   LI = &getAnalysis<LoopInfo>();
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
   DT = &getAnalysis<DominatorTree>();
   return false;
Index: lib/Analysis/ScalarEvolutionExpander.cpp
===================================================================
--- lib/Analysis/ScalarEvolutionExpander.cpp	(revision 165037)
+++ lib/Analysis/ScalarEvolutionExpander.cpp	(working copy)
@@ -18,8 +18,8 @@
 #include "llvm/IntrinsicInst.h"
 #include "llvm/LLVMContext.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLowering.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/ADT/STLExtras.h"
 
 using namespace llvm;
@@ -212,7 +212,7 @@
                               const SCEV *&Remainder,
                               const SCEV *Factor,
                               ScalarEvolution &SE,
-                              const TargetData *TD) {
+                              const DataLayout *TD) {
   // Everything is divisible by one.
   if (Factor->isOne())
     return true;
@@ -253,7 +253,7 @@
   // of the given factor.
   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
     if (TD) {
-      // With TargetData, the size is known. Check if there is a constant
+      // With DataLayout, the size is known. Check if there is a constant
       // operand which is a multiple of the given factor. If so, we can
       // factor it.
       const SCEVConstant *FC = cast<SCEVConstant>(Factor);
@@ -267,7 +267,7 @@
           return true;
         }
     } else {
-      // Without TargetData, check if Factor can be factored out of any of the
+      // Without DataLayout, check if Factor can be factored out of any of the
       // Mul's operands. If so, we can just remove it.
       for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
         const SCEV *SOp = M->getOperand(i);
@@ -458,7 +458,7 @@
       // An empty struct has no fields.
       if (STy->getNumElements() == 0) break;
       if (SE.TD) {
-        // With TargetData, field offsets are known. See if a constant offset
+        // With DataLayout, field offsets are known. See if a constant offset
         // falls within any of the struct fields.
         if (Ops.empty()) break;
         if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
@@ -477,7 +477,7 @@
             }
           }
       } else {
-        // Without TargetData, just check for an offsetof expression of the
+        // Without DataLayout, just check for an offsetof expression of the
         // appropriate struct type.
         for (unsigned i = 0, e = Ops.size(); i != e; ++i)
           if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
Index: lib/Analysis/ValueTracking.cpp
===================================================================
--- lib/Analysis/ValueTracking.cpp	(revision 165037)
+++ lib/Analysis/ValueTracking.cpp	(working copy)
@@ -22,11 +22,11 @@
 #include "llvm/LLVMContext.h"
 #include "llvm/Metadata.h"
 #include "llvm/Operator.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Support/ConstantRange.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/PatternMatch.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include 
 using namespace llvm;
@@ -36,7 +36,7 @@
 
 /// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if
 /// unknown returns 0).  For vector types, returns the element type's bitwidth.
-static unsigned getBitWidth(Type *Ty, const TargetData *TD) {
+static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
   if (unsigned BitWidth = Ty->getScalarSizeInBits())
     return BitWidth;
   assert(isa<PointerType>(Ty) && "Expected a pointer type!");
@@ -46,7 +46,7 @@
 static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
                                     APInt &KnownZero, APInt &KnownOne,
                                     APInt &KnownZero2, APInt &KnownOne2,
-                                    const TargetData *TD, unsigned Depth) {
+                                    const DataLayout *TD, unsigned Depth) {
   if (!Add) {
     if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
       // We know that the top bits of C-X are clear if X contains less bits
@@ -132,7 +132,7 @@
 static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW,
                                  APInt &KnownZero, APInt &KnownOne,
                                  APInt &KnownZero2, APInt &KnownOne2,
-                                 const TargetData *TD, unsigned Depth) {
+                                 const DataLayout *TD, unsigned Depth) {
   unsigned BitWidth = KnownZero.getBitWidth();
   ComputeMaskedBits(Op1, KnownZero, KnownOne, TD, Depth+1);
   ComputeMaskedBits(Op0, KnownZero2, KnownOne2, TD, Depth+1);
@@ -226,7 +226,7 @@
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the elements in the vector.
 void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
-                             const TargetData *TD, unsigned Depth) {
+                             const DataLayout *TD, unsigned Depth) {
   assert(V && "No Value?");
   assert(Depth <= MaxDepth && "Limit Search Depth");
   unsigned BitWidth = KnownZero.getBitWidth();
@@ -778,7 +778,7 @@
 /// ComputeSignBit - Determine whether the sign bit is known to be zero or
 /// one.  Convenience wrapper around ComputeMaskedBits.
 void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
-                          const TargetData *TD, unsigned Depth) {
+                          const DataLayout *TD, unsigned Depth) {
   unsigned BitWidth = getBitWidth(V->getType(), TD);
   if (!BitWidth) {
     KnownZero = false;
@@ -796,7 +796,7 @@
 /// bit set when defined. For vectors return true if every element is known to
 /// be a power of two when defined.  Supports values with integer or pointer
 /// types and vectors of integers.
-bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, bool OrZero,
+bool llvm::isPowerOfTwo(Value *V, const DataLayout *TD, bool OrZero,
                         unsigned Depth) {
   if (Constant *C = dyn_cast<Constant>(V)) {
     if (C->isNullValue())
@@ -859,7 +859,7 @@
 /// when defined.  For vectors return true if every element is known to be
 /// non-zero when defined.  Supports values with integer or pointer type and
 /// vectors of integers.
-bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
+bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) {
   if (Constant *C = dyn_cast<Constant>(V)) {
     if (C->isNullValue())
       return false;
@@ -986,7 +986,7 @@
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the elements in the vector.
 bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
-                             const TargetData *TD, unsigned Depth) {
+                             const DataLayout *TD, unsigned Depth) {
   APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
   ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
   assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 
@@ -1003,10 +1003,10 @@
 ///
 /// 'Op' must have a scalar integer type.
 ///
-unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
+unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
                                   unsigned Depth) {
   assert((TD || V->getType()->isIntOrIntVectorTy()) &&
-         "ComputeNumSignBits requires a TargetData object to operate "
+         "ComputeNumSignBits requires a DataLayout object to operate "
          "on non-integer values!");
   Type *Ty = V->getType();
   unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
@@ -1582,7 +1582,7 @@
 /// it can be expressed as a base pointer plus a constant offset.  Return the
 /// base and offset to the caller.
 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
-                                              const TargetData &TD) {
+                                              const DataLayout &TD) {
   Operator *PtrOp = dyn_cast<Operator>(Ptr);
   if (PtrOp == 0 || Ptr->getType()->isVectorTy())
     return Ptr;
@@ -1768,7 +1768,7 @@
 }
 
 Value *
-llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) {
+llvm::GetUnderlyingObject(Value *V, const DataLayout *TD, unsigned MaxLookup) {
   if (!V->getType()->isPointerTy())
     return V;
   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
@@ -1799,7 +1799,7 @@
 void
 llvm::GetUnderlyingObjects(Value *V,
                            SmallVectorImpl<Value *> &Objects,
-                           const TargetData *TD,
+                           const DataLayout *TD,
                            unsigned MaxLookup) {
   SmallPtrSet<Value *, 4> Visited;
   SmallVector<Value *, 4> Worklist;
@@ -1844,7 +1844,7 @@
 }
 
 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
-                                        const TargetData *TD) {
+                                        const DataLayout *TD) {
   const Operator *Inst = dyn_cast<Operator>(V);
   if (!Inst)
     return false;
Index: lib/Bitcode/Reader/BitcodeReader.cpp
===================================================================
--- lib/Bitcode/Reader/BitcodeReader.cpp	(revision 165037)
+++ lib/Bitcode/Reader/BitcodeReader.cpp	(working copy)
@@ -25,6 +25,7 @@
 #include "llvm/Support/DataStream.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/InstIterator.h"
 #include "llvm/OperandTraits.h"
 using namespace llvm;
 
Index: lib/CodeGen/Analysis.cpp
===================================================================
--- lib/CodeGen/Analysis.cpp	(revision 165037)
+++ lib/CodeGen/Analysis.cpp	(working copy)
@@ -21,11 +21,11 @@
 #include "llvm/Module.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
+#include "llvm/Support/DataLayout.h"
 using namespace llvm;
 
 /// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
@@ -79,7 +79,7 @@
                            uint64_t StartingOffset) {
   // Given a struct type, recursively traverse the elements.
   if (StructType *STy = dyn_cast<StructType>(Ty)) {
-    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
+    const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
     for (StructType::element_iterator EB = STy->element_begin(),
                                       EI = EB,
                                       EE = STy->element_end();
@@ -91,7 +91,7 @@
   // Given an array type, recursively traverse the elements.
   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
+    uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                       StartingOffset + i * EltSize);
Index: lib/CodeGen/AsmPrinter/ARMException.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/ARMException.cpp	(revision 165037)
+++ lib/CodeGen/AsmPrinter/ARMException.cpp	(working copy)
@@ -24,7 +24,7 @@
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetFrameLowering.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
Index: lib/CodeGen/AsmPrinter/AsmPrinter.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/AsmPrinter.cpp	(revision 165037)
+++ lib/CodeGen/AsmPrinter/AsmPrinter.cpp	(working copy)
@@ -33,7 +33,7 @@
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetLoweringObjectFile.h"
@@ -67,7 +67,7 @@
 /// getGVAlignmentLog2 - Return the alignment to use for the specified global
 /// value in log2 form.  This rounds up to the preferred alignment if possible
 /// and legal.
-static unsigned getGVAlignmentLog2(const GlobalValue *GV, const TargetData &TD,
+static unsigned getGVAlignmentLog2(const GlobalValue *GV, const DataLayout &TD,
                                    unsigned InBits = 0) {
   unsigned NumBits = 0;
   if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
@@ -131,9 +131,9 @@
 }
 
 
-/// getTargetData - Return information about data layout.
-const TargetData &AsmPrinter::getTargetData() const {
-  return *TM.getTargetData();
+/// getDataLayout - Return information about data layout.
+const DataLayout &AsmPrinter::getDataLayout() const {
+  return *TM.getDataLayout();
 }
 
 /// getCurrentSection() - Return the current section we are emitting to.
@@ -160,7 +160,7 @@
   const_cast<TargetLoweringObjectFile&>(getObjFileLowering())
     .Initialize(OutContext, TM);
 
-  Mang = new Mangler(OutContext, *TM.getTargetData());
+  Mang = new Mangler(OutContext, *TM.getDataLayout());
 
   // Allow the target to emit any magic that it wants at the start of the file.
   EmitStartOfAsmFile(M);
@@ -280,7 +280,7 @@
 
   SectionKind GVKind = TargetLoweringObjectFile::getKindForGlobal(GV, TM);
 
-  const TargetData *TD = TM.getTargetData();
+  const DataLayout *TD = TM.getDataLayout();
   uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
 
   // If the alignment is specified, we *must* obey it.  Overaligning a global
@@ -991,7 +991,7 @@
       Kind = SectionKind::getReadOnlyWithRelLocal();
       break;
     case 0:
-    switch (TM.getTargetData()->getTypeAllocSize(CPE.getType())) {
+    switch (TM.getDataLayout()->getTypeAllocSize(CPE.getType())) {
     case 4:  Kind = SectionKind::getMergeableConst4(); break;
     case 8:  Kind = SectionKind::getMergeableConst8(); break;
     case 16: Kind = SectionKind::getMergeableConst16();break;
@@ -1037,7 +1037,7 @@
       OutStreamer.EmitFill(NewOffset - Offset, 0/*fillval*/, 0/*addrspace*/);
 
       Type *Ty = CPE.getType();
-      Offset = NewOffset + TM.getTargetData()->getTypeAllocSize(Ty);
+      Offset = NewOffset + TM.getDataLayout()->getTypeAllocSize(Ty);
       OutStreamer.EmitLabel(GetCPISymbol(CPI));
 
       if (CPE.isMachineConstantPoolEntry())
@@ -1080,7 +1080,7 @@
     JTInDiffSection = true;
   }
 
-  EmitAlignment(Log2_32(MJTI->getEntryAlignment(*TM.getTargetData())));
+  EmitAlignment(Log2_32(MJTI->getEntryAlignment(*TM.getDataLayout())));
 
   // Jump tables in code sections are marked with a data_region directive
   // where that's supported.
@@ -1196,7 +1196,7 @@
 
   assert(Value && "Unknown entry kind!");
 
-  unsigned EntrySize = MJTI->getEntrySize(*TM.getTargetData());
+  unsigned EntrySize = MJTI->getEntrySize(*TM.getDataLayout());
   OutStreamer.EmitValue(Value, EntrySize, /*addrspace*/0);
 }
 
@@ -1298,7 +1298,7 @@
   }
 
   // Emit the function pointers in the target-specific order
-  const TargetData *TD = TM.getTargetData();
+  const DataLayout *TD = TM.getDataLayout();
   unsigned Align = Log2_32(TD->getPointerPrefAlignment());
   std::stable_sort(Structors.begin(), Structors.end(), priority_order);
   for (unsigned i = 0, e = Structors.size(); i != e; ++i) {
@@ -1414,7 +1414,7 @@
 // if required for correctness.
 //
 void AsmPrinter::EmitAlignment(unsigned NumBits, const GlobalValue *GV) const {
-  if (GV) NumBits = getGVAlignmentLog2(GV, *TM.getTargetData(), NumBits);
+  if (GV) NumBits = getGVAlignmentLog2(GV, *TM.getDataLayout(), NumBits);
 
   if (NumBits == 0) return;   // 1-byte aligned: no need to emit alignment.
 
@@ -1453,10 +1453,10 @@
   switch (CE->getOpcode()) {
   default:
     // If the code isn't optimized, there may be outstanding folding
-    // opportunities. Attempt to fold the expression using TargetData as a
+    // opportunities. Attempt to fold the expression using DataLayout as a
     // last resort before giving up.
     if (Constant *C =
-          ConstantFoldConstantExpression(CE, AP.TM.getTargetData()))
+          ConstantFoldConstantExpression(CE, AP.TM.getDataLayout()))
       if (C != CE)
         return lowerConstant(C, AP);
 
@@ -1470,7 +1470,7 @@
       report_fatal_error(OS.str());
     }
   case Instruction::GetElementPtr: {
-    const TargetData &TD = *AP.TM.getTargetData();
+    const DataLayout &TD = *AP.TM.getDataLayout();
     // Generate a symbolic expression for the byte address
     const Constant *PtrVal = CE->getOperand(0);
     SmallVector<Value*, 8> IdxVec(CE->op_begin()+1, CE->op_end());
@@ -1499,7 +1499,7 @@
     return lowerConstant(CE->getOperand(0), AP);
 
   case Instruction::IntToPtr: {
-    const TargetData &TD = *AP.TM.getTargetData();
+    const DataLayout &TD = *AP.TM.getDataLayout();
     // Handle casts to pointers by changing them into casts to the appropriate
     // integer type.  This promotes constant folding and simplifies this code.
     Constant *Op = CE->getOperand(0);
@@ -1509,7 +1509,7 @@
   }
 
   case Instruction::PtrToInt: {
-    const TargetData &TD = *AP.TM.getTargetData();
+    const DataLayout &TD = *AP.TM.getDataLayout();
     // Support only foldable casts to/from pointers that can be eliminated by
     // changing the pointer to the appropriately sized integer type.
     Constant *Op = CE->getOperand(0);
@@ -1583,7 +1583,7 @@
   if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
     if (CI->getBitWidth() > 64) return -1;
 
-    uint64_t Size = TM.getTargetData()->getTypeAllocSize(V->getType());
+    uint64_t Size = TM.getDataLayout()->getTypeAllocSize(V->getType());
     uint64_t Value = CI->getZExtValue();
 
     // Make sure the constant is at least 8 bits long and has a power
@@ -1627,7 +1627,7 @@
   // See if we can aggregate this into a .fill, if so, emit it as such.
   int Value = isRepeatedByteSequence(CDS, AP.TM);
   if (Value != -1) {
-    uint64_t Bytes = AP.TM.getTargetData()->getTypeAllocSize(CDS->getType());
+    uint64_t Bytes = AP.TM.getDataLayout()->getTypeAllocSize(CDS->getType());
     // Don't emit a 1-byte object as a .fill.
     if (Bytes > 1)
       return AP.OutStreamer.EmitFill(Bytes, Value, AddrSpace);
@@ -1677,7 +1677,7 @@
     }
   }
 
-  const TargetData &TD = *AP.TM.getTargetData();
+  const DataLayout &TD = *AP.TM.getDataLayout();
   unsigned Size = TD.getTypeAllocSize(CDS->getType());
   unsigned EmittedSize = TD.getTypeAllocSize(CDS->getType()->getElementType()) *
                         CDS->getNumElements();
@@ -1693,7 +1693,7 @@
   int Value = isRepeatedByteSequence(CA, AP.TM);
 
   if (Value != -1) {
-    uint64_t Bytes = AP.TM.getTargetData()->getTypeAllocSize(CA->getType());
+    uint64_t Bytes = AP.TM.getDataLayout()->getTypeAllocSize(CA->getType());
     AP.OutStreamer.EmitFill(Bytes, Value, AddrSpace);
   }
   else {
@@ -1707,7 +1707,7 @@
   for (unsigned i = 0, e = CV->getType()->getNumElements(); i != e; ++i)
     emitGlobalConstantImpl(CV->getOperand(i), AddrSpace, AP);
 
-  const TargetData &TD = *AP.TM.getTargetData();
+  const DataLayout &TD = *AP.TM.getDataLayout();
   unsigned Size = TD.getTypeAllocSize(CV->getType());
   unsigned EmittedSize = TD.getTypeAllocSize(CV->getType()->getElementType()) *
                          CV->getType()->getNumElements();
@@ -1718,7 +1718,7 @@
 static void emitGlobalConstantStruct(const ConstantStruct *CS,
                                      unsigned AddrSpace, AsmPrinter &AP) {
   // Print the fields in successive locations. Pad to align if needed!
-  const TargetData *TD = AP.TM.getTargetData();
+  const DataLayout *TD = AP.TM.getDataLayout();
   unsigned Size = TD->getTypeAllocSize(CS->getType());
   const StructLayout *Layout = TD->getStructLayout(CS->getType());
   uint64_t SizeSoFar = 0;
@@ -1798,7 +1798,7 @@
         << DoubleVal.convertToDouble() << '\n';
     }
 
-    if (AP.TM.getTargetData()->isBigEndian()) {
+    if (AP.TM.getDataLayout()->isBigEndian()) {
       AP.OutStreamer.EmitIntValue(p[1], 2, AddrSpace);
       AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace);
     } else {
@@ -1807,7 +1807,7 @@
     }
 
     // Emit the tail padding for the long double.
-    const TargetData &TD = *AP.TM.getTargetData();
+    const DataLayout &TD = *AP.TM.getDataLayout();
     AP.OutStreamer.EmitZeros(TD.getTypeAllocSize(CFP->getType()) -
                              TD.getTypeStoreSize(CFP->getType()), AddrSpace);
     return;
@@ -1819,7 +1819,7 @@
   // API needed to prevent premature destruction.
   APInt API = CFP->getValueAPF().bitcastToAPInt();
   const uint64_t *p = API.getRawData();
-  if (AP.TM.getTargetData()->isBigEndian()) {
+  if (AP.TM.getDataLayout()->isBigEndian()) {
     AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace);
     AP.OutStreamer.EmitIntValue(p[1], 8, AddrSpace);
   } else {
@@ -1830,7 +1830,7 @@
 
 static void emitGlobalConstantLargeInt(const ConstantInt *CI,
                                        unsigned AddrSpace, AsmPrinter &AP) {
-  const TargetData *TD = AP.TM.getTargetData();
+  const DataLayout *TD = AP.TM.getDataLayout();
   unsigned BitWidth = CI->getBitWidth();
   assert((BitWidth & 63) == 0 && "only support multiples of 64-bits");
 
@@ -1846,7 +1846,7 @@
 
 static void emitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace,
                                    AsmPrinter &AP) {
-  const TargetData *TD = AP.TM.getTargetData();
+  const DataLayout *TD = AP.TM.getDataLayout();
   uint64_t Size = TD->getTypeAllocSize(CV->getType());
   if (isa<ConstantAggregateZero>(CV) || isa<UndefValue>(CV))
     return AP.OutStreamer.EmitZeros(Size, AddrSpace);
@@ -1911,7 +1911,7 @@
 
 /// EmitGlobalConstant - Print a general LLVM constant to the .s file.
 void AsmPrinter::EmitGlobalConstant(const Constant *CV, unsigned AddrSpace) {
-  uint64_t Size = TM.getTargetData()->getTypeAllocSize(CV->getType());
+  uint64_t Size = TM.getDataLayout()->getTypeAllocSize(CV->getType());
   if (Size)
     emitGlobalConstantImpl(CV, AddrSpace, *this);
   else if (MAI->hasSubsectionsViaSymbols()) {
Index: lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp	(revision 165037)
+++ lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp	(working copy)
@@ -18,7 +18,7 @@
 #include "llvm/MC/MCSection.h"
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetFrameLowering.h"
 #include "llvm/Target/TargetLoweringObjectFile.h"
 #include "llvm/Target/TargetMachine.h"
@@ -112,7 +112,7 @@
   
   switch (Encoding & 0x07) {
   default: llvm_unreachable("Invalid encoded value.");
-  case dwarf::DW_EH_PE_absptr: return TM.getTargetData()->getPointerSize();
+  case dwarf::DW_EH_PE_absptr: return TM.getDataLayout()->getPointerSize();
   case dwarf::DW_EH_PE_udata2: return 2;
   case dwarf::DW_EH_PE_udata4: return 4;
   case dwarf::DW_EH_PE_udata8: return 8;
Index: lib/CodeGen/AsmPrinter/DIE.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/DIE.cpp	(revision 165037)
+++ lib/CodeGen/AsmPrinter/DIE.cpp	(working copy)
@@ -17,7 +17,7 @@
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/Allocator.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -200,7 +200,7 @@
   case dwarf::DW_FORM_udata: Asm->EmitULEB128(Integer); return;
   case dwarf::DW_FORM_sdata: Asm->EmitSLEB128(Integer); return;
   case dwarf::DW_FORM_addr:
-    Size = Asm->getTargetData().getPointerSize(); break;
+    Size = Asm->getDataLayout().getPointerSize(); break;
   default: llvm_unreachable("DIE Value form not supported yet");
   }
   Asm->OutStreamer.EmitIntValue(Integer, Size, 0/*addrspace*/);
@@ -222,7 +222,7 @@
   case dwarf::DW_FORM_data8: return sizeof(int64_t);
   case dwarf::DW_FORM_udata: return MCAsmInfo::getULEB128Size(Integer);
   case dwarf::DW_FORM_sdata: return MCAsmInfo::getSLEB128Size(Integer);
-  case dwarf::DW_FORM_addr:  return AP->getTargetData().getPointerSize();
+  case dwarf::DW_FORM_addr:  return AP->getDataLayout().getPointerSize();
   default: llvm_unreachable("DIE Value form not supported yet");
   }
 }
@@ -249,7 +249,7 @@
 unsigned DIELabel::SizeOf(AsmPrinter *AP, unsigned Form) const {
   if (Form == dwarf::DW_FORM_data4) return 4;
   if (Form == dwarf::DW_FORM_strp) return 4;
-  return AP->getTargetData().getPointerSize();
+  return AP->getDataLayout().getPointerSize();
 }
 
 #ifndef NDEBUG
@@ -273,7 +273,7 @@
 unsigned DIEDelta::SizeOf(AsmPrinter *AP, unsigned Form) const {
   if (Form == dwarf::DW_FORM_data4) return 4;
   if (Form == dwarf::DW_FORM_strp) return 4;
-  return AP->getTargetData().getPointerSize();
+  return AP->getDataLayout().getPointerSize();
 }
 
 #ifndef NDEBUG
Index: lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/DwarfCFIException.cpp	(revision 165037)
+++ lib/CodeGen/AsmPrinter/DwarfCFIException.cpp	(working copy)
@@ -25,7 +25,7 @@
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetFrameLowering.h"
 #include "llvm/Target/TargetLoweringObjectFile.h"
 #include "llvm/Target/TargetMachine.h"
Index: lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp	(revision 165037)
+++ lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp	(working copy)
@@ -22,7 +22,7 @@
 #include "llvm/Instructions.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetFrameLowering.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetRegisterInfo.h"
@@ -510,7 +510,7 @@
   const char *FltPtr = (const char*)FltVal.getRawData();
 
   int NumBytes = FltVal.getBitWidth() / 8; // 8 bits per byte.
-  bool LittleEndian = Asm->getTargetData().isLittleEndian();
+  bool LittleEndian = Asm->getDataLayout().isLittleEndian();
   int Incr = (LittleEndian ? 1 : -1);
   int Start = (LittleEndian ? 0 : NumBytes - 1);
   int Stop = (LittleEndian ? NumBytes : -1);
@@ -552,7 +552,7 @@
   const uint64_t *Ptr64 = Val.getRawData();
 
   int NumBytes = Val.getBitWidth() / 8; // 8 bits per byte.
-  bool LittleEndian = Asm->getTargetData().isLittleEndian();
+  bool LittleEndian = Asm->getDataLayout().isLittleEndian();
 
   // Output the constant to DWARF one byte at a time.
   for (int i = 0; i < NumBytes; i++) {
@@ -1227,7 +1227,7 @@
     addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
     SmallVector Idx(CE->op_begin()+1, CE->op_end());
     addUInt(Block, 0, dwarf::DW_FORM_udata, 
-                   Asm->getTargetData().getIndexedOffset(Ptr->getType(), Idx));
+                   Asm->getDataLayout().getIndexedOffset(Ptr->getType(), Idx));
     addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_plus);
     addBlock(VariableDIE, dwarf::DW_AT_location, 0, Block);
   }
@@ -1459,7 +1459,7 @@
     Offset -= FieldOffset;
 
     // Maybe we need to work from the other end.
-    if (Asm->getTargetData().isLittleEndian())
+    if (Asm->getDataLayout().isLittleEndian())
       Offset = FieldSize - (Offset + Size);
     addUInt(MemberDie, dwarf::DW_AT_bit_offset, 0, Offset);
 
Index: lib/CodeGen/AsmPrinter/DwarfDebug.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/DwarfDebug.cpp	(revision 165037)
+++ lib/CodeGen/AsmPrinter/DwarfDebug.cpp	(working copy)
@@ -27,7 +27,7 @@
 #include "llvm/MC/MCSection.h"
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetFrameLowering.h"
 #include "llvm/Target/TargetLoweringObjectFile.h"
 #include "llvm/Target/TargetMachine.h"
@@ -384,7 +384,7 @@
     // DW_AT_ranges appropriately.
     TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4,
                    DebugRangeSymbols.size() 
-                   * Asm->getTargetData().getPointerSize());
+                   * Asm->getDataLayout().getPointerSize());
     for (SmallVector::const_iterator RI = Ranges.begin(),
          RE = Ranges.end(); RI != RE; ++RI) {
       DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first));
@@ -450,7 +450,7 @@
     // DW_AT_ranges appropriately.
     TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4,
                    DebugRangeSymbols.size() 
-                   * Asm->getTargetData().getPointerSize());
+                   * Asm->getDataLayout().getPointerSize());
     for (SmallVector::const_iterator RI = Ranges.begin(),
          RE = Ranges.end(); RI != RE; ++RI) {
       DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first));
@@ -1764,7 +1764,7 @@
     Asm->EmitSectionOffset(Asm->GetTempSymbol("abbrev_begin"),
                            DwarfAbbrevSectionSym);
     Asm->OutStreamer.AddComment("Address Size (in bytes)");
-    Asm->EmitInt8(Asm->getTargetData().getPointerSize());
+    Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
 
     emitDIE(Die);
     Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("info_end", TheCU->getID()));
@@ -1810,14 +1810,14 @@
   Asm->EmitInt8(0);
 
   Asm->OutStreamer.AddComment("Op size");
-  Asm->EmitInt8(Asm->getTargetData().getPointerSize() + 1);
+  Asm->EmitInt8(Asm->getDataLayout().getPointerSize() + 1);
   Asm->OutStreamer.AddComment("DW_LNE_set_address");
   Asm->EmitInt8(dwarf::DW_LNE_set_address);
 
   Asm->OutStreamer.AddComment("Section end label");
 
   Asm->OutStreamer.EmitSymbolValue(Asm->GetTempSymbol("section_end",SectionEnd),
-                                   Asm->getTargetData().getPointerSize(),
+                                   Asm->getDataLayout().getPointerSize(),
                                    0/*AddrSpace*/);
 
   // Mark end of matrix.
@@ -2046,7 +2046,7 @@
   // Start the dwarf loc section.
   Asm->OutStreamer.SwitchSection(
     Asm->getObjFileLowering().getDwarfLocSection());
-  unsigned char Size = Asm->getTargetData().getPointerSize();
+  unsigned char Size = Asm->getDataLayout().getPointerSize();
   Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("debug_loc", 0));
   unsigned index = 1;
   for (SmallVector::iterator
@@ -2143,7 +2143,7 @@
   // Start the dwarf ranges section.
   Asm->OutStreamer.SwitchSection(
     Asm->getObjFileLowering().getDwarfRangesSection());
-  unsigned char Size = Asm->getTargetData().getPointerSize();
+  unsigned char Size = Asm->getDataLayout().getPointerSize();
   for (SmallVector::iterator
          I = DebugRangeSymbols.begin(), E = DebugRangeSymbols.end();
        I != E; ++I) {
@@ -2201,7 +2201,7 @@
   Asm->OutStreamer.AddComment("Dwarf Version");
   Asm->EmitInt16(dwarf::DWARF_VERSION);
   Asm->OutStreamer.AddComment("Address Size (in bytes)");
-  Asm->EmitInt8(Asm->getTargetData().getPointerSize());
+  Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
 
   for (SmallVector::iterator I = InlinedSPNodes.begin(),
          E = InlinedSPNodes.end(); I != E; ++I) {
@@ -2232,7 +2232,7 @@
 
       if (Asm->isVerbose()) Asm->OutStreamer.AddComment("low_pc");
       Asm->OutStreamer.EmitSymbolValue(LI->first,
-                                       Asm->getTargetData().getPointerSize(),0);
+                                       Asm->getDataLayout().getPointerSize(),0);
     }
   }
 
Index: lib/CodeGen/AsmPrinter/DwarfException.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/DwarfException.cpp	(revision 165037)
+++ lib/CodeGen/AsmPrinter/DwarfException.cpp	(working copy)
@@ -24,7 +24,7 @@
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetFrameLowering.h"
 #include "llvm/Target/TargetLoweringObjectFile.h"
 #include "llvm/Target/TargetMachine.h"
@@ -417,7 +417,7 @@
     // that we're omitting that bit.
     TTypeEncoding = dwarf::DW_EH_PE_omit;
     // dwarf::DW_EH_PE_absptr
-    TypeFormatSize = Asm->getTargetData().getPointerSize();
+    TypeFormatSize = Asm->getDataLayout().getPointerSize();
   } else {
     // Okay, we have actual filters or typeinfos to emit.  As such, we need to
     // pick a type encoding for them.  We're about to emit a list of pointers to
Index: lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp	(revision 165037)
+++ lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp	(working copy)
@@ -20,7 +20,7 @@
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLoweringObjectFile.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/ADT/SmallString.h"
@@ -91,7 +91,7 @@
 /// either condition is detected in a function which uses the GC.
 ///
 void OcamlGCMetadataPrinter::finishAssembly(AsmPrinter &AP) {
-  unsigned IntPtrSize = AP.TM.getTargetData()->getPointerSize();
+  unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize();
 
   AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getTextSection());
   EmitCamlGlobal(getModule(), AP, "code_end");
Index: lib/CodeGen/AsmPrinter/Win64Exception.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/Win64Exception.cpp	(revision 165037)
+++ lib/CodeGen/AsmPrinter/Win64Exception.cpp	(working copy)
@@ -24,7 +24,7 @@
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetFrameLowering.h"
 #include "llvm/Target/TargetLoweringObjectFile.h"
 #include "llvm/Target/TargetMachine.h"
Index: lib/CodeGen/CallingConvLower.cpp
===================================================================
--- lib/CodeGen/CallingConvLower.cpp	(revision 165037)
+++ lib/CodeGen/CallingConvLower.cpp	(working copy)
@@ -17,8 +17,8 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetLowering.h"
 using namespace llvm;
Index: lib/CodeGen/IntrinsicLowering.cpp
===================================================================
--- lib/CodeGen/IntrinsicLowering.cpp	(revision 165037)
+++ lib/CodeGen/IntrinsicLowering.cpp	(working copy)
@@ -21,7 +21,7 @@
 #include "llvm/Support/CallSite.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 using namespace llvm;
 
 template 
Index: lib/CodeGen/MachineBasicBlock.cpp
===================================================================
--- lib/CodeGen/MachineBasicBlock.cpp	(revision 165037)
+++ lib/CodeGen/MachineBasicBlock.cpp	(working copy)
@@ -21,7 +21,6 @@
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCContext.h"
 #include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Assembly/Writer.h"
@@ -30,6 +29,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/LeakDetector.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/DataLayout.h"
 #include 
 using namespace llvm;
 
Index: lib/CodeGen/MachineFunction.cpp
===================================================================
--- lib/CodeGen/MachineFunction.cpp	(revision 165037)
+++ lib/CodeGen/MachineFunction.cpp	(working copy)
@@ -28,7 +28,6 @@
 #include "llvm/MC/MCContext.h"
 #include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetFrameLowering.h"
@@ -36,6 +35,7 @@
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Support/GraphWriter.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/DataLayout.h"
 using namespace llvm;
 
 //===----------------------------------------------------------------------===//
@@ -62,7 +62,7 @@
   if (Fn->getFnAttributes().hasStackAlignmentAttr())
     FrameInfo->ensureMaxAlignment(Fn->getAttributes().
                                   getFnAttributes().getStackAlignment());
-  ConstantPool = new (Allocator) MachineConstantPool(TM.getTargetData());
+  ConstantPool = new (Allocator) MachineConstantPool(TM.getDataLayout());
   Alignment = TM.getTargetLowering()->getMinFunctionAlignment();
   // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
   if (!Fn->getFnAttributes().hasOptimizeForSizeAttr())
@@ -545,7 +545,7 @@
 //===----------------------------------------------------------------------===//
 
 /// getEntrySize - Return the size of each entry in the jump table.
-unsigned MachineJumpTableInfo::getEntrySize(const TargetData &TD) const {
+unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
   // The size of a jump table entry is 4 bytes unless the entry is just the
   // address of a block, in which case it is the pointer size.
   switch (getEntryKind()) {
@@ -564,7 +564,7 @@
 }
 
 /// getEntryAlignment - Return the alignment of each entry in the jump table.
-unsigned MachineJumpTableInfo::getEntryAlignment(const TargetData &TD) const {
+unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
   // The alignment of a jump table entry is the alignment of int32 unless the
   // entry is just the address of a block, in which case it is the pointer
   // alignment.
@@ -670,7 +670,7 @@
 /// CanShareConstantPoolEntry - Test whether the given two constants
 /// can be allocated the same constant pool entry.
 static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
-                                      const TargetData *TD) {
+                                      const DataLayout *TD) {
   // Handle the trivial case quickly.
   if (A == B) return true;
 
@@ -694,7 +694,7 @@
   // Try constant folding a bitcast of both instructions to an integer.  If we
   // get two identical ConstantInt's, then we are good to share them.  We use
   // the constant folding APIs to do this so that we get the benefit of
-  // TargetData.
+  // DataLayout.
   if (isa<PointerType>(A->getType()))
     A = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
                                  const_cast<Constant*>(A), TD);
Index: lib/CodeGen/MachineModuleInfo.cpp
===================================================================
--- lib/CodeGen/MachineModuleInfo.cpp	(revision 165037)
+++ lib/CodeGen/MachineModuleInfo.cpp	(working copy)
@@ -25,7 +25,7 @@
 using namespace llvm;
 using namespace llvm::dwarf;
 
-// Handle the Pass registration stuff necessary to use TargetData's.
+// Handle the Pass registration stuff necessary to use DataLayout's.
 INITIALIZE_PASS(MachineModuleInfo, "machinemoduleinfo",
                 "Machine Module Information", false, false)
 char MachineModuleInfo::ID = 0;
Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp	(revision 165037)
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp	(working copy)
@@ -23,7 +23,7 @@
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
@@ -5340,7 +5340,7 @@
       !LD2->isVolatile() &&
       DAG.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1)) {
     unsigned Align = LD1->getAlignment();
-    unsigned NewAlign = TLI.getTargetData()->
+    unsigned NewAlign = TLI.getDataLayout()->
       getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
 
     if (NewAlign <= Align &&
@@ -5409,7 +5409,7 @@
       !cast<LoadSDNode>(N0)->isVolatile() &&
       (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT))) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
-    unsigned Align = TLI.getTargetData()->
+    unsigned Align = TLI.getDataLayout()->
       getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
     unsigned OrigAlign = LN0->getAlignment();
 
@@ -7336,7 +7336,7 @@
 
       unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
       Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
-      if (NewAlign < TLI.getTargetData()->getABITypeAlignment(NewVTTy))
+      if (NewAlign < TLI.getDataLayout()->getABITypeAlignment(NewVTTy))
         return SDValue();
 
       SDValue NewPtr = DAG.getNode(ISD::ADD, LD->getDebugLoc(),
@@ -7398,7 +7398,7 @@
     unsigned LDAlign = LD->getAlignment();
     unsigned STAlign = ST->getAlignment();
     Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
-    unsigned ABIAlign = TLI.getTargetData()->getABITypeAlignment(IntVTTy);
+    unsigned ABIAlign = TLI.getDataLayout()->getABITypeAlignment(IntVTTy);
     if (LDAlign < ABIAlign || STAlign < ABIAlign)
       return SDValue();
 
@@ -7435,7 +7435,7 @@
       ST->isUnindexed()) {
     unsigned OrigAlign = ST->getAlignment();
     EVT SVT = Value.getOperand(0).getValueType();
-    unsigned Align = TLI.getTargetData()->
+    unsigned Align = TLI.getDataLayout()->
       getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext()));
     if (Align <= OrigAlign &&
         ((!LegalOperations && !ST->isVolatile()) ||
@@ -7823,7 +7823,7 @@
       // Check the resultant load doesn't need a higher alignment than the
       // original load.
       unsigned NewAlign =
-        TLI.getTargetData()
+        TLI.getDataLayout()
             ->getABITypeAlignment(LVT.getTypeForEVT(*DAG.getContext()));
 
       if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, LVT))
@@ -8711,7 +8711,7 @@
           const_cast<ConstantFP*>(TV->getConstantFPValue())
         };
         Type *FPTy = Elts[0]->getType();
-        const TargetData &TD = *TLI.getTargetData();
+        const DataLayout &TD = *TLI.getDataLayout();
 
         // Create a ConstantArray of the two constants.
         Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
Index: lib/CodeGen/SelectionDAG/FastISel.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/FastISel.cpp	(revision 165037)
+++ lib/CodeGen/SelectionDAG/FastISel.cpp	(working copy)
@@ -53,7 +53,7 @@
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/Analysis/Loads.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Target/TargetLowering.h"
@@ -1059,7 +1059,7 @@
     MFI(*FuncInfo.MF->getFrameInfo()),
     MCP(*FuncInfo.MF->getConstantPool()),
     TM(FuncInfo.MF->getTarget()),
-    TD(*TM.getTargetData()),
+    TD(*TM.getDataLayout()),
     TII(*TM.getInstrInfo()),
     TLI(*TM.getTargetLowering()),
     TRI(*TM.getRegisterInfo()),
Index: lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp	(revision 165037)
+++ lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp	(working copy)
@@ -29,7 +29,7 @@
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetOptions.h"
@@ -80,9 +80,9 @@
       if (const AllocaInst *AI = dyn_cast<AllocaInst>(I))
         if (const ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
         Type *Ty = AI->getAllocatedType();
-        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
+        uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
         unsigned Align =
-          std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
+          std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty),
                    AI->getAlignment());
 
         TySize *= CUI->getZExtValue();   // Get total allocated size.
Index: lib/CodeGen/SelectionDAG/InstrEmitter.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/InstrEmitter.cpp	(revision 165037)
+++ lib/CodeGen/SelectionDAG/InstrEmitter.cpp	(working copy)
@@ -20,7 +20,7 @@
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetLowering.h"
@@ -390,10 +390,10 @@
     Type *Type = CP->getType();
     // MachineConstantPool wants an explicit alignment.
     if (Align == 0) {
-      Align = TM->getTargetData()->getPrefTypeAlignment(Type);
+      Align = TM->getDataLayout()->getPrefTypeAlignment(Type);
       if (Align == 0) {
         // Alignment of vector types.  FIXME!
-        Align = TM->getTargetData()->getTypeAllocSize(Type);
+        Align = TM->getDataLayout()->getTypeAllocSize(Type);
       }
     }
 
Index: lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/LegalizeDAG.cpp	(revision 165037)
+++ lib/CodeGen/SelectionDAG/LegalizeDAG.cpp	(working copy)
@@ -22,7 +22,7 @@
 #include "llvm/CodeGen/SelectionDAG.h"
 #include "llvm/Target/TargetFrameLowering.h"
 #include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -718,7 +718,7 @@
           // expand it.
           if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
             Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
-            unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
+            unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
             if (ST->getAlignment() < ABIAlignment)
               ExpandUnalignedStore(cast<StoreSDNode>(Node),
                                    DAG, TLI, this);
@@ -824,7 +824,7 @@
           // expand it.
           if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
             Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
-            unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
+            unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
             if (ST->getAlignment() < ABIAlignment)
               ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
           }
@@ -874,7 +874,7 @@
       if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
         Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
         unsigned ABIAlignment =
-          TLI.getTargetData()->getABITypeAlignment(Ty);
+          TLI.getDataLayout()->getABITypeAlignment(Ty);
         if (LD->getAlignment() < ABIAlignment){
           ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, RVal, RChain);
         }
@@ -1059,7 +1059,7 @@
                  Type *Ty =
                    LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
                  unsigned ABIAlignment =
-                   TLI.getTargetData()->getABITypeAlignment(Ty);
+                   TLI.getDataLayout()->getABITypeAlignment(Ty);
                  if (LD->getAlignment() < ABIAlignment){
                    ExpandUnalignedLoad(cast<LoadSDNode>(Node),
                                        DAG, TLI, Value, Chain);
@@ -1625,7 +1625,7 @@
                                                DebugLoc dl) {
   // Create the stack frame object.
   unsigned SrcAlign =
-    TLI.getTargetData()->getPrefTypeAlignment(SrcOp.getValueType().
+    TLI.getDataLayout()->getPrefTypeAlignment(SrcOp.getValueType().
                                               getTypeForEVT(*DAG.getContext()));
   SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);
 
@@ -1637,7 +1637,7 @@
   unsigned SlotSize = SlotVT.getSizeInBits();
   unsigned DestSize = DestVT.getSizeInBits();
   Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
-  unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(DestType);
+  unsigned DestAlign = TLI.getDataLayout()->getPrefTypeAlignment(DestType);
 
   // Emit a store to the stack slot.  Use a truncstore if the input value is
   // later than DestVT.
@@ -2786,7 +2786,7 @@
 
     // Increment the pointer, VAList, to the next vaarg
     Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
-                       DAG.getConstant(TLI.getTargetData()->
+                       DAG.getConstant(TLI.getDataLayout()->
                           getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
                                        TLI.getPointerTy()));
     // Store the incremented VAList to the legalized pointer
@@ -3365,7 +3365,7 @@
 
     EVT PTy = TLI.getPointerTy();
 
-    const TargetData &TD = *TLI.getTargetData();
+    const DataLayout &TD = *TLI.getDataLayout();
     unsigned EntrySize =
       DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);
 
Index: lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/LegalizeTypes.cpp	(revision 165037)
+++ lib/CodeGen/SelectionDAG/LegalizeTypes.cpp	(working copy)
@@ -15,7 +15,7 @@
 
 #include "LegalizeTypes.h"
 #include "llvm/CallingConv.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ErrorHandling.h"
Index: lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp	(revision 165037)
+++ lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp	(working copy)
@@ -20,7 +20,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "LegalizeTypes.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 using namespace llvm;
 
 //===----------------------------------------------------------------------===//
@@ -146,7 +146,7 @@
   // Create the stack frame object.  Make sure it is aligned for both
   // the source and expanded destination types.
   unsigned Alignment =
-    TLI.getTargetData()->getPrefTypeAlignment(NOutVT.
+    TLI.getDataLayout()->getPrefTypeAlignment(NOutVT.
                                               getTypeForEVT(*DAG.getContext()));
   SDValue StackPtr = DAG.CreateStackTemporary(InVT, Alignment);
   int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
Index: lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp	(revision 165037)
+++ lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp	(working copy)
@@ -21,7 +21,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "LegalizeTypes.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
 using namespace llvm;
@@ -749,7 +749,7 @@
   SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
   Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
   unsigned Alignment =
-    TLI.getTargetData()->getPrefTypeAlignment(VecType);
+    TLI.getDataLayout()->getPrefTypeAlignment(VecType);
   Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, MachinePointerInfo(), EltVT,
                             false, false, 0);
 
Index: lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp	(revision 165037)
+++ lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp	(working copy)
@@ -17,7 +17,7 @@
 #include "llvm/CodeGen/SchedulerRegistry.h"
 #include "llvm/CodeGen/SelectionDAGISel.h"
 #include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/ADT/SmallSet.h"
Index: lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp	(revision 165037)
+++ lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp	(working copy)
@@ -22,7 +22,7 @@
 #include "llvm/CodeGen/SelectionDAGISel.h"
 #include "llvm/CodeGen/ScheduleHazardRecognizer.h"
 #include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetLowering.h"
Index: lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp	(revision 165037)
+++ lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp	(working copy)
@@ -25,7 +25,7 @@
 #include "llvm/CodeGen/SchedulerRegistry.h"
 #include "llvm/CodeGen/SelectionDAGISel.h"
 #include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
Index: lib/CodeGen/SelectionDAG/SelectionDAG.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAG.cpp	(revision 165037)
+++ lib/CodeGen/SelectionDAG/SelectionDAG.cpp	(working copy)
@@ -29,7 +29,7 @@
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetSelectionDAGInfo.h"
 #include "llvm/Target/TargetOptions.h"
@@ -883,7 +883,7 @@
                    PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                    VT.getTypeForEVT(*getContext());
 
-  return TLI.getTargetData()->getABITypeAlignment(Ty);
+  return TLI.getDataLayout()->getABITypeAlignment(Ty);
 }
 
 // EntryNode could meaningfully have debug info if we can find it...
@@ -1173,7 +1173,7 @@
   assert((TargetFlags == 0 || isTarget) &&
          "Cannot set target flags on target-independent globals");
   if (Alignment == 0)
-    Alignment = TLI.getTargetData()->getPrefTypeAlignment(C->getType());
+    Alignment = TLI.getDataLayout()->getPrefTypeAlignment(C->getType());
   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
   FoldingSetNodeID ID;
   AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
@@ -1200,7 +1200,7 @@
   assert((TargetFlags == 0 || isTarget) &&
          "Cannot set target flags on target-independent globals");
   if (Alignment == 0)
-    Alignment = TLI.getTargetData()->getPrefTypeAlignment(C->getType());
+    Alignment = TLI.getDataLayout()->getPrefTypeAlignment(C->getType());
   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
   FoldingSetNodeID ID;
   AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
@@ -1544,7 +1544,7 @@
   unsigned ByteSize = VT.getStoreSize();
   Type *Ty = VT.getTypeForEVT(*getContext());
   unsigned StackAlign =
-  std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), minAlign);
+  std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty), minAlign);
 
   int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
   return getFrameIndex(FrameIdx, TLI.getPointerTy());
@@ -1557,7 +1557,7 @@
                             VT2.getStoreSizeInBits())/8;
   Type *Ty1 = VT1.getTypeForEVT(*getContext());
   Type *Ty2 = VT2.getTypeForEVT(*getContext());
-  const TargetData *TD = TLI.getTargetData();
+  const DataLayout *TD = TLI.getDataLayout();
   unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
                             TD->getPrefTypeAlignment(Ty2));
 
@@ -3451,7 +3451,7 @@
                                    DAG.getMachineFunction());
 
   if (VT == MVT::Other) {
-    if (DstAlign >= TLI.getTargetData()->getPointerPrefAlignment() ||
+    if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() ||
         TLI.allowsUnalignedMemoryAccesses(VT)) {
       VT = TLI.getPointerTy();
     } else {
@@ -3539,7 +3539,7 @@
 
   if (DstAlignCanChange) {
     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
-    unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
+    unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
     if (NewAlign > Align) {
       // Give the stack frame object a larger alignment if needed.
       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
@@ -3628,7 +3628,7 @@
 
   if (DstAlignCanChange) {
     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
-    unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
+    unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
     if (NewAlign > Align) {
       // Give the stack frame object a larger alignment if needed.
       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
@@ -3703,7 +3703,7 @@
 
   if (DstAlignCanChange) {
     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
-    unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
+    unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
     if (NewAlign > Align) {
       // Give the stack frame object a larger alignment if needed.
       if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
@@ -3797,7 +3797,7 @@
   // Emit a library call.
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
-  Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext());
+  Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
   Entry.Node = Dst; Args.push_back(Entry);
   Entry.Node = Src; Args.push_back(Entry);
   Entry.Node = Size; Args.push_back(Entry);
@@ -3852,7 +3852,7 @@
   // Emit a library call.
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
-  Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext());
+  Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
   Entry.Node = Dst; Args.push_back(Entry);
   Entry.Node = Src; Args.push_back(Entry);
   Entry.Node = Size; Args.push_back(Entry);
@@ -3901,7 +3901,7 @@
     return Result;
 
   // Emit a library call.
-  Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
+  Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext());
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
   Entry.Node = Dst; Entry.Ty = IntPtrTy;
@@ -6097,7 +6097,7 @@
     unsigned PtrWidth = TLI.getPointerTy().getSizeInBits();
     APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
     llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
-                            TLI.getTargetData());
+                            TLI.getDataLayout());
     unsigned AlignBits = KnownZero.countTrailingOnes();
     unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
     if (Align)
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp	(revision 165037)
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp	(working copy)
@@ -44,7 +44,7 @@
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetFrameLowering.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetIntrinsicInfo.h"
@@ -847,7 +847,7 @@
   AA = &aa;
   GFI = gfi;
   LibInfo = li;
-  TD = DAG.getTarget().getTargetData();
+  TD = DAG.getTarget().getDataLayout();
   Context = DAG.getContext();
   LPadToCallSiteMap.clear();
 }
@@ -3208,9 +3208,9 @@
     return;   // getValue will auto-populate this.
 
   Type *Ty = I.getAllocatedType();
-  uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
+  uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
   unsigned Align =
-    std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
+    std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty),
              I.getAlignment());
 
   SDValue AllocSize = getValue(I.getArraySize());
@@ -5308,9 +5308,9 @@
   int DemoteStackIdx = -100;
 
   if (!CanLowerReturn) {
-    uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(
+    uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(
                       FTy->getReturnType());
-    unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(
+    unsigned Align  = TLI.getDataLayout()->getPrefTypeAlignment(
                       FTy->getReturnType());
     MachineFunction &MF = DAG.getMachineFunction();
     DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
@@ -5775,7 +5775,7 @@
   /// MVT::Other.
   EVT getCallOperandValEVT(LLVMContext &Context,
                            const TargetLowering &TLI,
-                           const TargetData *TD) const {
+                           const DataLayout *TD) const {
     if (CallOperandVal == 0) return MVT::Other;
 
     if (isa<BasicBlock>(CallOperandVal))
@@ -6079,8 +6079,8 @@
         // Otherwise, create a stack slot and emit a store to it before the
         // asm.
         Type *Ty = OpVal->getType();
-        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
-        unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(Ty);
+        uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
+        unsigned Align  = TLI.getDataLayout()->getPrefTypeAlignment(Ty);
         MachineFunction &MF = DAG.getMachineFunction();
         int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
         SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
@@ -6428,7 +6428,7 @@
 }
 
 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
-  const TargetData &TD = *TLI.getTargetData();
+  const DataLayout &TD = *TLI.getDataLayout();
   SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
                            getRoot(), getValue(I.getOperand(0)),
                            DAG.getSrcValue(I.getOperand(0)),
@@ -6474,7 +6474,7 @@
                            Args[i].Node.getResNo() + Value);
       ISD::ArgFlagsTy Flags;
       unsigned OriginalAlignment =
-        getTargetData()->getABITypeAlignment(ArgTy);
+        getDataLayout()->getABITypeAlignment(ArgTy);
 
       if (Args[i].isZExt)
         Flags.setZExt();
@@ -6488,7 +6488,7 @@
         Flags.setByVal();
         PointerType *Ty = cast<PointerType>(Args[i].Ty);
         Type *ElementTy = Ty->getElementType();
-        Flags.setByValSize(getTargetData()->getTypeAllocSize(ElementTy));
+        Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy));
         // For ByVal, alignment should come from FE.  BE will guess if this
         // info is not there but there are cases it cannot get right.
         unsigned FrameAlign;
@@ -6663,7 +6663,7 @@
   const Function &F = *LLVMBB->getParent();
   SelectionDAG &DAG = SDB->DAG;
   DebugLoc dl = SDB->getCurDebugLoc();
-  const TargetData *TD = TLI.getTargetData();
+  const DataLayout *TD = TLI.getDataLayout();
   SmallVector Ins;
 
   // Check whether the function can return without sret-demotion.
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h	(revision 165037)
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h	(working copy)
@@ -66,7 +66,7 @@
 class SIToFPInst;
 class StoreInst;
 class SwitchInst;
-class TargetData;
+class DataLayout;
 class TargetLibraryInfo;
 class TargetLowering;
 class TruncInst;
@@ -285,7 +285,7 @@
   const TargetMachine &TM;
   const TargetLowering &TLI;
   SelectionDAG &DAG;
-  const TargetData *TD;
+  const DataLayout *TD;
   AliasAnalysis *AA;
   const TargetLibraryInfo *LibInfo;
 
Index: lib/CodeGen/SelectionDAG/TargetLowering.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/TargetLowering.cpp	(revision 165037)
+++ lib/CodeGen/SelectionDAG/TargetLowering.cpp	(working copy)
@@ -14,7 +14,7 @@
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCExpr.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLoweringObjectFile.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetRegisterInfo.h"
@@ -515,7 +515,7 @@
 /// NOTE: The constructor takes ownership of TLOF.
 TargetLowering::TargetLowering(const TargetMachine &tm,
                                const TargetLoweringObjectFile *tlof)
-  : TM(tm), TD(TM.getTargetData()), TLOF(*tlof) {
+  : TM(tm), TD(TM.getDataLayout()), TLOF(*tlof) {
   // All operations default to being supported.
   memset(OpActions, 0, sizeof(OpActions));
   memset(LoadExtActions, 0, sizeof(LoadExtActions));
Index: lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp	(revision 165037)
+++ lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp	(working copy)
@@ -16,7 +16,7 @@
 using namespace llvm;
 
 TargetSelectionDAGInfo::TargetSelectionDAGInfo(const TargetMachine &TM)
-  : TD(TM.getTargetData()) {
+  : TD(TM.getDataLayout()) {
 }
 
 TargetSelectionDAGInfo::~TargetSelectionDAGInfo() {
Index: lib/CodeGen/SjLjEHPrepare.cpp
===================================================================
--- lib/CodeGen/SjLjEHPrepare.cpp	(revision 165037)
+++ lib/CodeGen/SjLjEHPrepare.cpp	(working copy)
@@ -30,7 +30,7 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -191,7 +191,7 @@
   // that needs to be restored on all exits from the function. This is an alloca
   // because the value needs to be added to the global context list.
   unsigned Align =
-    TLI->getTargetData()->getPrefTypeAlignment(FunctionContextTy);
+    TLI->getDataLayout()->getPrefTypeAlignment(FunctionContextTy);
   FuncCtx =
     new AllocaInst(FunctionContextTy, 0, Align, "fn_context", EntryBB->begin());
 
Index: lib/CodeGen/StackProtector.cpp
===================================================================
--- lib/CodeGen/StackProtector.cpp	(revision 165037)
+++ lib/CodeGen/StackProtector.cpp	(working copy)
@@ -26,7 +26,7 @@
 #include "llvm/Module.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/CommandLine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/ADT/Triple.h"
@@ -117,7 +117,7 @@
 
     // If an array has more than SSPBufferSize bytes of allocated space, then we
     // emit stack protectors.
-    if (TM.Options.SSPBufferSize <= TLI->getTargetData()->getTypeAllocSize(AT))
+    if (TM.Options.SSPBufferSize <= TLI->getDataLayout()->getTypeAllocSize(AT))
       return true;
   }
 
Index: lib/CodeGen/TargetLoweringObjectFileImpl.cpp
===================================================================
--- lib/CodeGen/TargetLoweringObjectFileImpl.cpp	(revision 165037)
+++ lib/CodeGen/TargetLoweringObjectFileImpl.cpp	(working copy)
@@ -27,13 +27,13 @@
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/Dwarf.h"
 #include "llvm/Support/ELF.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/Triple.h"
@@ -77,9 +77,9 @@
                                                     Flags,
                                                     SectionKind::getDataRel(),
                                                     0, Label->getName());
-  unsigned Size = TM.getTargetData()->getPointerSize();
+  unsigned Size = TM.getDataLayout()->getPointerSize();
   Streamer.SwitchSection(Sec);
-  Streamer.EmitValueToAlignment(TM.getTargetData()->getPointerABIAlignment());
+  Streamer.EmitValueToAlignment(TM.getDataLayout()->getPointerABIAlignment());
   Streamer.EmitSymbolAttribute(Label, MCSA_ELF_TypeObject);
   const MCExpr *E = MCConstantExpr::Create(Size, getContext());
   Streamer.EmitELFSize(Label, E);
@@ -247,7 +247,7 @@
     // FIXME: this is getting the alignment of the character, not the
     // alignment of the global!
     unsigned Align =
-      TM.getTargetData()->getPreferredAlignment(cast(GV));
+      TM.getDataLayout()->getPreferredAlignment(cast(GV));
 
     const char *SizeSpec = ".rodata.str1.";
     if (Kind.isMergeable2ByteCString())
@@ -522,14 +522,14 @@
 
   // FIXME: Alignment check should be handled by section classifier.
   if (Kind.isMergeable1ByteCString() &&
-      TM.getTargetData()->getPreferredAlignment(cast(GV)) < 32)
+      TM.getDataLayout()->getPreferredAlignment(cast(GV)) < 32)
     return CStringSection;
 
   // Do not put 16-bit arrays in the UString section if they have an
   // externally visible label, this runs into issues with certain linker
   // versions.
   if (Kind.isMergeable2ByteCString() && !GV->hasExternalLinkage() &&
-      TM.getTargetData()->getPreferredAlignment(cast(GV)) < 32)
+      TM.getDataLayout()->getPreferredAlignment(cast(GV)) < 32)
     return UStringSection;
 
   if (Kind.isMergeableConst()) {
Index: lib/ExecutionEngine/ExecutionEngine.cpp
===================================================================
--- lib/ExecutionEngine/ExecutionEngine.cpp	(revision 165037)
+++ lib/ExecutionEngine/ExecutionEngine.cpp	(working copy)
@@ -28,8 +28,8 @@
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/DynamicLibrary.h"
 #include "llvm/Support/Host.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/TargetRegistry.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetMachine.h"
 #include 
 #include 
@@ -91,11 +91,11 @@
 public:
   /// \brief Returns the address the GlobalVariable should be written into.  The
   /// GVMemoryBlock object prefixes that.
-  static char *Create(const GlobalVariable *GV, const TargetData& TD) {
+  static char *Create(const GlobalVariable *GV, const DataLayout& TD) {
     Type *ElTy = GV->getType()->getElementType();
     size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy);
     void *RawMemory = ::operator new(
-      TargetData::RoundUpAlignment(sizeof(GVMemoryBlock),
+      DataLayout::RoundUpAlignment(sizeof(GVMemoryBlock),
                                    TD.getPreferredAlignment(GV))
       + GVSize);
     new(RawMemory) GVMemoryBlock(GV);
@@ -113,7 +113,7 @@
 }  // anonymous namespace
 
 char *ExecutionEngine::getMemoryForGV(const GlobalVariable *GV) {
-  return GVMemoryBlock::Create(GV, *getTargetData());
+  return GVMemoryBlock::Create(GV, *getDataLayout());
 }
 
 bool ExecutionEngine::removeModule(Module *M) {
@@ -267,7 +267,7 @@
 void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
                        const std::vector<std::string> &InputArgv) {
   clear();  // Free the old contents.
-  unsigned PtrSize = EE->getTargetData()->getPointerSize();
+  unsigned PtrSize = EE->getDataLayout()->getPointerSize();
   Array = new char[(InputArgv.size()+1)*PtrSize];
 
   DEBUG(dbgs() << "JIT: ARGV = " << (void*)Array << "\n");
@@ -342,7 +342,7 @@
 #ifndef NDEBUG
 /// isTargetNullPtr - Return whether the target pointer stored at Loc is null.
 static bool isTargetNullPtr(ExecutionEngine *EE, void *Loc) {
-  unsigned PtrSize = EE->getTargetData()->getPointerSize();
+  unsigned PtrSize = EE->getDataLayout()->getPointerSize();
   for (unsigned i = 0; i < PtrSize; ++i)
     if (*(i + (uint8_t*)Loc))
       return false;
@@ -856,7 +856,7 @@
 
 void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
                                          GenericValue *Ptr, Type *Ty) {
-  const unsigned StoreBytes = getTargetData()->getTypeStoreSize(Ty);
+  const unsigned StoreBytes = getDataLayout()->getTypeStoreSize(Ty);
 
   switch (Ty->getTypeID()) {
   case Type::IntegerTyID:
@@ -882,7 +882,7 @@
     dbgs() << "Cannot store value of type " << *Ty << "!\n";
   }
 
-  if (sys::isLittleEndianHost() != getTargetData()->isLittleEndian())
+  if (sys::isLittleEndianHost() != getDataLayout()->isLittleEndian())
     // Host and target are different endian - reverse the stored bytes.
     std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr);
 }
@@ -918,7 +918,7 @@
 void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
                                           GenericValue *Ptr,
                                           Type *Ty) {
-  const unsigned LoadBytes = getTargetData()->getTypeStoreSize(Ty);
+  const unsigned LoadBytes = getDataLayout()->getTypeStoreSize(Ty);
 
   switch (Ty->getTypeID()) {
   case Type::IntegerTyID:
@@ -959,20 +959,20 @@
   
   if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
     unsigned ElementSize =
-      getTargetData()->getTypeAllocSize(CP->getType()->getElementType());
+      getDataLayout()->getTypeAllocSize(CP->getType()->getElementType());
     for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
       InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize);
     return;
   }
   
   if (isa<ConstantAggregateZero>(Init)) {
-    memset(Addr, 0, (size_t)getTargetData()->getTypeAllocSize(Init->getType()));
+    memset(Addr, 0, (size_t)getDataLayout()->getTypeAllocSize(Init->getType()));
     return;
   }
   
   if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) {
     unsigned ElementSize =
-      getTargetData()->getTypeAllocSize(CPA->getType()->getElementType());
+      getDataLayout()->getTypeAllocSize(CPA->getType()->getElementType());
     for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
       InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize);
     return;
@@ -980,7 +980,7 @@
   
   if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(Init)) {
     const StructLayout *SL =
-      getTargetData()->getStructLayout(cast<StructType>(CPS->getType()));
+      getDataLayout()->getStructLayout(cast<StructType>(CPS->getType()));
     for (unsigned i = 0, e = CPS->getNumOperands(); i != e; ++i)
       InitializeMemory(CPS->getOperand(i), (char*)Addr+SL->getElementOffset(i));
     return;
@@ -1127,7 +1127,7 @@
     InitializeMemory(GV->getInitializer(), GA);
 
   Type *ElTy = GV->getType()->getElementType();
-  size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy);
+  size_t GVSize = (size_t)getDataLayout()->getTypeAllocSize(ElTy);
   NumInitBytes += (unsigned)GVSize;
   ++NumGlobals;
 }
Index: lib/ExecutionEngine/ExecutionEngineBindings.cpp
===================================================================
--- lib/ExecutionEngine/ExecutionEngineBindings.cpp	(revision 165037)
+++ lib/ExecutionEngine/ExecutionEngineBindings.cpp	(working copy)
@@ -238,8 +238,8 @@
   return unwrap(EE)->recompileAndRelinkFunction(unwrap(Fn));
 }
 
-LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE) {
-  return wrap(unwrap(EE)->getTargetData());
+LLVMTargetDataRef LLVMGetExecutionEngineDataLayout(LLVMExecutionEngineRef EE) {
+  return wrap(unwrap(EE)->getDataLayout());
 }
 
 void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
Index: lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
===================================================================
--- lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp	(revision 165037)
+++ lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp	(working copy)
@@ -25,7 +25,7 @@
 #include "llvm/Config/config.h"     // Detect libffi
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/DynamicLibrary.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/ManagedStatic.h"
 #include "llvm/Support/Mutex.h"
 #include 
@@ -180,7 +180,7 @@
 
 static bool ffiInvoke(RawFunc Fn, Function *F,
                       const std::vector<GenericValue> &ArgVals,
-                      const TargetData *TD, GenericValue &Result) {
+                      const DataLayout *TD, GenericValue &Result) {
   ffi_cif cif;
   FunctionType *FTy = F->getFunctionType();
   const unsigned NumArgs = F->arg_size();
@@ -276,7 +276,7 @@
   FunctionsLock->release();
 
   GenericValue Result;
-  if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getTargetData(), Result))
+  if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getDataLayout(), Result))
     return Result;
 #endif // USE_LIBFFI
 
@@ -376,7 +376,7 @@
       case 'x': case 'X':
         if (HowLong >= 1) {
           if (HowLong == 1 &&
-              TheInterpreter->getTargetData()->getPointerSizeInBits() == 64 &&
+              TheInterpreter->getDataLayout()->getPointerSizeInBits() == 64 &&
               sizeof(long) < sizeof(int64_t)) {
             // Make sure we use %lld with a 64 bit argument because we might be
             // compiling LLI on a 32 bit compiler.
Index: lib/ExecutionEngine/Interpreter/Interpreter.cpp
===================================================================
--- lib/ExecutionEngine/Interpreter/Interpreter.cpp	(revision 165037)
+++ lib/ExecutionEngine/Interpreter/Interpreter.cpp	(working copy)
@@ -48,7 +48,7 @@
   : ExecutionEngine(M), TD(M) {
       
   memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
-  setTargetData(&TD);
+  setDataLayout(&TD);
   // Initialize the "backend"
   initializeExecutionEngine();
   initializeExternalFunctions();
Index: lib/ExecutionEngine/Interpreter/Interpreter.h
===================================================================
--- lib/ExecutionEngine/Interpreter/Interpreter.h	(revision 165037)
+++ lib/ExecutionEngine/Interpreter/Interpreter.h	(working copy)
@@ -17,7 +17,7 @@
 #include "llvm/Function.h"
 #include "llvm/ExecutionEngine/ExecutionEngine.h"
 #include "llvm/ExecutionEngine/GenericValue.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/CallSite.h"
 #include "llvm/Support/DataTypes.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -82,7 +82,7 @@
 //
 class Interpreter : public ExecutionEngine, public InstVisitor<Interpreter> {
   GenericValue ExitValue;          // The return value of the called function
-  TargetData TD;
+  DataLayout TD;
   IntrinsicLowering *IL;
 
   // The runtime stack of executing code.  The top of the stack is the current
Index: lib/ExecutionEngine/JIT/JIT.cpp
===================================================================
--- lib/ExecutionEngine/JIT/JIT.cpp	(revision 165037)
+++ lib/ExecutionEngine/JIT/JIT.cpp	(working copy)
@@ -24,7 +24,7 @@
 #include "llvm/ExecutionEngine/GenericValue.h"
 #include "llvm/ExecutionEngine/JITEventListener.h"
 #include "llvm/ExecutionEngine/JITMemoryManager.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetJITInfo.h"
 #include "llvm/Support/Dwarf.h"
@@ -272,7 +272,7 @@
   : ExecutionEngine(M), TM(tm), TJI(tji),
     JMM(jmm ? jmm : JITMemoryManager::CreateDefaultMemManager()),
     AllocateGVsWithCode(GVsWithCode), isAlreadyCodeGenerating(false) {
-  setTargetData(TM.getTargetData());
+  setDataLayout(TM.getDataLayout());
 
   jitstate = new JITState(M);
 
@@ -285,7 +285,7 @@
   // Add target data
   MutexGuard locked(lock);
   FunctionPassManager &PM = jitstate->getPM(locked);
-  PM.add(new TargetData(*TM.getTargetData()));
+  PM.add(new DataLayout(*TM.getDataLayout()));
 
   // Turn the machine code intermediate representation into bytes in memory that
   // may be executed.
@@ -339,7 +339,7 @@
     jitstate = new JITState(M);
 
     FunctionPassManager &PM = jitstate->getPM(locked);
-    PM.add(new TargetData(*TM.getTargetData()));
+    PM.add(new DataLayout(*TM.getDataLayout()));
 
     // Turn the machine code intermediate representation into bytes in memory
     // that may be executed.
@@ -370,7 +370,7 @@
     jitstate = new JITState(Modules[0]);
 
     FunctionPassManager &PM = jitstate->getPM(locked);
-    PM.add(new TargetData(*TM.getTargetData()));
+    PM.add(new DataLayout(*TM.getDataLayout()));
 
     // Turn the machine code intermediate representation into bytes in memory
     // that may be executed.
@@ -815,8 +815,8 @@
   // through the memory manager which puts them near the code but not in the
   // same buffer.
   Type *GlobalType = GV->getType()->getElementType();
-  size_t S = getTargetData()->getTypeAllocSize(GlobalType);
-  size_t A = getTargetData()->getPreferredAlignment(GV);
+  size_t S = getDataLayout()->getTypeAllocSize(GlobalType);
+  size_t A = getDataLayout()->getPreferredAlignment(GV);
   if (GV->isThreadLocal()) {
     MutexGuard locked(lock);
     Ptr = TJI.allocateThreadLocalMemory(S);
Index: lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
===================================================================
--- lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp	(revision 165037)
+++ lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp	(working copy)
@@ -24,7 +24,7 @@
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetFrameLowering.h"
 #include "llvm/Target/TargetMachine.h"
@@ -42,7 +42,7 @@
   assert(MMI && "MachineModuleInfo not registered!");
 
   const TargetMachine& TM = F.getTarget();
-  TD = TM.getTargetData();
+  TD = TM.getDataLayout();
   stackGrowthDirection = TM.getFrameLowering()->getStackGrowthDirection();
   RI = TM.getRegisterInfo();
   MAI = TM.getMCAsmInfo();
Index: lib/ExecutionEngine/JIT/JITDwarfEmitter.h
===================================================================
--- lib/ExecutionEngine/JIT/JITDwarfEmitter.h	(revision 165037)
+++ lib/ExecutionEngine/JIT/JITDwarfEmitter.h	(working copy)
@@ -23,12 +23,12 @@
 class MachineModuleInfo;
 class MachineMove;
 class MCAsmInfo;
-class TargetData;
+class DataLayout;
 class TargetMachine;
 class TargetRegisterInfo;
 
 class JITDwarfEmitter {
-  const TargetData* TD;
+  const DataLayout* TD;
   JITCodeEmitter* JCE;
   const TargetRegisterInfo* RI;
   const MCAsmInfo *MAI;
Index: lib/ExecutionEngine/JIT/JITEmitter.cpp
===================================================================
--- lib/ExecutionEngine/JIT/JITEmitter.cpp	(revision 165037)
+++ lib/ExecutionEngine/JIT/JITEmitter.cpp	(working copy)
@@ -30,7 +30,7 @@
 #include "llvm/ExecutionEngine/GenericValue.h"
 #include "llvm/ExecutionEngine/JITEventListener.h"
 #include "llvm/ExecutionEngine/JITMemoryManager.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetJITInfo.h"
 #include "llvm/Target/TargetMachine.h"
@@ -763,7 +763,7 @@
 }
 
 static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP,
-                                           const TargetData *TD) {
+                                           const DataLayout *TD) {
   const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
   if (Constants.empty()) return 0;
 
@@ -1058,7 +1058,7 @@
   const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
   if (Constants.empty()) return;
 
-  unsigned Size = GetConstantPoolSizeInBytes(MCP, TheJIT->getTargetData());
+  unsigned Size = GetConstantPoolSizeInBytes(MCP, TheJIT->getDataLayout());
   unsigned Align = MCP->getConstantPoolAlignment();
   ConstantPoolBase = allocateSpace(Size, Align);
   ConstantPool = MCP;
@@ -1087,7 +1087,7 @@
           dbgs().write_hex(CAddr) << "]\n");
 
     Type *Ty = CPE.Val.ConstVal->getType();
-    Offset += TheJIT->getTargetData()->getTypeAllocSize(Ty);
+    Offset += TheJIT->getDataLayout()->getTypeAllocSize(Ty);
   }
 }
 
@@ -1104,14 +1104,14 @@
   for (unsigned i = 0, e = JT.size(); i != e; ++i)
     NumEntries += JT[i].MBBs.size();
 
-  unsigned EntrySize = MJTI->getEntrySize(*TheJIT->getTargetData());
+  unsigned EntrySize = MJTI->getEntrySize(*TheJIT->getDataLayout());
 
   // Just allocate space for all the jump tables now.  We will fix up the actual
   // MBB entries in the tables after we emit the code for each block, since then
   // we will know the final locations of the MBBs in memory.
   JumpTable = MJTI;
   JumpTableBase = allocateSpace(NumEntries * EntrySize,
-                             MJTI->getEntryAlignment(*TheJIT->getTargetData()));
+                             MJTI->getEntryAlignment(*TheJIT->getDataLayout()));
 }
 
 void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
@@ -1128,7 +1128,7 @@
   case MachineJumpTableInfo::EK_BlockAddress: {
     // EK_BlockAddress - Each entry is a plain address of block, e.g.:
     //     .word LBB123
-    assert(MJTI->getEntrySize(*TheJIT->getTargetData()) == sizeof(void*) &&
+    assert(MJTI->getEntrySize(*TheJIT->getDataLayout()) == sizeof(void*) &&
            "Cross JIT'ing?");
 
     // For each jump table, map each target in the jump table to the address of
@@ -1148,7 +1148,7 @@
   case MachineJumpTableInfo::EK_Custom32:
   case MachineJumpTableInfo::EK_GPRel32BlockAddress:
   case MachineJumpTableInfo::EK_LabelDifference32: {
-    assert(MJTI->getEntrySize(*TheJIT->getTargetData()) == 4&&"Cross JIT'ing?");
+    assert(MJTI->getEntrySize(*TheJIT->getDataLayout()) == 4&&"Cross JIT'ing?");
     // For each jump table, place the offset from the beginning of the table
     // to the target address.
     int *SlotPtr = (int*)JumpTableBase;
@@ -1224,7 +1224,7 @@
   const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables();
   assert(Index < JT.size() && "Invalid jump table index!");
 
-  unsigned EntrySize = JumpTable->getEntrySize(*TheJIT->getTargetData());
+  unsigned EntrySize = JumpTable->getEntrySize(*TheJIT->getDataLayout());
 
   unsigned Offset = 0;
   for (unsigned i = 0; i < Index; ++i)
Index: lib/ExecutionEngine/MCJIT/MCJIT.cpp
===================================================================
--- lib/ExecutionEngine/MCJIT/MCJIT.cpp	(revision 165037)
+++ lib/ExecutionEngine/MCJIT/MCJIT.cpp	(working copy)
@@ -19,7 +19,7 @@
 #include "llvm/Support/DynamicLibrary.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/MutexGuard.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 
 using namespace llvm;
 
@@ -52,7 +52,7 @@
   : ExecutionEngine(m), TM(tm), Ctx(0), MemMgr(MM), Dyld(MM),
     isCompiled(false), M(m), OS(Buffer)  {
 
-  setTargetData(TM->getTargetData());
+  setDataLayout(TM->getDataLayout());
 }
 
 MCJIT::~MCJIT() {
@@ -78,7 +78,7 @@
 
   PassManager PM;
 
-  PM.add(new TargetData(*TM->getTargetData()));
+  PM.add(new DataLayout(*TM->getDataLayout()));
 
   // Turn the machine code intermediate representation into bytes in memory
   // that may be executed.
Index: lib/Target/ARM/ARMAsmPrinter.cpp
===================================================================
--- lib/Target/ARM/ARMAsmPrinter.cpp	(revision 165037)
+++ lib/Target/ARM/ARMAsmPrinter.cpp	(working copy)
@@ -40,7 +40,7 @@
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/Support/CommandLine.h"
@@ -302,7 +302,7 @@
 }
 
 void ARMAsmPrinter::EmitXXStructor(const Constant *CV) {
-  uint64_t Size = TM.getTargetData()->getTypeAllocSize(CV->getType());
+  uint64_t Size = TM.getDataLayout()->getTypeAllocSize(CV->getType());
   assert(Size && "C++ constructor pointer had zero size!");
 
   const GlobalValue *GV = dyn_cast<GlobalValue>(CV->stripPointerCasts());
@@ -893,7 +893,7 @@
 
 void ARMAsmPrinter::
 EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
-  int Size = TM.getTargetData()->getTypeAllocSize(MCPV->getType());
+  int Size = TM.getDataLayout()->getTypeAllocSize(MCPV->getType());
 
   ARMConstantPoolValue *ACPV = static_cast<ARMConstantPoolValue*>(MCPV);
 
Index: lib/Target/ARM/ARMCodeEmitter.cpp
===================================================================
--- lib/Target/ARM/ARMCodeEmitter.cpp	(revision 165037)
+++ lib/Target/ARM/ARMCodeEmitter.cpp	(working copy)
@@ -47,7 +47,7 @@
   class ARMCodeEmitter : public MachineFunctionPass {
     ARMJITInfo                *JTI;
     const ARMBaseInstrInfo    *II;
-    const TargetData          *TD;
+    const DataLayout          *TD;
     const ARMSubtarget        *Subtarget;
     TargetMachine             &TM;
     JITCodeEmitter            &MCE;
@@ -67,7 +67,7 @@
     ARMCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce)
       : MachineFunctionPass(ID), JTI(0),
         II((const ARMBaseInstrInfo *)tm.getInstrInfo()),
-        TD(tm.getTargetData()), TM(tm),
+        TD(tm.getDataLayout()), TM(tm),
         MCE(mce), MCPEs(0), MJTEs(0),
         IsPIC(TM.getRelocationModel() == Reloc::PIC_), IsThumb(false) {}
 
@@ -376,7 +376,7 @@
          "JIT relocation model must be set to static or default!");
   JTI = ((ARMBaseTargetMachine &)MF.getTarget()).getJITInfo();
   II = (const ARMBaseInstrInfo *)MF.getTarget().getInstrInfo();
-  TD = MF.getTarget().getTargetData();
+  TD = MF.getTarget().getDataLayout();
   Subtarget = &TM.getSubtarget<ARMSubtarget>();
   MCPEs = &MF.getConstantPool()->getConstants();
   MJTEs = 0;
Index: lib/Target/ARM/ARMConstantIslandPass.cpp
===================================================================
--- lib/Target/ARM/ARMConstantIslandPass.cpp	(revision 165037)
+++ lib/Target/ARM/ARMConstantIslandPass.cpp	(working copy)
@@ -22,7 +22,7 @@
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineJumpTableInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -528,7 +528,7 @@
   // identity mapping of CPI's to CPE's.
   const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();
 
-  const TargetData &TD = *MF->getTarget().getTargetData();
+  const DataLayout &TD = *MF->getTarget().getDataLayout();
   for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
     unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
     assert(Size >= 4 && "Too small constant pool entry");
Index: lib/Target/ARM/ARMELFWriterInfo.cpp
===================================================================
--- lib/Target/ARM/ARMELFWriterInfo.cpp	(revision 165037)
+++ lib/Target/ARM/ARMELFWriterInfo.cpp	(working copy)
@@ -15,7 +15,7 @@
 #include "ARMRelocations.h"
 #include "llvm/Function.h"
 #include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Support/ELF.h"
 
@@ -26,8 +26,8 @@
 //===----------------------------------------------------------------------===//
 
 ARMELFWriterInfo::ARMELFWriterInfo(TargetMachine &TM)
-  : TargetELFWriterInfo(TM.getTargetData()->getPointerSizeInBits() == 64,
-                        TM.getTargetData()->isLittleEndian()) {
+  : TargetELFWriterInfo(TM.getDataLayout()->getPointerSizeInBits() == 64,
+                        TM.getDataLayout()->isLittleEndian()) {
 }
 
 ARMELFWriterInfo::~ARMELFWriterInfo() {}
Index: lib/Target/ARM/ARMFastISel.cpp
===================================================================
--- lib/Target/ARM/ARMFastISel.cpp	(revision 165037)
+++ lib/Target/ARM/ARMFastISel.cpp	(working copy)
@@ -40,7 +40,7 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetMachine.h"
Index: lib/Target/ARM/ARMInstrInfo.cpp
===================================================================
--- lib/Target/ARM/ARMInstrInfo.cpp	(revision 165037)
+++ lib/Target/ARM/ARMInstrInfo.cpp	(working copy)
@@ -112,7 +112,7 @@
                                            "_GLOBAL_OFFSET_TABLE_");
       unsigned Id = AFI->createPICLabelUId();
       ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id);
-      unsigned Align = TM->getTargetData()->getPrefTypeAlignment(GV->getType());
+      unsigned Align = TM->getDataLayout()->getPrefTypeAlignment(GV->getType());
       unsigned Idx = MF.getConstantPool()->getConstantPoolIndex(CPV, Align);
 
       MachineBasicBlock &FirstMBB = MF.front();
Index: lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- lib/Target/ARM/ARMISelLowering.cpp	(revision 165037)
+++ lib/Target/ARM/ARMISelLowering.cpp	(working copy)
@@ -6064,9 +6064,9 @@
       const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
 
       // MachineConstantPool wants an explicit alignment.
-      unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty);
+      unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty);
       if (Align == 0)
-        Align = getTargetData()->getTypeAllocSize(C->getType());
+        Align = getDataLayout()->getTypeAllocSize(C->getType());
       unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
 
       unsigned VReg1 = MRI->createVirtualRegister(TRC);
@@ -6153,9 +6153,9 @@
       const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
 
       // MachineConstantPool wants an explicit alignment.
-      unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty);
+      unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty);
       if (Align == 0)
-        Align = getTargetData()->getTypeAllocSize(C->getType());
+        Align = getDataLayout()->getTypeAllocSize(C->getType());
       unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
 
       unsigned VReg1 = MRI->createVirtualRegister(TRC);
@@ -6474,9 +6474,9 @@
     const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
 
     // MachineConstantPool wants an explicit alignment.
-    unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty);
+    unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty);
     if (Align == 0)
-      Align = getTargetData()->getTypeAllocSize(C->getType());
+      Align = getDataLayout()->getTypeAllocSize(C->getType());
     unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
 
     AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::LDRcp))
@@ -9854,7 +9854,7 @@
   case Intrinsic::arm_neon_vld4lane: {
     Info.opc = ISD::INTRINSIC_W_CHAIN;
     // Conservatively set memVT to the entire set of vectors loaded.
-    uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8;
+    uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
     Info.ptrVal = I.getArgOperand(0);
     Info.offset = 0;
@@ -9879,7 +9879,7 @@
       Type *ArgTy = I.getArgOperand(ArgI)->getType();
       if (!ArgTy->isVectorTy())
         break;
-      NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8;
+      NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
     }
     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
     Info.ptrVal = I.getArgOperand(0);
Index: lib/Target/ARM/ARMLoadStoreOptimizer.cpp
===================================================================
--- lib/Target/ARM/ARMLoadStoreOptimizer.cpp	(revision 165037)
+++ lib/Target/ARM/ARMLoadStoreOptimizer.cpp	(working copy)
@@ -27,7 +27,7 @@
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/RegisterScavenging.h"
 #include "llvm/CodeGen/SelectionDAGNodes.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetRegisterInfo.h"
@@ -1448,7 +1448,7 @@
     static char ID;
     ARMPreAllocLoadStoreOpt() : MachineFunctionPass(ID) {}
 
-    const TargetData *TD;
+    const DataLayout *TD;
     const TargetInstrInfo *TII;
     const TargetRegisterInfo *TRI;
     const ARMSubtarget *STI;
@@ -1478,7 +1478,7 @@
 }
 
 bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
-  TD  = Fn.getTarget().getTargetData();
+  TD  = Fn.getTarget().getDataLayout();
   TII = Fn.getTarget().getInstrInfo();
   TRI = Fn.getTarget().getRegisterInfo();
   STI = &Fn.getTarget().getSubtarget<ARMSubtarget>();
Index: lib/Target/ARM/ARMSelectionDAGInfo.cpp
===================================================================
--- lib/Target/ARM/ARMSelectionDAGInfo.cpp	(revision 165037)
+++ lib/Target/ARM/ARMSelectionDAGInfo.cpp	(working copy)
@@ -155,7 +155,7 @@
   TargetLowering::ArgListEntry Entry;
 
   // First argument: data pointer
-  Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*DAG.getContext());
+  Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*DAG.getContext());
   Entry.Node = Dst;
   Entry.Ty = IntPtrTy;
   Args.push_back(Entry);
Index: lib/Target/ARM/ARMTargetMachine.cpp
===================================================================
--- lib/Target/ARM/ARMTargetMachine.cpp	(revision 165037)
+++ lib/Target/ARM/ARMTargetMachine.cpp	(working copy)
@@ -60,7 +60,7 @@
                                    CodeGenOpt::Level OL)
   : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
     InstrInfo(Subtarget),
-    DataLayout(Subtarget.isAPCS_ABI() ?
+    DL(Subtarget.isAPCS_ABI() ?
                std::string("e-p:32:32-f64:32:64-i64:32:64-"
                            "v128:32:128-v64:32:64-n32-S32") :
                Subtarget.isAAPCS_ABI() ?
@@ -88,7 +88,7 @@
     InstrInfo(Subtarget.hasThumb2()
               ? ((ARMBaseInstrInfo*)new Thumb2InstrInfo(Subtarget))
               : ((ARMBaseInstrInfo*)new Thumb1InstrInfo(Subtarget))),
-    DataLayout(Subtarget.isAPCS_ABI() ?
+    DL(Subtarget.isAPCS_ABI() ?
                std::string("e-p:32:32-f64:32:64-i64:32:64-"
                            "i16:16:32-i8:8:32-i1:8:32-"
                            "v128:32:128-v64:32:64-a:0:32-n32-S32") :
Index: lib/Target/ARM/ARMTargetMachine.h
===================================================================
--- lib/Target/ARM/ARMTargetMachine.h	(revision 165037)
+++ lib/Target/ARM/ARMTargetMachine.h	(working copy)
@@ -25,7 +25,7 @@
 #include "Thumb1FrameLowering.h"
 #include "Thumb2InstrInfo.h"
 #include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/ADT/OwningPtr.h"
 
@@ -62,7 +62,7 @@
 class ARMTargetMachine : public ARMBaseTargetMachine {
   virtual void anchor();
   ARMInstrInfo        InstrInfo;
-  const TargetData    DataLayout;       // Calculates type size & alignment
+  const DataLayout    DL;       // Calculates type size & alignment
   ARMELFWriterInfo    ELFWriterInfo;
   ARMTargetLowering   TLInfo;
   ARMSelectionDAGInfo TSInfo;
@@ -90,7 +90,7 @@
   }
 
   virtual const ARMInstrInfo     *getInstrInfo() const { return &InstrInfo; }
-  virtual const TargetData       *getTargetData() const { return &DataLayout; }
+  virtual const DataLayout       *getDataLayout() const { return &DL; }
   virtual const ARMELFWriterInfo *getELFWriterInfo() const {
     return Subtarget.isTargetELF() ? &ELFWriterInfo : 0;
   }
@@ -104,7 +104,7 @@
   virtual void anchor();
   // Either Thumb1InstrInfo or Thumb2InstrInfo.
   OwningPtr<ARMBaseInstrInfo> InstrInfo;
-  const TargetData    DataLayout;   // Calculates type size & alignment
+  const DataLayout    DL;   // Calculates type size & alignment
   ARMELFWriterInfo    ELFWriterInfo;
   ARMTargetLowering   TLInfo;
   ARMSelectionDAGInfo TSInfo;
@@ -138,7 +138,7 @@
   virtual const ARMFrameLowering *getFrameLowering() const {
     return FrameLowering.get();
   }
-  virtual const TargetData       *getTargetData() const { return &DataLayout; }
+  virtual const DataLayout       *getDataLayout() const { return &DL; }
   virtual const ARMELFWriterInfo *getELFWriterInfo() const {
     return Subtarget.isTargetELF() ? &ELFWriterInfo : 0;
   }
Index: lib/Target/CellSPU/SPUFrameLowering.cpp
===================================================================
--- lib/Target/CellSPU/SPUFrameLowering.cpp	(revision 165037)
+++ lib/Target/CellSPU/SPUFrameLowering.cpp	(working copy)
@@ -22,7 +22,7 @@
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/CommandLine.h"
 using namespace llvm;
Index: lib/Target/CellSPU/SPUSubtarget.h
===================================================================
--- lib/Target/CellSPU/SPUSubtarget.h	(revision 165037)
+++ lib/Target/CellSPU/SPUSubtarget.h	(working copy)
@@ -80,9 +80,9 @@
       return UseLargeMem;
     }
 
-    /// getTargetDataString - Return the pointer size and type alignment
+    /// getDataLayoutString - Return the pointer size and type alignment
     /// properties of this subtarget.
-    const char *getTargetDataString() const {
+    const char *getDataLayoutString() const {
       return "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128"
              "-i16:16:128-i8:8:128-i1:8:128-a:0:128-v64:64:128-v128:128:128"
              "-s:128:128-n32:64";
Index: lib/Target/CellSPU/SPUTargetMachine.cpp
===================================================================
--- lib/Target/CellSPU/SPUTargetMachine.cpp	(revision 165037)
+++ lib/Target/CellSPU/SPUTargetMachine.cpp	(working copy)
@@ -38,7 +38,7 @@
                                    CodeGenOpt::Level OL)
   : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
     Subtarget(TT, CPU, FS),
-    DataLayout(Subtarget.getTargetDataString()),
+    DL(Subtarget.getDataLayoutString()),
     InstrInfo(*this),
     FrameLowering(Subtarget),
     TLInfo(*this),
Index: lib/Target/CellSPU/SPUTargetMachine.h
===================================================================
--- lib/Target/CellSPU/SPUTargetMachine.h	(revision 165037)
+++ lib/Target/CellSPU/SPUTargetMachine.h	(working copy)
@@ -20,7 +20,7 @@
 #include "SPUSelectionDAGInfo.h"
 #include "SPUFrameLowering.h"
 #include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 
 namespace llvm {
 
@@ -28,7 +28,7 @@
 ///
 class SPUTargetMachine : public LLVMTargetMachine {
   SPUSubtarget        Subtarget;
-  const TargetData    DataLayout;
+  const DataLayout    DL;
   SPUInstrInfo        InstrInfo;
   SPUFrameLowering    FrameLowering;
   SPUTargetLowering   TLInfo;
@@ -70,8 +70,8 @@
     return &InstrInfo.getRegisterInfo();
   }
 
-  virtual const TargetData *getTargetData() const {
-    return &DataLayout;
+  virtual const DataLayout *getDataLayout() const {
+    return &DL;
   }
 
   virtual const InstrItineraryData *getInstrItineraryData() const {
Index: lib/Target/CMakeLists.txt
===================================================================
--- lib/Target/CMakeLists.txt	(revision 165037)
+++ lib/Target/CMakeLists.txt	(working copy)
@@ -1,7 +1,6 @@
 add_llvm_library(LLVMTarget
   Mangler.cpp
   Target.cpp
-  TargetData.cpp
   TargetELFWriterInfo.cpp
   TargetInstrInfo.cpp
   TargetIntrinsicInfo.cpp
Index: lib/Target/CppBackend/CPPTargetMachine.h
===================================================================
--- lib/Target/CppBackend/CPPTargetMachine.h	(revision 165037)
+++ lib/Target/CppBackend/CPPTargetMachine.h	(working copy)
@@ -15,7 +15,7 @@
 #define CPPTARGETMACHINE_H
 
 #include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 
 namespace llvm {
 
@@ -35,7 +35,7 @@
                                    AnalysisID StartAfter,
                                    AnalysisID StopAfter);
 
-  virtual const TargetData *getTargetData() const { return 0; }
+  virtual const DataLayout *getDataLayout() const { return 0; }
 };
 
 extern Target TheCppBackendTarget;
Index: lib/Target/Hexagon/HexagonAsmPrinter.cpp
===================================================================
--- lib/Target/Hexagon/HexagonAsmPrinter.cpp	(revision 165037)
+++ lib/Target/Hexagon/HexagonAsmPrinter.cpp	(working copy)
@@ -46,7 +46,7 @@
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/TargetRegistry.h"
 #include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLoweringObjectFile.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
Index: lib/Target/Hexagon/HexagonCallingConvLower.cpp
===================================================================
--- lib/Target/Hexagon/HexagonCallingConvLower.cpp	(revision 165037)
+++ lib/Target/Hexagon/HexagonCallingConvLower.cpp	(working copy)
@@ -16,7 +16,7 @@
 #include "HexagonCallingConvLower.h"
 #include "Hexagon.h"
 #include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
Index: lib/Target/Hexagon/HexagonTargetMachine.cpp
===================================================================
--- lib/Target/Hexagon/HexagonTargetMachine.cpp	(revision 165037)
+++ lib/Target/Hexagon/HexagonTargetMachine.cpp	(working copy)
@@ -68,7 +68,7 @@
                                            CodeModel::Model CM,
                                            CodeGenOpt::Level OL)
   : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
-    DataLayout("e-p:32:32:32-"
+    DL("e-p:32:32:32-"
                 "i64:64:64-i32:32:32-i16:16:16-i1:32:32-"
                 "f64:64:64-f32:32:32-a0:0-n32") ,
     Subtarget(TT, CPU, FS), InstrInfo(Subtarget), TLInfo(*this),
Index: lib/Target/Hexagon/HexagonTargetMachine.h
===================================================================
--- lib/Target/Hexagon/HexagonTargetMachine.h	(revision 165037)
+++ lib/Target/Hexagon/HexagonTargetMachine.h	(working copy)
@@ -20,14 +20,14 @@
 #include "HexagonSelectionDAGInfo.h"
 #include "HexagonFrameLowering.h"
 #include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 
 namespace llvm {
 
 class Module;
 
 class HexagonTargetMachine : public LLVMTargetMachine {
-  const TargetData DataLayout;       // Calculates type size & alignment.
+  const DataLayout DL;       // Calculates type size & alignment.
   HexagonSubtarget Subtarget;
   HexagonInstrInfo InstrInfo;
   HexagonTargetLowering TLInfo;
@@ -68,7 +68,7 @@
     return &TSInfo;
   }
 
-  virtual const TargetData       *getTargetData() const { return &DataLayout; }
+  virtual const DataLayout       *getDataLayout() const { return &DL; }
   static unsigned getModuleMatchQuality(const Module &M);
 
   // Pass Pipeline Configuration.
Index: lib/Target/Hexagon/HexagonTargetObjectFile.cpp
===================================================================
--- lib/Target/Hexagon/HexagonTargetObjectFile.cpp	(revision 165037)
+++ lib/Target/Hexagon/HexagonTargetObjectFile.cpp	(working copy)
@@ -16,7 +16,7 @@
 #include "HexagonTargetMachine.h"
 #include "llvm/Function.h"
 #include "llvm/GlobalVariable.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/MC/MCContext.h"
 #include "llvm/Support/ELF.h"
@@ -73,7 +73,7 @@
 
   if (Kind.isBSS() || Kind.isDataNoRel() || Kind.isCommon()) {
     Type *Ty = GV->getType()->getElementType();
-    return IsInSmallSection(TM.getTargetData()->getTypeAllocSize(Ty));
+    return IsInSmallSection(TM.getDataLayout()->getTypeAllocSize(Ty));
   }
 
   return false;
Index: lib/Target/Hexagon/HexagonVarargsCallingConvention.h
===================================================================
--- lib/Target/Hexagon/HexagonVarargsCallingConvention.h	(revision 165037)
+++ lib/Target/Hexagon/HexagonVarargsCallingConvention.h	(working copy)
@@ -75,9 +75,9 @@
 
   const Type* ArgTy = LocVT.getTypeForEVT(State.getContext());
   unsigned Alignment =
-    State.getTarget().getTargetData()->getABITypeAlignment(ArgTy);
+    State.getTarget().getDataLayout()->getABITypeAlignment(ArgTy);
   unsigned Size =
-    State.getTarget().getTargetData()->getTypeSizeInBits(ArgTy) / 8;
+    State.getTarget().getDataLayout()->getTypeSizeInBits(ArgTy) / 8;
 
   // If it's passed by value, then we need the size of the aggregate not of
   // the pointer.
@@ -130,9 +130,9 @@
 
   const Type* ArgTy = LocVT.getTypeForEVT(State.getContext());
   unsigned Alignment =
-    State.getTarget().getTargetData()->getABITypeAlignment(ArgTy);
+    State.getTarget().getDataLayout()->getABITypeAlignment(ArgTy);
   unsigned Size =
-    State.getTarget().getTargetData()->getTypeSizeInBits(ArgTy) / 8;
+    State.getTarget().getDataLayout()->getTypeSizeInBits(ArgTy) / 8;
 
   unsigned Offset3 = State.AllocateStack(Size, Alignment);
   State.addLoc(CCValAssign::getMem(ValNo, ValVT.getSimpleVT(), Offset3,
Index: lib/Target/Mangler.cpp
===================================================================
--- lib/Target/Mangler.cpp	(revision 165037)
+++ lib/Target/Mangler.cpp	(working copy)
@@ -14,10 +14,10 @@
 #include "llvm/Target/Mangler.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/Function.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCContext.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/Twine.h"
 using namespace llvm;
@@ -157,7 +157,7 @@
 /// a suffix on their name indicating the number of words of arguments they
 /// take.
 static void AddFastCallStdCallSuffix(SmallVectorImpl<char> &OutName,
-                                     const Function *F, const TargetData &TD) {
+                                     const Function *F, const DataLayout &TD) {
   // Calculate arguments size total.
   unsigned ArgWords = 0;
   for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
Index: lib/Target/MBlaze/MBlazeAsmPrinter.cpp
===================================================================
--- lib/Target/MBlaze/MBlazeAsmPrinter.cpp	(revision 165037)
+++ lib/Target/MBlaze/MBlazeAsmPrinter.cpp	(working copy)
@@ -34,7 +34,7 @@
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLoweringObjectFile.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
Index: lib/Target/MBlaze/MBlazeELFWriterInfo.cpp
===================================================================
--- lib/Target/MBlaze/MBlazeELFWriterInfo.cpp	(revision 165037)
+++ lib/Target/MBlaze/MBlazeELFWriterInfo.cpp	(working copy)
@@ -16,7 +16,7 @@
 #include "llvm/Function.h"
 #include "llvm/Support/ELF.h"
 #include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetMachine.h"
 
 using namespace llvm;
@@ -26,8 +26,8 @@
 //===----------------------------------------------------------------------===//
 
 MBlazeELFWriterInfo::MBlazeELFWriterInfo(TargetMachine &TM)
-  : TargetELFWriterInfo(TM.getTargetData()->getPointerSizeInBits() == 64,
-                        TM.getTargetData()->isLittleEndian()) {
+  : TargetELFWriterInfo(TM.getDataLayout()->getPointerSizeInBits() == 64,
+                        TM.getDataLayout()->isLittleEndian()) {
 }
 
 MBlazeELFWriterInfo::~MBlazeELFWriterInfo() {}
Index: lib/Target/MBlaze/MBlazeFrameLowering.cpp
===================================================================
--- lib/Target/MBlaze/MBlazeFrameLowering.cpp	(revision 165037)
+++ lib/Target/MBlaze/MBlazeFrameLowering.cpp	(working copy)
@@ -23,7 +23,7 @@
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
Index: lib/Target/MBlaze/MBlazeTargetMachine.cpp
===================================================================
--- lib/Target/MBlaze/MBlazeTargetMachine.cpp	(revision 165037)
+++ lib/Target/MBlaze/MBlazeTargetMachine.cpp	(working copy)
@@ -38,7 +38,7 @@
                     CodeGenOpt::Level OL)
   : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
     Subtarget(TT, CPU, FS),
-    DataLayout("E-p:32:32:32-i8:8:8-i16:16:16"),
+    DL("E-p:32:32:32-i8:8:8-i16:16:16"),
     InstrInfo(*this),
     FrameLowering(Subtarget),
     TLInfo(*this), TSInfo(*this), ELFWriterInfo(*this),
Index: lib/Target/MBlaze/MBlazeTargetMachine.h
===================================================================
--- lib/Target/MBlaze/MBlazeTargetMachine.h	(revision 165037)
+++ lib/Target/MBlaze/MBlazeTargetMachine.h	(working copy)
@@ -23,7 +23,7 @@
 #include "MBlazeELFWriterInfo.h"
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetFrameLowering.h"
 
 namespace llvm {
@@ -31,7 +31,7 @@
 
   class MBlazeTargetMachine : public LLVMTargetMachine {
     MBlazeSubtarget        Subtarget;
-    const TargetData       DataLayout; // Calculates type size & alignment
+    const DataLayout       DL; // Calculates type size & alignment
     MBlazeInstrInfo        InstrInfo;
     MBlazeFrameLowering    FrameLowering;
     MBlazeTargetLowering   TLInfo;
@@ -59,8 +59,8 @@
     virtual const MBlazeSubtarget *getSubtargetImpl() const
     { return &Subtarget; }
 
-    virtual const TargetData *getTargetData() const
-    { return &DataLayout;}
+    virtual const DataLayout *getDataLayout() const
+    { return &DL;}
 
     virtual const MBlazeRegisterInfo *getRegisterInfo() const
     { return &InstrInfo.getRegisterInfo(); }
Index: lib/Target/MBlaze/MBlazeTargetObjectFile.cpp
===================================================================
--- lib/Target/MBlaze/MBlazeTargetObjectFile.cpp	(revision 165037)
+++ lib/Target/MBlaze/MBlazeTargetObjectFile.cpp	(working copy)
@@ -13,7 +13,7 @@
 #include "llvm/GlobalVariable.h"
 #include "llvm/MC/MCContext.h"
 #include "llvm/MC/MCSectionELF.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ELF.h"
@@ -70,7 +70,7 @@
     return false;
 
   Type *Ty = GV->getType()->getElementType();
-  return IsInSmallSection(TM.getTargetData()->getTypeAllocSize(Ty));
+  return IsInSmallSection(TM.getDataLayout()->getTypeAllocSize(Ty));
 }
 
 const MCSection *MBlazeTargetObjectFile::
Index: lib/Target/Mips/Mips16FrameLowering.cpp
===================================================================
--- lib/Target/Mips/Mips16FrameLowering.cpp	(revision 165037)
+++ lib/Target/Mips/Mips16FrameLowering.cpp	(working copy)
@@ -20,7 +20,7 @@
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/CommandLine.h"
 
Index: lib/Target/Mips/MipsAsmPrinter.cpp
===================================================================
--- lib/Target/Mips/MipsAsmPrinter.cpp	(revision 165037)
+++ lib/Target/Mips/MipsAsmPrinter.cpp	(working copy)
@@ -38,7 +38,7 @@
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/TargetRegistry.h"
 #include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLoweringObjectFile.h"
 #include "llvm/Target/TargetOptions.h"
 
Index: lib/Target/Mips/MipsCodeEmitter.cpp
===================================================================
--- lib/Target/Mips/MipsCodeEmitter.cpp	(revision 165037)
+++ lib/Target/Mips/MipsCodeEmitter.cpp	(working copy)
@@ -47,7 +47,7 @@
 class MipsCodeEmitter : public MachineFunctionPass {
   MipsJITInfo *JTI;
   const MipsInstrInfo *II;
-  const TargetData *TD;
+  const DataLayout *TD;
   const MipsSubtarget *Subtarget;
   TargetMachine &TM;
   JITCodeEmitter &MCE;
@@ -66,7 +66,7 @@
     MipsCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce) :
       MachineFunctionPass(ID), JTI(0),
       II((const MipsInstrInfo *) tm.getInstrInfo()),
-      TD(tm.getTargetData()), TM(tm), MCE(mce), MCPEs(0), MJTEs(0),
+      TD(tm.getDataLayout()), TM(tm), MCE(mce), MCPEs(0), MJTEs(0),
       IsPIC(TM.getRelocationModel() == Reloc::PIC_) {
     }
 
@@ -128,7 +128,7 @@
 bool MipsCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
   JTI = ((MipsTargetMachine&) MF.getTarget()).getJITInfo();
   II = ((const MipsTargetMachine&) MF.getTarget()).getInstrInfo();
-  TD = ((const MipsTargetMachine&) MF.getTarget()).getTargetData();
+  TD = ((const MipsTargetMachine&) MF.getTarget()).getDataLayout();
   Subtarget = &TM.getSubtarget<MipsSubtarget> ();
   MCPEs = &MF.getConstantPool()->getConstants();
   MJTEs = 0;
Index: lib/Target/Mips/MipsELFWriterInfo.cpp
===================================================================
--- lib/Target/Mips/MipsELFWriterInfo.cpp	(revision 165037)
+++ lib/Target/Mips/MipsELFWriterInfo.cpp	(working copy)
@@ -15,7 +15,7 @@
 #include "MipsRelocations.h"
 #include "llvm/Function.h"
 #include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Support/ELF.h"
 
Index: lib/Target/Mips/MipsFrameLowering.cpp
===================================================================
--- lib/Target/Mips/MipsFrameLowering.cpp	(revision 165037)
+++ lib/Target/Mips/MipsFrameLowering.cpp	(working copy)
@@ -23,7 +23,7 @@
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/CommandLine.h"
 
Index: lib/Target/Mips/MipsSEFrameLowering.cpp
===================================================================
--- lib/Target/Mips/MipsSEFrameLowering.cpp	(revision 165037)
+++ lib/Target/Mips/MipsSEFrameLowering.cpp	(working copy)
@@ -22,7 +22,7 @@
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/CommandLine.h"
 
Index: lib/Target/Mips/MipsTargetMachine.cpp
===================================================================
--- lib/Target/Mips/MipsTargetMachine.cpp	(revision 165037)
+++ lib/Target/Mips/MipsTargetMachine.cpp	(working copy)
@@ -43,7 +43,7 @@
                   bool isLittle)
   : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
     Subtarget(TT, CPU, FS, isLittle, RM),
-    DataLayout(isLittle ?
+    DL(isLittle ?
                (Subtarget.isABI_N64() ?
                 "e-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-n32" :
                 "e-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32") :
Index: lib/Target/Mips/MipsTargetMachine.h
===================================================================
--- lib/Target/Mips/MipsTargetMachine.h	(revision 165037)
+++ lib/Target/Mips/MipsTargetMachine.h	(working copy)
@@ -22,7 +22,7 @@
 #include "MipsSubtarget.h"
 #include "MipsELFWriterInfo.h"
 #include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetFrameLowering.h"
 
 namespace llvm {
@@ -31,7 +31,7 @@
 
 class MipsTargetMachine : public LLVMTargetMachine {
   MipsSubtarget       Subtarget;
-  const TargetData    DataLayout; // Calculates type size & alignment
+  const DataLayout    DL; // Calculates type size & alignment
   const MipsInstrInfo *InstrInfo;
   const MipsFrameLowering *FrameLowering;
   MipsTargetLowering  TLInfo;
@@ -54,8 +54,8 @@
   { return FrameLowering; }
   virtual const MipsSubtarget *getSubtargetImpl() const
   { return &Subtarget; }
-  virtual const TargetData *getTargetData()    const
-  { return &DataLayout;}
+  virtual const DataLayout *getDataLayout()    const
+  { return &DL;}
   virtual MipsJITInfo *getJITInfo()
   { return &JITInfo; }
 
Index: lib/Target/Mips/MipsTargetObjectFile.cpp
===================================================================
--- lib/Target/Mips/MipsTargetObjectFile.cpp	(revision 165037)
+++ lib/Target/Mips/MipsTargetObjectFile.cpp	(working copy)
@@ -13,7 +13,7 @@
 #include "llvm/GlobalVariable.h"
 #include "llvm/MC/MCContext.h"
 #include "llvm/MC/MCSectionELF.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ELF.h"
@@ -82,7 +82,7 @@
     return false;
 
   Type *Ty = GV->getType()->getElementType();
-  return IsInSmallSection(TM.getTargetData()->getTypeAllocSize(Ty));
+  return IsInSmallSection(TM.getDataLayout()->getTypeAllocSize(Ty));
 }
 
 
Index: lib/Target/MSP430/MSP430FrameLowering.cpp
===================================================================
--- lib/Target/MSP430/MSP430FrameLowering.cpp	(revision 165037)
+++ lib/Target/MSP430/MSP430FrameLowering.cpp	(working copy)
@@ -20,7 +20,7 @@
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/CommandLine.h"
 
Index: lib/Target/MSP430/MSP430ISelLowering.cpp
===================================================================
--- lib/Target/MSP430/MSP430ISelLowering.cpp	(revision 165037)
+++ lib/Target/MSP430/MSP430ISelLowering.cpp	(working copy)
@@ -61,7 +61,7 @@
   TargetLowering(tm, new TargetLoweringObjectFileELF()),
   Subtarget(*tm.getSubtargetImpl()) {
 
-  TD = getTargetData();
+  TD = getDataLayout();
 
   // Set up the register classes.
   addRegisterClass(MVT::i8,  &MSP430::GR8RegClass);
Index: lib/Target/MSP430/MSP430ISelLowering.h
===================================================================
--- lib/Target/MSP430/MSP430ISelLowering.h	(revision 165037)
+++ lib/Target/MSP430/MSP430ISelLowering.h	(working copy)
@@ -169,7 +169,7 @@
                                             SelectionDAG &DAG) const;
 
     const MSP430Subtarget &Subtarget;
-    const TargetData *TD;
+    const DataLayout *TD;
   };
 } // namespace llvm
 
Index: lib/Target/MSP430/MSP430TargetMachine.cpp
===================================================================
--- lib/Target/MSP430/MSP430TargetMachine.cpp	(revision 165037)
+++ lib/Target/MSP430/MSP430TargetMachine.cpp	(working copy)
@@ -33,8 +33,8 @@
                                          CodeGenOpt::Level OL)
   : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
     Subtarget(TT, CPU, FS),
-    // FIXME: Check TargetData string.
-    DataLayout("e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"),
+    // FIXME: Check DataLayout string.
+    DL("e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"),
     InstrInfo(*this), TLInfo(*this), TSInfo(*this),
     FrameLowering(Subtarget) { }
 
Index: lib/Target/MSP430/MSP430TargetMachine.h
===================================================================
--- lib/Target/MSP430/MSP430TargetMachine.h	(revision 165037)
+++ lib/Target/MSP430/MSP430TargetMachine.h	(working copy)
@@ -21,7 +21,7 @@
 #include "MSP430SelectionDAGInfo.h"
 #include "MSP430RegisterInfo.h"
 #include "MSP430Subtarget.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetFrameLowering.h"
 #include "llvm/Target/TargetMachine.h"
 
@@ -31,7 +31,7 @@
 ///
 class MSP430TargetMachine : public LLVMTargetMachine {
   MSP430Subtarget        Subtarget;
-  const TargetData       DataLayout;       // Calculates type size & alignment
+  const DataLayout       DL;       // Calculates type size & alignment
   MSP430InstrInfo        InstrInfo;
   MSP430TargetLowering   TLInfo;
   MSP430SelectionDAGInfo TSInfo;
@@ -47,7 +47,7 @@
     return &FrameLowering;
   }
   virtual const MSP430InstrInfo *getInstrInfo() const  { return &InstrInfo; }
-  virtual const TargetData *getTargetData() const     { return &DataLayout;}
+  virtual const DataLayout *getDataLayout() const     { return &DL;}
   virtual const MSP430Subtarget *getSubtargetImpl() const { return &Subtarget; }
 
   virtual const TargetRegisterInfo *getRegisterInfo() const {
Index: lib/Target/NVPTX/NVPTXAllocaHoisting.h
===================================================================
--- lib/Target/NVPTX/NVPTXAllocaHoisting.h	(revision 165037)
+++ lib/Target/NVPTX/NVPTXAllocaHoisting.h	(working copy)
@@ -16,7 +16,7 @@
 
 #include "llvm/CodeGen/MachineFunctionAnalysis.h"
 #include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 
 namespace llvm {
 
@@ -31,7 +31,7 @@
   NVPTXAllocaHoisting() : FunctionPass(ID) {}
 
   void getAnalysisUsage(AnalysisUsage &AU) const {
-    AU.addRequired<TargetData>();
+    AU.addRequired<DataLayout>();
     AU.addPreserved<MachineFunctionAnalysis>();
   }
 
Index: lib/Target/NVPTX/NVPTXAsmPrinter.cpp
===================================================================
--- lib/Target/NVPTX/NVPTXAsmPrinter.cpp	(revision 165037)
+++ lib/Target/NVPTX/NVPTXAsmPrinter.cpp	(working copy)
@@ -98,10 +98,10 @@
   switch (CE->getOpcode()) {
   default:
     // If the code isn't optimized, there may be outstanding folding
-    // opportunities. Attempt to fold the expression using TargetData as a
+    // opportunities. Attempt to fold the expression using DataLayout as a
     // last resort before giving up.
     if (Constant *C =
-        ConstantFoldConstantExpression(CE, AP.TM.getTargetData()))
+        ConstantFoldConstantExpression(CE, AP.TM.getDataLayout()))
       if (C != CE)
         return LowerConstant(C, AP);
 
@@ -115,7 +115,7 @@
         report_fatal_error(OS.str());
     }
   case Instruction::GetElementPtr: {
-    const TargetData &TD = *AP.TM.getTargetData();
+    const DataLayout &TD = *AP.TM.getDataLayout();
     // Generate a symbolic expression for the byte address
     const Constant *PtrVal = CE->getOperand(0);
     SmallVector<Value*, 8> IdxVec(CE->op_begin()+1, CE->op_end());
@@ -145,7 +145,7 @@
     return LowerConstant(CE->getOperand(0), AP);
 
   case Instruction::IntToPtr: {
-    const TargetData &TD = *AP.TM.getTargetData();
+    const DataLayout &TD = *AP.TM.getDataLayout();
     // Handle casts to pointers by changing them into casts to the appropriate
     // integer type.  This promotes constant folding and simplifies this code.
     Constant *Op = CE->getOperand(0);
@@ -155,7 +155,7 @@
   }
 
   case Instruction::PtrToInt: {
-    const TargetData &TD = *AP.TM.getTargetData();
+    const DataLayout &TD = *AP.TM.getDataLayout();
     // Support only foldable casts to/from pointers that can be eliminated by
     // changing the pointer to the appropriately sized integer type.
     Constant *Op = CE->getOperand(0);
@@ -270,7 +270,7 @@
 void NVPTXAsmPrinter::printReturnValStr(const Function *F,
                                         raw_ostream &O)
 {
-  const TargetData *TD = TM.getTargetData();
+  const DataLayout *TD = TM.getDataLayout();
   const TargetLowering *TLI = TM.getTargetLowering();
 
   Type *Ty = F->getReturnType();
@@ -874,7 +874,7 @@
   const_cast<TargetLoweringObjectFile&>(getObjFileLowering())
           .Initialize(OutContext, TM);
 
-  Mang = new Mangler(OutContext, *TM.getTargetData());
+  Mang = new Mangler(OutContext, *TM.getDataLayout());
 
   // Emit header before any dwarf directives are emitted below.
   emitHeader(M, OS1);
@@ -1023,7 +1023,7 @@
       return;
   }
 
-  const TargetData *TD = TM.getTargetData();
+  const DataLayout *TD = TM.getDataLayout();
 
   // GlobalVariables are always constant pointers themselves.
   const PointerType *PTy = GVar->getType();
@@ -1296,7 +1296,7 @@
 void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable* GVar,
                                             raw_ostream &O) {
 
-  const TargetData *TD = TM.getTargetData();
+  const DataLayout *TD = TM.getDataLayout();
 
   // GlobalVariables are always constant pointers themselves.
   const PointerType *PTy = GVar->getType();
@@ -1342,7 +1342,7 @@
 
 
 static unsigned int
-getOpenCLAlignment(const TargetData *TD,
+getOpenCLAlignment(const DataLayout *TD,
                    Type *Ty) {
   if (Ty->isPrimitiveType() || Ty->isIntegerTy() || isa<PointerType>(Ty))
     return TD->getPrefTypeAlignment(Ty);
@@ -1421,7 +1421,7 @@
 
 void NVPTXAsmPrinter::emitFunctionParamList(const Function *F,
                                             raw_ostream &O) {
-  const TargetData *TD = TM.getTargetData();
+  const DataLayout *TD = TM.getDataLayout();
   const AttrListPtr &PAL = F->getAttributes();
   const TargetLowering *TLI = TM.getTargetLowering();
   Function::const_arg_iterator I, E;
@@ -1714,7 +1714,7 @@
 void NVPTXAsmPrinter::bufferLEByte(Constant *CPV, int Bytes,
                                    AggBuffer *aggBuffer) {
 
-  const TargetData *TD = TM.getTargetData();
+  const DataLayout *TD = TM.getDataLayout();
 
   if (isa<UndefValue>(CPV) || CPV->isNullValue()) {
     int s = TD->getTypeAllocSize(CPV->getType());
@@ -1843,7 +1843,7 @@
 
 void NVPTXAsmPrinter::bufferAggregateConstant(Constant *CPV,
                                               AggBuffer *aggBuffer) {
-  const TargetData *TD = TM.getTargetData();
+  const DataLayout *TD = TM.getDataLayout();
   int Bytes;
 
   // Old constants
Index: lib/Target/NVPTX/NVPTXISelLowering.cpp
===================================================================
--- lib/Target/NVPTX/NVPTXISelLowering.cpp	(revision 165037)
+++ lib/Target/NVPTX/NVPTXISelLowering.cpp	(working copy)
@@ -402,7 +402,7 @@
 
     if (isABI) {
       unsigned align = Outs[i].Flags.getByValAlign();
-      unsigned sz = getTargetData()->getTypeAllocSize(ETy);
+      unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
       O << ".param .align " << align
           << " .b8 ";
       O << "_";
@@ -655,11 +655,11 @@
       else {
         if (Func) { // direct call
           if (!llvm::getAlign(*(CS->getCalledFunction()), 0, retAlignment))
-            retAlignment = getTargetData()->getABITypeAlignment(retTy);
+            retAlignment = getDataLayout()->getABITypeAlignment(retTy);
         } else { // indirect call
           const CallInst *CallI = dyn_cast<CallInst>(CS->getInstruction());
           if (!llvm::getAlign(*CallI, 0, retAlignment))
-            retAlignment = getTargetData()->getABITypeAlignment(retTy);
+            retAlignment = getDataLayout()->getABITypeAlignment(retTy);
         }
         SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
         SDValue DeclareRetOps[] = { Chain, DAG.getConstant(retAlignment,
@@ -916,7 +916,7 @@
                                           DebugLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals) const {
   MachineFunction &MF = DAG.getMachineFunction();
-  const TargetData *TD = getTargetData();
+  const DataLayout *TD = getDataLayout();
 
   const Function *F = MF.getFunction();
   const AttrListPtr &PAL = F->getAttributes();
Index: lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
===================================================================
--- lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp	(revision 165037)
+++ lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp	(working copy)
@@ -21,7 +21,7 @@
 #include "llvm/LLVMContext.h"
 #include "llvm/Module.h"
 #include "llvm/Support/InstIterator.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 
 using namespace llvm;
 
@@ -110,7 +110,7 @@
   SmallVector<MemTransferInst *, 4> aggrMemcpys;
   SmallVector<MemSetInst *, 4> aggrMemsets;
 
-  TargetData *TD = &getAnalysis<TargetData>();
+  DataLayout *TD = &getAnalysis<DataLayout>();
   LLVMContext &Context = F.getParent()->getContext();
 
   //
Index: lib/Target/NVPTX/NVPTXLowerAggrCopies.h
===================================================================
--- lib/Target/NVPTX/NVPTXLowerAggrCopies.h	(revision 165037)
+++ lib/Target/NVPTX/NVPTXLowerAggrCopies.h	(working copy)
@@ -17,7 +17,7 @@
 
 #include "llvm/Pass.h"
 #include "llvm/CodeGen/MachineFunctionAnalysis.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 
 namespace llvm {
 
@@ -28,7 +28,7 @@
   NVPTXLowerAggrCopies() : FunctionPass(ID) {}
 
   void getAnalysisUsage(AnalysisUsage &AU) const {
-    AU.addRequired<TargetData>();
+    AU.addRequired<DataLayout>();
     AU.addPreserved<MachineFunctionAnalysis>();
   }
 
Index: lib/Target/NVPTX/NVPTXTargetMachine.cpp
===================================================================
--- lib/Target/NVPTX/NVPTXTargetMachine.cpp	(revision 165037)
+++ lib/Target/NVPTX/NVPTXTargetMachine.cpp	(working copy)
@@ -32,7 +32,7 @@
 #include "llvm/MC/MCSubtargetInfo.h"
 #include "llvm/Support/TargetRegistry.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetLoweringObjectFile.h"
@@ -71,7 +71,7 @@
                                        bool is64bit)
 : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
   Subtarget(TT, CPU, FS, is64bit),
-  DataLayout(Subtarget.getDataLayout()),
+  DL(Subtarget.getDataLayout()),
   InstrInfo(*this), TLInfo(*this), TSInfo(*this), FrameLowering(*this,is64bit)
 /*FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0)*/ {
 }
Index: lib/Target/NVPTX/NVPTXTargetMachine.h
===================================================================
--- lib/Target/NVPTX/NVPTXTargetMachine.h	(revision 165037)
+++ lib/Target/NVPTX/NVPTXTargetMachine.h	(working copy)
@@ -21,7 +21,7 @@
 #include "NVPTXSubtarget.h"
 #include "NVPTXFrameLowering.h"
 #include "ManagedStringPool.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetFrameLowering.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetSelectionDAGInfo.h"
@@ -32,7 +32,7 @@
 ///
 class NVPTXTargetMachine : public LLVMTargetMachine {
   NVPTXSubtarget        Subtarget;
-  const TargetData      DataLayout;       // Calculates type size & alignment
+  const DataLayout      DL;       // Calculates type size & alignment
   NVPTXInstrInfo        InstrInfo;
   NVPTXTargetLowering   TLInfo;
   TargetSelectionDAGInfo   TSInfo;
@@ -58,7 +58,7 @@
     return &FrameLowering;
   }
   virtual const NVPTXInstrInfo *getInstrInfo() const  { return &InstrInfo; }
-  virtual const TargetData *getTargetData() const     { return &DataLayout;}
+  virtual const DataLayout *getDataLayout() const     { return &DL;}
   virtual const NVPTXSubtarget *getSubtargetImpl() const { return &Subtarget;}
 
   virtual const NVPTXRegisterInfo *getRegisterInfo() const {
Index: lib/Target/PowerPC/PPCAsmPrinter.cpp
===================================================================
--- lib/Target/PowerPC/PPCAsmPrinter.cpp	(revision 165037)
+++ lib/Target/PowerPC/PPCAsmPrinter.cpp	(working copy)
@@ -437,7 +437,7 @@
 
 
 bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
-  const TargetData *TD = TM.getTargetData();
+  const DataLayout *TD = TM.getDataLayout();
 
   bool isPPC64 = TD->getPointerSizeInBits() == 64;
 
@@ -545,7 +545,7 @@
 
 void PPCDarwinAsmPrinter::
 EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
-  bool isPPC64 = TM.getTargetData()->getPointerSizeInBits() == 64;
+  bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64;
   
   const TargetLoweringObjectFileMachO &TLOFMacho = 
     static_cast(getObjFileLowering());
@@ -640,7 +640,7 @@
 
 
 bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
-  bool isPPC64 = TM.getTargetData()->getPointerSizeInBits() == 64;
+  bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64;
 
   // Darwin/PPC always uses mach-o.
   const TargetLoweringObjectFileMachO &TLOFMacho = 
Index: lib/Target/PowerPC/PPCISelLowering.cpp
===================================================================
--- lib/Target/PowerPC/PPCISelLowering.cpp	(revision 165037)
+++ lib/Target/PowerPC/PPCISelLowering.cpp	(working copy)
@@ -1493,7 +1493,7 @@
   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   bool isPPC64 = (PtrVT == MVT::i64);
   Type *IntPtrTy =
-    DAG.getTargetLoweringInfo().getTargetData()->getIntPtrType(
+    DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType(
                                                              *DAG.getContext());
 
   TargetLowering::ArgListTy Args;
Index: lib/Target/PowerPC/PPCSubtarget.h
===================================================================
--- lib/Target/PowerPC/PPCSubtarget.h	(revision 165037)
+++ lib/Target/PowerPC/PPCSubtarget.h	(working copy)
@@ -108,9 +108,9 @@
   /// selection.
   const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
 
-  /// getTargetDataString - Return the pointer size and type alignment
+  /// getDataLayoutString - Return the pointer size and type alignment
   /// properties of this subtarget.
-  const char *getTargetDataString() const {
+  const char *getDataLayoutString() const {
     // Note, the alignment values for f64 and i64 on ppc64 in Darwin
     // documentation are wrong; these are correct (i.e. "what gcc does").
     return isPPC64() ? "E-p:64:64-f64:64:64-i64:64:64-f128:64:128-n32:64"
Index: lib/Target/PowerPC/PPCTargetMachine.cpp
===================================================================
--- lib/Target/PowerPC/PPCTargetMachine.cpp	(revision 165037)
+++ lib/Target/PowerPC/PPCTargetMachine.cpp	(working copy)
@@ -40,7 +40,7 @@
                                    bool is64Bit)
   : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
     Subtarget(TT, CPU, FS, is64Bit),
-    DataLayout(Subtarget.getTargetDataString()), InstrInfo(*this),
+    DL(Subtarget.getDataLayoutString()), InstrInfo(*this),
     FrameLowering(Subtarget), JITInfo(*this, is64Bit),
     TLInfo(*this), TSInfo(*this),
     InstrItins(Subtarget.getInstrItineraryData()) {
Index: lib/Target/PowerPC/PPCTargetMachine.h
===================================================================
--- lib/Target/PowerPC/PPCTargetMachine.h	(revision 165037)
+++ lib/Target/PowerPC/PPCTargetMachine.h	(working copy)
@@ -21,7 +21,7 @@
 #include "PPCISelLowering.h"
 #include "PPCSelectionDAGInfo.h"
 #include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 
 namespace llvm {
 
@@ -29,7 +29,7 @@
 ///
 class PPCTargetMachine : public LLVMTargetMachine {
   PPCSubtarget        Subtarget;
-  const TargetData    DataLayout;       // Calculates type size & alignment
+  const DataLayout    DL;       // Calculates type size & alignment
   PPCInstrInfo        InstrInfo;
   PPCFrameLowering    FrameLowering;
   PPCJITInfo          JITInfo;
@@ -58,7 +58,7 @@
     return &InstrInfo.getRegisterInfo();
   }
 
-  virtual const TargetData    *getTargetData() const    { return &DataLayout; }
+  virtual const DataLayout    *getDataLayout() const    { return &DL; }
   virtual const PPCSubtarget  *getSubtargetImpl() const { return &Subtarget; }
   virtual const InstrItineraryData *getInstrItineraryData() const {
     return &InstrItins;
Index: lib/Target/Sparc/SparcFrameLowering.cpp
===================================================================
--- lib/Target/Sparc/SparcFrameLowering.cpp	(revision 165037)
+++ lib/Target/Sparc/SparcFrameLowering.cpp	(working copy)
@@ -20,7 +20,7 @@
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/CommandLine.h"
 
Index: lib/Target/Sparc/SparcISelLowering.cpp
===================================================================
--- lib/Target/Sparc/SparcISelLowering.cpp	(revision 165037)
+++ lib/Target/Sparc/SparcISelLowering.cpp	(working copy)
@@ -637,7 +637,7 @@
 
   PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
   Type *ElementTy = Ty->getElementType();
-  return getTargetData()->getTypeAllocSize(ElementTy);
+  return getDataLayout()->getTypeAllocSize(ElementTy);
 }
 
 //===----------------------------------------------------------------------===//
Index: lib/Target/Sparc/SparcTargetMachine.cpp
===================================================================
--- lib/Target/Sparc/SparcTargetMachine.cpp	(revision 165037)
+++ lib/Target/Sparc/SparcTargetMachine.cpp	(working copy)
@@ -33,7 +33,7 @@
                                        bool is64bit)
   : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
     Subtarget(TT, CPU, FS, is64bit),
-    DataLayout(Subtarget.getDataLayout()),
+    DL(Subtarget.getDataLayout()),
     InstrInfo(Subtarget),
     TLInfo(*this), TSInfo(*this),
     FrameLowering(Subtarget) {
Index: lib/Target/Sparc/SparcTargetMachine.h
===================================================================
--- lib/Target/Sparc/SparcTargetMachine.h	(revision 165037)
+++ lib/Target/Sparc/SparcTargetMachine.h	(working copy)
@@ -20,14 +20,14 @@
 #include "SparcSelectionDAGInfo.h"
 #include "SparcSubtarget.h"
 #include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetFrameLowering.h"
 
 namespace llvm {
 
 class SparcTargetMachine : public LLVMTargetMachine {
   SparcSubtarget Subtarget;
-  const TargetData DataLayout;       // Calculates type size & alignment
+  const DataLayout DL;       // Calculates type size & alignment
   SparcInstrInfo InstrInfo;
   SparcTargetLowering TLInfo;
   SparcSelectionDAGInfo TSInfo;
@@ -52,7 +52,7 @@
   virtual const SparcSelectionDAGInfo* getSelectionDAGInfo() const {
     return &TSInfo;
   }
-  virtual const TargetData       *getTargetData() const { return &DataLayout; }
+  virtual const DataLayout       *getDataLayout() const { return &DL; }
 
   // Pass Pipeline Configuration
   virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
Index: lib/Target/Target.cpp
===================================================================
--- lib/Target/Target.cpp	(revision 165037)
+++ lib/Target/Target.cpp	(working copy)
@@ -16,7 +16,7 @@
 #include "llvm-c/Initialization.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/PassManager.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/LLVMContext.h"
 #include <cstring>
@@ -24,7 +24,7 @@
 using namespace llvm;
 
 void llvm::initializeTarget(PassRegistry &Registry) {
-  initializeTargetDataPass(Registry);
+  initializeDataLayoutPass(Registry);
   initializeTargetLibraryInfoPass(Registry);
 }
 
@@ -32,12 +32,12 @@
   initializeTarget(*unwrap(R));
 }
 
-LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep) {
-  return wrap(new TargetData(StringRep));
+LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep) {
+  return wrap(new DataLayout(StringRep));
 }
 
 void LLVMAddTargetData(LLVMTargetDataRef TD, LLVMPassManagerRef PM) {
-  unwrap(PM)->add(new TargetData(*unwrap(TD)));
+  unwrap(PM)->add(new DataLayout(*unwrap(TD)));
 }
 
 void LLVMAddTargetLibraryInfo(LLVMTargetLibraryInfoRef TLI,
Index: lib/Target/TargetData.cpp
===================================================================
--- lib/Target/TargetData.cpp	(revision 165037)
+++ lib/Target/TargetData.cpp	(working copy)
@@ -1,665 +0,0 @@
-//===-- TargetData.cpp - Data size & alignment routines --------------------==//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines target properties related to datatype size/offset/alignment
-// information.
-//
-// This structure should be created once, filled in if the defaults are not
-// correct and then passed around by const&.  None of the members functions
-// require modification to the object.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Target/TargetData.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Module.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/Mutex.h"
-#include "llvm/ADT/DenseMap.h"
-#include <algorithm>
-#include <cstdlib>
-using namespace llvm;
-
-// Handle the Pass registration stuff necessary to use TargetData's.
-
-// Register the default SparcV9 implementation...
-INITIALIZE_PASS(TargetData, "targetdata", "Target Data Layout", false, true)
-char TargetData::ID = 0;
-
-//===----------------------------------------------------------------------===//
-// Support for StructLayout
-//===----------------------------------------------------------------------===//
-
-StructLayout::StructLayout(StructType *ST, const TargetData &TD) {
-  assert(!ST->isOpaque() && "Cannot get layout of opaque structs");
-  StructAlignment = 0;
-  StructSize = 0;
-  NumElements = ST->getNumElements();
-
-  // Loop over each of the elements, placing them in memory.
-  for (unsigned i = 0, e = NumElements; i != e; ++i) {
-    Type *Ty = ST->getElementType(i);
-    unsigned TyAlign = ST->isPacked() ? 1 : TD.getABITypeAlignment(Ty);
-
-    // Add padding if necessary to align the data element properly.
-    if ((StructSize & (TyAlign-1)) != 0)
-      StructSize = TargetData::RoundUpAlignment(StructSize, TyAlign);
-
-    // Keep track of maximum alignment constraint.
-    StructAlignment = std::max(TyAlign, StructAlignment);
-
-    MemberOffsets[i] = StructSize;
-    StructSize += TD.getTypeAllocSize(Ty); // Consume space for this data item
-  }
-
-  // Empty structures have alignment of 1 byte.
-  if (StructAlignment == 0) StructAlignment = 1;
-
-  // Add padding to the end of the struct so that it could be put in an array
-  // and all array elements would be aligned correctly.
-  if ((StructSize & (StructAlignment-1)) != 0)
-    StructSize = TargetData::RoundUpAlignment(StructSize, StructAlignment);
-}
-
-
-/// getElementContainingOffset - Given a valid offset into the structure,
-/// return the structure index that contains it.
-unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const {
-  const uint64_t *SI =
-    std::upper_bound(&MemberOffsets[0], &MemberOffsets[NumElements], Offset);
-  assert(SI != &MemberOffsets[0] && "Offset not in structure type!");
-  --SI;
-  assert(*SI <= Offset && "upper_bound didn't work");
-  assert((SI == &MemberOffsets[0] || *(SI-1) <= Offset) &&
-         (SI+1 == &MemberOffsets[NumElements] || *(SI+1) > Offset) &&
-         "Upper bound didn't work!");
-
-  // Multiple fields can have the same offset if any of them are zero sized.
-  // For example, in { i32, [0 x i32], i32 }, searching for offset 4 will stop
-  // at the i32 element, because it is the last element at that offset.  This is
-  // the right one to return, because anything after it will have a higher
-  // offset, implying that this element is non-empty.
-  return SI-&MemberOffsets[0];
-}
-
-//===----------------------------------------------------------------------===//
-// TargetAlignElem, TargetAlign support
-//===----------------------------------------------------------------------===//
-
-TargetAlignElem
-TargetAlignElem::get(AlignTypeEnum align_type, unsigned abi_align,
-                     unsigned pref_align, uint32_t bit_width) {
-  assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
-  TargetAlignElem retval;
-  retval.AlignType = align_type;
-  retval.ABIAlign = abi_align;
-  retval.PrefAlign = pref_align;
-  retval.TypeBitWidth = bit_width;
-  return retval;
-}
-
-bool
-TargetAlignElem::operator==(const TargetAlignElem &rhs) const {
-  return (AlignType == rhs.AlignType
-          && ABIAlign == rhs.ABIAlign
-          && PrefAlign == rhs.PrefAlign
-          && TypeBitWidth == rhs.TypeBitWidth);
-}
-
-const TargetAlignElem
-TargetData::InvalidAlignmentElem = { (AlignTypeEnum)0xFF, 0, 0, 0 };
-
-//===----------------------------------------------------------------------===//
-//                       TargetData Class Implementation
-//===----------------------------------------------------------------------===//
-
-/// getInt - Get an integer ignoring errors.
-static int getInt(StringRef R) {
-  int Result = 0;
-  R.getAsInteger(10, Result);
-  return Result;
-}
-
-void TargetData::init() {
-  initializeTargetDataPass(*PassRegistry::getPassRegistry());
-
-  LayoutMap = 0;
-  LittleEndian = false;
-  PointerMemSize = 8;
-  PointerABIAlign = 8;
-  PointerPrefAlign = PointerABIAlign;
-  StackNaturalAlign = 0;
-
-  // Default alignments
-  setAlignment(INTEGER_ALIGN,   1,  1, 1);   // i1
-  setAlignment(INTEGER_ALIGN,   1,  1, 8);   // i8
-  setAlignment(INTEGER_ALIGN,   2,  2, 16);  // i16
-  setAlignment(INTEGER_ALIGN,   4,  4, 32);  // i32
-  setAlignment(INTEGER_ALIGN,   4,  8, 64);  // i64
-  setAlignment(FLOAT_ALIGN,     2,  2, 16);  // half
-  setAlignment(FLOAT_ALIGN,     4,  4, 32);  // float
-  setAlignment(FLOAT_ALIGN,     8,  8, 64);  // double
-  setAlignment(FLOAT_ALIGN,    16, 16, 128); // ppcf128, quad, ...
-  setAlignment(VECTOR_ALIGN,    8,  8, 64);  // v2i32, v1i64, ...
-  setAlignment(VECTOR_ALIGN,   16, 16, 128); // v16i8, v8i16, v4i32, ...
-  setAlignment(AGGREGATE_ALIGN, 0,  8,  0);  // struct
-}
-
-std::string TargetData::parseSpecifier(StringRef Desc, TargetData *td) {
-
-  if (td)
-    td->init();
-
-  while (!Desc.empty()) {
-    std::pair<StringRef, StringRef> Split = Desc.split('-');
-    StringRef Token = Split.first;
-    Desc = Split.second;
-
-    if (Token.empty())
-      continue;
-
-    Split = Token.split(':');
-    StringRef Specifier = Split.first;
-    Token = Split.second;
-
-    assert(!Specifier.empty() && "Can't be empty here");
-
-    switch (Specifier[0]) {
-    case 'E':
-      if (td)
-        td->LittleEndian = false;
-      break;
-    case 'e':
-      if (td)
-        td->LittleEndian = true;
-      break;
-    case 'p': {
-      // Pointer size.
-      Split = Token.split(':');
-      int PointerMemSizeBits = getInt(Split.first);
-      if (PointerMemSizeBits < 0 || PointerMemSizeBits % 8 != 0)
-        return "invalid pointer size, must be a positive 8-bit multiple";
-      if (td)
-        td->PointerMemSize = PointerMemSizeBits / 8;
-
-      // Pointer ABI alignment.
-      Split = Split.second.split(':');
-      int PointerABIAlignBits = getInt(Split.first);
-      if (PointerABIAlignBits < 0 || PointerABIAlignBits % 8 != 0) {
-        return "invalid pointer ABI alignment, "
-               "must be a positive 8-bit multiple";
-      }
-      if (td)
-        td->PointerABIAlign = PointerABIAlignBits / 8;
-
-      // Pointer preferred alignment.
-      Split = Split.second.split(':');
-      int PointerPrefAlignBits = getInt(Split.first);
-      if (PointerPrefAlignBits < 0 || PointerPrefAlignBits % 8 != 0) {
-        return "invalid pointer preferred alignment, "
-               "must be a positive 8-bit multiple";
-      }
-      if (td) {
-        td->PointerPrefAlign = PointerPrefAlignBits / 8;
-        if (td->PointerPrefAlign == 0)
-          td->PointerPrefAlign = td->PointerABIAlign;
-      }
-      break;
-    }
-    case 'i':
-    case 'v':
-    case 'f':
-    case 'a':
-    case 's': {
-      AlignTypeEnum AlignType;
-      char field = Specifier[0];
-      switch (field) {
-      default:
-      case 'i': AlignType = INTEGER_ALIGN; break;
-      case 'v': AlignType = VECTOR_ALIGN; break;
-      case 'f': AlignType = FLOAT_ALIGN; break;
-      case 'a': AlignType = AGGREGATE_ALIGN; break;
-      case 's': AlignType = STACK_ALIGN; break;
-      }
-      int Size = getInt(Specifier.substr(1));
-      if (Size < 0) {
-        return std::string("invalid ") + field + "-size field, "
-               "must be positive";
-      }
-
-      Split = Token.split(':');
-      int ABIAlignBits = getInt(Split.first);
-      if (ABIAlignBits < 0 || ABIAlignBits % 8 != 0) {
-        return std::string("invalid ") + field +"-abi-alignment field, "
-               "must be a positive 8-bit multiple";
-      }
-      unsigned ABIAlign = ABIAlignBits / 8;
-
-      Split = Split.second.split(':');
-
-      int PrefAlignBits = getInt(Split.first);
-      if (PrefAlignBits < 0 || PrefAlignBits % 8 != 0) {
-        return std::string("invalid ") + field +"-preferred-alignment field, "
-               "must be a positive 8-bit multiple";
-      }
-      unsigned PrefAlign = PrefAlignBits / 8;
-      if (PrefAlign == 0)
-        PrefAlign = ABIAlign;
-      
-      if (td)
-        td->setAlignment(AlignType, ABIAlign, PrefAlign, Size);
-      break;
-    }
-    case 'n':  // Native integer types.
-      Specifier = Specifier.substr(1);
-      do {
-        int Width = getInt(Specifier);
-        if (Width <= 0) {
-          return std::string("invalid native integer size \'") + Specifier.str() +
-                 "\', must be a positive integer.";
-        }
-        if (td && Width != 0)
-          td->LegalIntWidths.push_back(Width);
-        Split = Token.split(':');
-        Specifier = Split.first;
-        Token = Split.second;
-      } while (!Specifier.empty() || !Token.empty());
-      break;
-    case 'S': { // Stack natural alignment.
-      int StackNaturalAlignBits = getInt(Specifier.substr(1));
-      if (StackNaturalAlignBits < 0 || StackNaturalAlignBits % 8 != 0) {
-        return "invalid natural stack alignment (S-field), "
-               "must be a positive 8-bit multiple";
-      }
-      if (td)
-        td->StackNaturalAlign = StackNaturalAlignBits / 8;
-      break;
-    }
-    default:
-      break;
-    }
-  }
-
-  return "";
-}
-
-/// Default ctor.
-///
-/// @note This has to exist, because this is a pass, but it should never be
-/// used.
-TargetData::TargetData() : ImmutablePass(ID) {
-  report_fatal_error("Bad TargetData ctor used.  "
-                    "Tool did not specify a TargetData to use?");
-}
-
-TargetData::TargetData(const Module *M)
-  : ImmutablePass(ID) {
-  std::string errMsg = parseSpecifier(M->getDataLayout(), this);
-  assert(errMsg == "" && "Module M has malformed target data layout string.");
-  (void)errMsg;
-}
-
-void
-TargetData::setAlignment(AlignTypeEnum align_type, unsigned abi_align,
-                         unsigned pref_align, uint32_t bit_width) {
-  assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
-  assert(pref_align < (1 << 16) && "Alignment doesn't fit in bitfield");
-  assert(bit_width < (1 << 24) && "Bit width doesn't fit in bitfield");
-  for (unsigned i = 0, e = Alignments.size(); i != e; ++i) {
-    if (Alignments[i].AlignType == align_type &&
-        Alignments[i].TypeBitWidth == bit_width) {
-      // Update the abi, preferred alignments.
-      Alignments[i].ABIAlign = abi_align;
-      Alignments[i].PrefAlign = pref_align;
-      return;
-    }
-  }
-
-  Alignments.push_back(TargetAlignElem::get(align_type, abi_align,
-                                            pref_align, bit_width));
-}
-
-/// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or
-/// preferred if ABIInfo = false) the target wants for the specified datatype.
-unsigned TargetData::getAlignmentInfo(AlignTypeEnum AlignType,
-                                      uint32_t BitWidth, bool ABIInfo,
-                                      Type *Ty) const {
-  // Check to see if we have an exact match and remember the best match we see.
-  int BestMatchIdx = -1;
-  int LargestInt = -1;
-  for (unsigned i = 0, e = Alignments.size(); i != e; ++i) {
-    if (Alignments[i].AlignType == AlignType &&
-        Alignments[i].TypeBitWidth == BitWidth)
-      return ABIInfo ? Alignments[i].ABIAlign : Alignments[i].PrefAlign;
-
-    // The best match so far depends on what we're looking for.
-     if (AlignType == INTEGER_ALIGN &&
-         Alignments[i].AlignType == INTEGER_ALIGN) {
-      // The "best match" for integers is the smallest size that is larger than
-      // the BitWidth requested.
-      if (Alignments[i].TypeBitWidth > BitWidth && (BestMatchIdx == -1 ||
-           Alignments[i].TypeBitWidth < Alignments[BestMatchIdx].TypeBitWidth))
-        BestMatchIdx = i;
-      // However, if there isn't one that's larger, then we must use the
-      // largest one we have (see below)
-      if (LargestInt == -1 ||
-          Alignments[i].TypeBitWidth > Alignments[LargestInt].TypeBitWidth)
-        LargestInt = i;
-    }
-  }
-
-  // Okay, we didn't find an exact solution.  Fall back here depending on what
-  // is being looked for.
-  if (BestMatchIdx == -1) {
-    // If we didn't find an integer alignment, fall back on most conservative.
-    if (AlignType == INTEGER_ALIGN) {
-      BestMatchIdx = LargestInt;
-    } else {
-      assert(AlignType == VECTOR_ALIGN && "Unknown alignment type!");
-
-      // By default, use natural alignment for vector types. This is consistent
-      // with what clang and llvm-gcc do.
-      unsigned Align = getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
-      Align *= cast<VectorType>(Ty)->getNumElements();
-      // If the alignment is not a power of 2, round up to the next power of 2.
-      // This happens for non-power-of-2 length vectors.
-      if (Align & (Align-1))
-        Align = NextPowerOf2(Align);
-      return Align;
-    }
-  }
-
-  // Since we got a "best match" index, just return it.
-  return ABIInfo ? Alignments[BestMatchIdx].ABIAlign
-                 : Alignments[BestMatchIdx].PrefAlign;
-}
-
-namespace {
-
-class StructLayoutMap {
-  typedef DenseMap<StructType*, StructLayout*> LayoutInfoTy;
-  LayoutInfoTy LayoutInfo;
-
-public:
-  virtual ~StructLayoutMap() {
-    // Remove any layouts.
-    for (LayoutInfoTy::iterator I = LayoutInfo.begin(), E = LayoutInfo.end();
-         I != E; ++I) {
-      StructLayout *Value = I->second;
-      Value->~StructLayout();
-      free(Value);
-    }
-  }
-
-  StructLayout *&operator[](StructType *STy) {
-    return LayoutInfo[STy];
-  }
-
-  // for debugging...
-  virtual void dump() const {}
-};
-
-} // end anonymous namespace
-
-TargetData::~TargetData() {
-  delete static_cast<StructLayoutMap*>(LayoutMap);
-}
-
-const StructLayout *TargetData::getStructLayout(StructType *Ty) const {
-  if (!LayoutMap)
-    LayoutMap = new StructLayoutMap();
-
-  StructLayoutMap *STM = static_cast<StructLayoutMap*>(LayoutMap);
-  StructLayout *&SL = (*STM)[Ty];
-  if (SL) return SL;
-
-  // Otherwise, create the struct layout.  Because it is variable length, we
-  // malloc it, then use placement new.
-  int NumElts = Ty->getNumElements();
-  StructLayout *L =
-    (StructLayout *)malloc(sizeof(StructLayout)+(NumElts-1) * sizeof(uint64_t));
-
-  // Set SL before calling StructLayout's ctor.  The ctor could cause other
-  // entries to be added to TheMap, invalidating our reference.
-  SL = L;
-
-  new (L) StructLayout(Ty, *this);
-
-  return L;
-}
-
-std::string TargetData::getStringRepresentation() const {
-  std::string Result;
-  raw_string_ostream OS(Result);
-
-  OS << (LittleEndian ? "e" : "E")
-     << "-p:" << PointerMemSize*8 << ':' << PointerABIAlign*8
-     << ':' << PointerPrefAlign*8
-     << "-S" << StackNaturalAlign*8;
-
-  for (unsigned i = 0, e = Alignments.size(); i != e; ++i) {
-    const TargetAlignElem &AI = Alignments[i];
-    OS << '-' << (char)AI.AlignType << AI.TypeBitWidth << ':'
-       << AI.ABIAlign*8 << ':' << AI.PrefAlign*8;
-  }
-
-  if (!LegalIntWidths.empty()) {
-    OS << "-n" << (unsigned)LegalIntWidths[0];
-
-    for (unsigned i = 1, e = LegalIntWidths.size(); i != e; ++i)
-      OS << ':' << (unsigned)LegalIntWidths[i];
-  }
-  return OS.str();
-}
-
-
-uint64_t TargetData::getTypeSizeInBits(Type *Ty) const {
-  assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
-  switch (Ty->getTypeID()) {
-  case Type::LabelTyID:
-  case Type::PointerTyID:
-    return getPointerSizeInBits();
-  case Type::ArrayTyID: {
-    ArrayType *ATy = cast<ArrayType>(Ty);
-    return getTypeAllocSizeInBits(ATy->getElementType())*ATy->getNumElements();
-  }
-  case Type::StructTyID:
-    // Get the layout annotation... which is lazily created on demand.
-    return getStructLayout(cast<StructType>(Ty))->getSizeInBits();
-  case Type::IntegerTyID:
-    return cast<IntegerType>(Ty)->getBitWidth();
-  case Type::VoidTyID:
-    return 8;
-  case Type::HalfTyID:
-    return 16;
-  case Type::FloatTyID:
-    return 32;
-  case Type::DoubleTyID:
-  case Type::X86_MMXTyID:
-    return 64;
-  case Type::PPC_FP128TyID:
-  case Type::FP128TyID:
-    return 128;
-  // In memory objects this is always aligned to a higher boundary, but
-  // only 80 bits contain information.
-  case Type::X86_FP80TyID:
-    return 80;
-  case Type::VectorTyID:
-    return cast<VectorType>(Ty)->getBitWidth();
-  default:
-    llvm_unreachable("TargetData::getTypeSizeInBits(): Unsupported type");
-  }
-}
-
-/*!
-  \param abi_or_pref Flag that determines which alignment is returned. true
-  returns the ABI alignment, false returns the preferred alignment.
-  \param Ty The underlying type for which alignment is determined.
-
-  Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref
-  == false) for the requested type \a Ty.
- */
-unsigned TargetData::getAlignment(Type *Ty, bool abi_or_pref) const {
-  int AlignType = -1;
-
-  assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
-  switch (Ty->getTypeID()) {
-  // Early escape for the non-numeric types.
-  case Type::LabelTyID:
-  case Type::PointerTyID:
-    return (abi_or_pref
-            ? getPointerABIAlignment()
-            : getPointerPrefAlignment());
-  case Type::ArrayTyID:
-    return getAlignment(cast<ArrayType>(Ty)->getElementType(), abi_or_pref);
-
-  case Type::StructTyID: {
-    // Packed structure types always have an ABI alignment of one.
-    if (cast<StructType>(Ty)->isPacked() && abi_or_pref)
-      return 1;
-
-    // Get the layout annotation... which is lazily created on demand.
-    const StructLayout *Layout = getStructLayout(cast<StructType>(Ty));
-    unsigned Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty);
-    return std::max(Align, Layout->getAlignment());
-  }
-  case Type::IntegerTyID:
-  case Type::VoidTyID:
-    AlignType = INTEGER_ALIGN;
-    break;
-  case Type::HalfTyID:
-  case Type::FloatTyID:
-  case Type::DoubleTyID:
-  // PPC_FP128TyID and FP128TyID have different data contents, but the
-  // same size and alignment, so they look the same here.
-  case Type::PPC_FP128TyID:
-  case Type::FP128TyID:
-  case Type::X86_FP80TyID:
-    AlignType = FLOAT_ALIGN;
-    break;
-  case Type::X86_MMXTyID:
-  case Type::VectorTyID:
-    AlignType = VECTOR_ALIGN;
-    break;
-  default:
-    llvm_unreachable("Bad type for getAlignment!!!");
-  }
-
-  return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSizeInBits(Ty),
-                          abi_or_pref, Ty);
-}
-
-unsigned TargetData::getABITypeAlignment(Type *Ty) const {
-  return getAlignment(Ty, true);
-}
-
-/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
-/// an integer type of the specified bitwidth.
-unsigned TargetData::getABIIntegerTypeAlignment(unsigned BitWidth) const {
-  return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, 0);
-}
-
-
-unsigned TargetData::getCallFrameTypeAlignment(Type *Ty) const {
-  for (unsigned i = 0, e = Alignments.size(); i != e; ++i)
-    if (Alignments[i].AlignType == STACK_ALIGN)
-      return Alignments[i].ABIAlign;
-
-  return getABITypeAlignment(Ty);
-}
-
-unsigned TargetData::getPrefTypeAlignment(Type *Ty) const {
-  return getAlignment(Ty, false);
-}
-
-unsigned TargetData::getPreferredTypeAlignmentShift(Type *Ty) const {
-  unsigned Align = getPrefTypeAlignment(Ty);
-  assert(!(Align & (Align-1)) && "Alignment is not a power of two!");
-  return Log2_32(Align);
-}
-
-/// getIntPtrType - Return an unsigned integer type that is the same size or
-/// greater to the host pointer size.
-IntegerType *TargetData::getIntPtrType(LLVMContext &C) const {
-  return IntegerType::get(C, getPointerSizeInBits());
-}
-
-
-uint64_t TargetData::getIndexedOffset(Type *ptrTy,
-                                      ArrayRef<Value *> Indices) const {
-  Type *Ty = ptrTy;
-  assert(Ty->isPointerTy() && "Illegal argument for getIndexedOffset()");
-  uint64_t Result = 0;
-
-  generic_gep_type_iterator<Value* const*>
-    TI = gep_type_begin(ptrTy, Indices);
-  for (unsigned CurIDX = 0, EndIDX = Indices.size(); CurIDX != EndIDX;
-       ++CurIDX, ++TI) {
-    if (StructType *STy = dyn_cast<StructType>(*TI)) {
-      assert(Indices[CurIDX]->getType() ==
-             Type::getInt32Ty(ptrTy->getContext()) &&
-             "Illegal struct idx");
-      unsigned FieldNo = cast<ConstantInt>(Indices[CurIDX])->getZExtValue();
-
-      // Get structure layout information...
-      const StructLayout *Layout = getStructLayout(STy);
-
-      // Add in the offset, as calculated by the structure layout info...
-      Result += Layout->getElementOffset(FieldNo);
-
-      // Update Ty to refer to current element
-      Ty = STy->getElementType(FieldNo);
-    } else {
-      // Update Ty to refer to current element
-      Ty = cast<SequentialType>(Ty)->getElementType();
-
-      // Get the array index and the size of each array element.
-      if (int64_t arrayIdx = cast<ConstantInt>(Indices[CurIDX])->getSExtValue())
-        Result += (uint64_t)arrayIdx * getTypeAllocSize(Ty);
-    }
-  }
-
-  return Result;
-}
-
-/// getPreferredAlignment - Return the preferred alignment of the specified
-/// global.  This includes an explicitly requested alignment (if the global
-/// has one).
-unsigned TargetData::getPreferredAlignment(const GlobalVariable *GV) const {
-  Type *ElemType = GV->getType()->getElementType();
-  unsigned Alignment = getPrefTypeAlignment(ElemType);
-  unsigned GVAlignment = GV->getAlignment();
-  if (GVAlignment >= Alignment) {
-    Alignment = GVAlignment;
-  } else if (GVAlignment != 0) {
-    Alignment = std::max(GVAlignment, getABITypeAlignment(ElemType));
-  }
-
-  if (GV->hasInitializer() && GVAlignment == 0) {
-    if (Alignment < 16) {
-      // If the global is not external, see if it is large.  If so, give it a
-      // larger alignment.
-      if (getTypeSizeInBits(ElemType) > 128)
-        Alignment = 16;    // 16-byte alignment.
-    }
-  }
-  return Alignment;
-}
-
-/// getPreferredAlignmentLog - Return the preferred alignment of the
-/// specified global, returned in log form.  This includes an explicitly
-/// requested alignment (if the global has one).
-unsigned TargetData::getPreferredAlignmentLog(const GlobalVariable *GV) const {
-  return Log2_32(getPreferredAlignment(GV));
-}
Index: lib/Target/TargetELFWriterInfo.cpp
===================================================================
--- lib/Target/TargetELFWriterInfo.cpp	(revision 165037)
+++ lib/Target/TargetELFWriterInfo.cpp	(working copy)
@@ -12,8 +12,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Function.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetELFWriterInfo.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetMachine.h"
 using namespace llvm;
 
Index: lib/Target/TargetLoweringObjectFile.cpp
===================================================================
--- lib/Target/TargetLoweringObjectFile.cpp	(revision 165037)
+++ lib/Target/TargetLoweringObjectFile.cpp	(working copy)
@@ -22,12 +22,12 @@
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/Dwarf.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/DataLayout.h"
 using namespace llvm;
 
 //===----------------------------------------------------------------------===//
@@ -184,7 +184,7 @@
       // Otherwise, just drop it into a mergable constant section.  If we have
       // a section for this size, use it, otherwise use the arbitrary sized
       // mergable section.
-      switch (TM.getTargetData()->getTypeAllocSize(C->getType())) {
+      switch (TM.getDataLayout()->getTypeAllocSize(C->getType())) {
       case 4:  return SectionKind::getMergeableConst4();
       case 8:  return SectionKind::getMergeableConst8();
       case 16: return SectionKind::getMergeableConst16();
Index: lib/Target/TargetMachineC.cpp
===================================================================
--- lib/Target/TargetMachineC.cpp	(revision 165037)
+++ lib/Target/TargetMachineC.cpp	(working copy)
@@ -14,8 +14,8 @@
 #include "llvm-c/Core.h"
 #include "llvm-c/Target.h"
 #include "llvm-c/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/TargetRegistry.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/CodeGen.h"
@@ -146,7 +146,7 @@
 }
 
 LLVMTargetDataRef LLVMGetTargetMachineData(LLVMTargetMachineRef T) {
-  return wrap(unwrap(T)->getTargetData());
+  return wrap(unwrap(T)->getDataLayout());
 }
 
 LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M,
@@ -158,14 +158,14 @@
 
   std::string error;
 
-  const TargetData* td = TM->getTargetData();
+  const DataLayout* td = TM->getDataLayout();
 
   if (!td) {
-    error = "No TargetData in TargetMachine";
+    error = "No DataLayout in TargetMachine";
     *ErrorMessage = strdup(error.c_str());
     return true;
   }
-  pass.add(new TargetData(*td));
+  pass.add(new DataLayout(*td));
 
   TargetMachine::CodeGenFileType ft;
   switch (codegen) {
@@ -184,7 +184,7 @@
   }
 
   if (TM->addPassesToEmitFile(pass, destf, ft)) {
-    error = "No TargetData in TargetMachine";
+    error = "No DataLayout in TargetMachine";
     *ErrorMessage = strdup(error.c_str());
     return true;
   }
Index: lib/Target/X86/X86AsmPrinter.cpp
===================================================================
--- lib/Target/X86/X86AsmPrinter.cpp	(revision 165037)
+++ lib/Target/X86/X86AsmPrinter.cpp	(working copy)
@@ -682,7 +682,7 @@
     MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
     if (!Stubs.empty()) {
       OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
-      const TargetData *TD = TM.getTargetData();
+      const DataLayout *TD = TM.getDataLayout();
 
       for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
         OutStreamer.EmitLabel(Stubs[i].first);
Index: lib/Target/X86/X86CodeEmitter.cpp
===================================================================
--- lib/Target/X86/X86CodeEmitter.cpp	(revision 165037)
+++ lib/Target/X86/X86CodeEmitter.cpp	(working copy)
@@ -42,7 +42,7 @@
   template<class CodeEmitter>
   class Emitter : public MachineFunctionPass {
     const X86InstrInfo  *II;
-    const TargetData    *TD;
+    const DataLayout    *TD;
     X86TargetMachine    &TM;
     CodeEmitter         &MCE;
     MachineModuleInfo   *MMI;
@@ -56,7 +56,7 @@
       MCE(mce), PICBaseOffset(0), Is64BitMode(false),
       IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
     Emitter(X86TargetMachine &tm, CodeEmitter &mce,
-            const X86InstrInfo &ii, const TargetData &td, bool is64)
+            const X86InstrInfo &ii, const DataLayout &td, bool is64)
       : MachineFunctionPass(ID), II(&ii), TD(&td), TM(tm),
       MCE(mce), PICBaseOffset(0), Is64BitMode(is64),
       IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
@@ -128,7 +128,7 @@
   MCE.setModuleInfo(MMI);
 
   II = TM.getInstrInfo();
-  TD = TM.getTargetData();
+  TD = TM.getDataLayout();
   Is64BitMode = TM.getSubtarget<X86Subtarget>().is64Bit();
   IsPIC = TM.getRelocationModel() == Reloc::PIC_;
 
Index: lib/Target/X86/X86COFFMachineModuleInfo.h
===================================================================
--- lib/Target/X86/X86COFFMachineModuleInfo.h	(revision 165037)
+++ lib/Target/X86/X86COFFMachineModuleInfo.h	(working copy)
@@ -20,7 +20,7 @@
 
 namespace llvm {
   class X86MachineFunctionInfo;
-  class TargetData;
+  class DataLayout;
 
 /// X86COFFMachineModuleInfo - This is a MachineModuleInfoImpl implementation
 /// for X86 COFF targets.
Index: lib/Target/X86/X86ELFWriterInfo.cpp
===================================================================
--- lib/Target/X86/X86ELFWriterInfo.cpp	(revision 165037)
+++ lib/Target/X86/X86ELFWriterInfo.cpp	(working copy)
@@ -16,7 +16,7 @@
 #include "llvm/Function.h"
 #include "llvm/Support/ELF.h"
 #include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetMachine.h"
 
 using namespace llvm;
Index: lib/Target/X86/X86FrameLowering.cpp
===================================================================
--- lib/Target/X86/X86FrameLowering.cpp	(revision 165037)
+++ lib/Target/X86/X86FrameLowering.cpp	(working copy)
@@ -25,7 +25,7 @@
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/ADT/SmallSet.h"
@@ -313,7 +313,7 @@
   if (CSI.empty()) return;
 
   std::vector<MachineMove> &Moves = MMI.getFrameMoves();
-  const TargetData *TD = TM.getTargetData();
+  const DataLayout *TD = TM.getDataLayout();
   bool HasFP = hasFP(MF);
 
   // Calculate amount of bytes used for return address storing.
@@ -715,7 +715,7 @@
   //        ELSE                        => DW_CFA_offset_extended
 
   std::vector<MachineMove> &Moves = MMI.getFrameMoves();
-  const TargetData *TD = MF.getTarget().getTargetData();
+  const DataLayout *TD = MF.getTarget().getDataLayout();
   uint64_t NumBytes = 0;
   int stackGrowth = -TD->getPointerSize();
 
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp	(revision 165037)
+++ lib/Target/X86/X86ISelLowering.cpp	(working copy)
@@ -161,7 +161,7 @@
   X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
 
   RegInfo = TM.getRegisterInfo();
-  TD = getTargetData();
+  TD = getDataLayout();
 
   // Set up the TargetLowering object.
   static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };
@@ -9649,7 +9649,7 @@
 
   EVT ArgVT = Op.getNode()->getValueType(0);
   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
-  uint32_t ArgSize = getTargetData()->getTypeAllocSize(ArgTy);
+  uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
   uint8_t ArgMode;
 
   // Decide which area this value should be read from.
@@ -13910,7 +13910,7 @@
     // alignment is valid.
     unsigned Align = LN0->getAlignment();
     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-    unsigned NewAlign = TLI.getTargetData()->
+    unsigned NewAlign = TLI.getDataLayout()->
       getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
 
     if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT))
Index: lib/Target/X86/X86ISelLowering.h
===================================================================
--- lib/Target/X86/X86ISelLowering.h	(revision 165037)
+++ lib/Target/X86/X86ISelLowering.h	(working copy)
@@ -701,7 +701,7 @@
     /// make the right decision when generating code for different targets.
     const X86Subtarget *Subtarget;
     const X86RegisterInfo *RegInfo;
-    const TargetData *TD;
+    const DataLayout *TD;
 
     /// X86StackPtr - X86 physical register used as stack ptr.
     unsigned X86StackPtr;
Index: lib/Target/X86/X86SelectionDAGInfo.cpp
===================================================================
--- lib/Target/X86/X86SelectionDAGInfo.cpp	(revision 165037)
+++ lib/Target/X86/X86SelectionDAGInfo.cpp	(working copy)
@@ -54,7 +54,7 @@
     if (const char *bzeroEntry =  V &&
         V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
       EVT IntPtr = TLI.getPointerTy();
-      Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
+      Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
       TargetLowering::ArgListTy Args;
       TargetLowering::ArgListEntry Entry;
       Entry.Node = Dst;
Index: lib/Target/X86/X86TargetMachine.cpp
===================================================================
--- lib/Target/X86/X86TargetMachine.cpp	(revision 165037)
+++ lib/Target/X86/X86TargetMachine.cpp	(working copy)
@@ -36,7 +36,7 @@
                                          Reloc::Model RM, CodeModel::Model CM,
                                          CodeGenOpt::Level OL)
   : X86TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false),
-    DataLayout(getSubtargetImpl()->isTargetDarwin() ?
+    DL(getSubtargetImpl()->isTargetDarwin() ?
                "e-p:32:32-f64:32:64-i64:32:64-f80:128:128-f128:128:128-"
                "n8:16:32-S128" :
                (getSubtargetImpl()->isTargetCygMing() ||
@@ -59,7 +59,7 @@
                                          Reloc::Model RM, CodeModel::Model CM,
                                          CodeGenOpt::Level OL)
   : X86TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true),
-    DataLayout("e-p:64:64-s:64-f64:64:64-i64:64:64-f80:128:128-f128:128:128-"
+    DL("e-p:64:64-s:64-f64:64:64-i64:64:64-f80:128:128-f128:128:128-"
                "n8:16:32:64-S128"),
     InstrInfo(*this),
     TSInfo(*this),
Index: lib/Target/X86/X86TargetMachine.h
===================================================================
--- lib/Target/X86/X86TargetMachine.h	(revision 165037)
+++ lib/Target/X86/X86TargetMachine.h	(working copy)
@@ -23,7 +23,7 @@
 #include "X86SelectionDAGInfo.h"
 #include "X86Subtarget.h"
 #include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetFrameLowering.h"
 
 namespace llvm {
@@ -80,7 +80,7 @@
 ///
 class X86_32TargetMachine : public X86TargetMachine {
   virtual void anchor();
-  const TargetData  DataLayout; // Calculates type size & alignment
+  const DataLayout  DL; // Calculates type size & alignment
   X86InstrInfo      InstrInfo;
   X86SelectionDAGInfo TSInfo;
   X86TargetLowering TLInfo;
@@ -90,7 +90,7 @@
                       StringRef CPU, StringRef FS, const TargetOptions &Options,
                       Reloc::Model RM, CodeModel::Model CM,
                       CodeGenOpt::Level OL);
-  virtual const TargetData *getTargetData() const { return &DataLayout; }
+  virtual const DataLayout *getDataLayout() const { return &DL; }
   virtual const X86TargetLowering *getTargetLowering() const {
     return &TLInfo;
   }
@@ -109,7 +109,7 @@
 ///
 class X86_64TargetMachine : public X86TargetMachine {
   virtual void anchor();
-  const TargetData  DataLayout; // Calculates type size & alignment
+  const DataLayout  DL; // Calculates type size & alignment
   X86InstrInfo      InstrInfo;
   X86SelectionDAGInfo TSInfo;
   X86TargetLowering TLInfo;
@@ -119,7 +119,7 @@
                       StringRef CPU, StringRef FS, const TargetOptions &Options,
                       Reloc::Model RM, CodeModel::Model CM,
                       CodeGenOpt::Level OL);
-  virtual const TargetData *getTargetData() const { return &DataLayout; }
+  virtual const DataLayout *getDataLayout() const { return &DL; }
   virtual const X86TargetLowering *getTargetLowering() const {
     return &TLInfo;
   }
Index: lib/Target/XCore/XCoreAsmPrinter.cpp
===================================================================
--- lib/Target/XCore/XCoreAsmPrinter.cpp	(revision 165037)
+++ lib/Target/XCore/XCoreAsmPrinter.cpp	(working copy)
@@ -31,7 +31,7 @@
 #include "llvm/MC/MCStreamer.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLoweringObjectFile.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/StringExtras.h"
@@ -112,7 +112,7 @@
       EmitSpecialLLVMGlobal(GV))
     return;
 
-  const TargetData *TD = TM.getTargetData();
+  const DataLayout *TD = TM.getDataLayout();
   OutStreamer.SwitchSection(getObjFileLowering().SectionForGlobal(GV, Mang,TM));
 
   
Index: lib/Target/XCore/XCoreFrameLowering.cpp
===================================================================
--- lib/Target/XCore/XCoreFrameLowering.cpp	(revision 165037)
+++ lib/Target/XCore/XCoreFrameLowering.cpp	(working copy)
@@ -23,7 +23,7 @@
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/ErrorHandling.h"
 
Index: lib/Target/XCore/XCoreISelLowering.cpp
===================================================================
--- lib/Target/XCore/XCoreISelLowering.cpp	(revision 165037)
+++ lib/Target/XCore/XCoreISelLowering.cpp	(working copy)
@@ -285,7 +285,7 @@
     llvm_unreachable(0);
   }
   SDValue base = getGlobalAddressWrapper(GA, GV, DAG);
-  const TargetData *TD = TM.getTargetData();
+  const DataLayout *TD = TM.getDataLayout();
   unsigned Size = TD->getTypeAllocSize(Ty);
   SDValue offset = DAG.getNode(ISD::MUL, dl, MVT::i32, BuildGetId(DAG, dl),
                        DAG.getConstant(Size, MVT::i32));
@@ -405,7 +405,7 @@
   if (allowsUnalignedMemoryAccesses(LD->getMemoryVT()))
     return SDValue();
 
-  unsigned ABIAlignment = getTargetData()->
+  unsigned ABIAlignment = getDataLayout()->
     getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
   // Leave aligned load alone.
   if (LD->getAlignment() >= ABIAlignment)
@@ -477,7 +477,7 @@
   }
 
   // Lower to a call to __misaligned_load(BasePtr).
-  Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
+  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
 
@@ -507,7 +507,7 @@
   if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
     return SDValue();
   }
-  unsigned ABIAlignment = getTargetData()->
+  unsigned ABIAlignment = getDataLayout()->
     getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
   // Leave aligned store alone.
   if (ST->getAlignment() >= ABIAlignment) {
@@ -536,7 +536,7 @@
   }
 
   // Lower to a call to __misaligned_store(BasePtr, Value).
-  Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
+  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
 
@@ -1499,7 +1499,7 @@
     if (StoreBits % 8) {
       break;
     }
-    unsigned ABIAlignment = getTargetData()->getABITypeAlignment(
+    unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(
         ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
     unsigned Alignment = ST->getAlignment();
     if (Alignment >= ABIAlignment) {
@@ -1570,7 +1570,7 @@
   if (Ty->getTypeID() == Type::VoidTyID)
     return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
 
-  const TargetData *TD = TM.getTargetData();
+  const DataLayout *TD = TM.getDataLayout();
   unsigned Size = TD->getTypeAllocSize(Ty);
   if (AM.BaseGV) {
     return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
Index: lib/Target/XCore/XCoreTargetMachine.cpp
===================================================================
--- lib/Target/XCore/XCoreTargetMachine.cpp	(revision 165037)
+++ lib/Target/XCore/XCoreTargetMachine.cpp	(working copy)
@@ -27,7 +27,7 @@
                                        CodeGenOpt::Level OL)
   : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
     Subtarget(TT, CPU, FS),
-    DataLayout("e-p:32:32:32-a0:0:32-f32:32:32-f64:32:32-i1:8:32-i8:8:32-"
+    DL("e-p:32:32:32-a0:0:32-f32:32:32-f64:32:32-i1:8:32-i8:8:32-"
                "i16:16:32-i32:32:32-i64:32:32-n32"),
     InstrInfo(),
     FrameLowering(Subtarget),
Index: lib/Target/XCore/XCoreTargetMachine.h
===================================================================
--- lib/Target/XCore/XCoreTargetMachine.h	(revision 165037)
+++ lib/Target/XCore/XCoreTargetMachine.h	(working copy)
@@ -20,13 +20,13 @@
 #include "XCoreISelLowering.h"
 #include "XCoreSelectionDAGInfo.h"
 #include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 
 namespace llvm {
 
 class XCoreTargetMachine : public LLVMTargetMachine {
   XCoreSubtarget Subtarget;
-  const TargetData DataLayout;       // Calculates type size & alignment
+  const DataLayout DL;       // Calculates type size & alignment
   XCoreInstrInfo InstrInfo;
   XCoreFrameLowering FrameLowering;
   XCoreTargetLowering TLInfo;
@@ -53,7 +53,7 @@
   virtual const TargetRegisterInfo *getRegisterInfo() const {
     return &InstrInfo.getRegisterInfo();
   }
-  virtual const TargetData       *getTargetData() const { return &DataLayout; }
+  virtual const DataLayout       *getDataLayout() const { return &DL; }
 
   // Pass Pipeline Configuration
   virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
Index: lib/Transforms/InstCombine/InstCombine.h
===================================================================
--- lib/Transforms/InstCombine/InstCombine.h	(revision 165037)
+++ lib/Transforms/InstCombine/InstCombine.h	(working copy)
@@ -21,7 +21,7 @@
 
 namespace llvm {
   class CallSite;
-  class TargetData;
+  class DataLayout;
   class TargetLibraryInfo;
   class DbgDeclareInst;
   class MemIntrinsic;
@@ -71,7 +71,7 @@
 class LLVM_LIBRARY_VISIBILITY InstCombiner
                              : public FunctionPass,
                                public InstVisitor<InstCombiner, Instruction*> {
-  TargetData *TD;
+  DataLayout *TD;
   TargetLibraryInfo *TLI;
   bool MadeIRChange;
 public:
@@ -95,7 +95,7 @@
 
   virtual void getAnalysisUsage(AnalysisUsage &AU) const;
 
-  TargetData *getTargetData() const { return TD; }
+  DataLayout *getDataLayout() const { return TD; }
 
   TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }
 
@@ -218,7 +218,7 @@
                           Type *Ty);
 
   Instruction *visitCallSite(CallSite CS);
-  Instruction *tryOptimizeCall(CallInst *CI, const TargetData *TD);
+  Instruction *tryOptimizeCall(CallInst *CI, const DataLayout *TD);
   bool transformConstExprCastCall(CallSite CS);
   Instruction *transformCallThroughTrampoline(CallSite CS,
                                               IntrinsicInst *Tramp);
Index: lib/Transforms/InstCombine/InstCombineAddSub.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineAddSub.cpp	(revision 165037)
+++ lib/Transforms/InstCombine/InstCombineAddSub.cpp	(working copy)
@@ -13,7 +13,7 @@
 
 #include "InstCombine.h"
 #include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
 #include "llvm/Support/PatternMatch.h"
 using namespace llvm;
Index: lib/Transforms/InstCombine/InstCombineCalls.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCalls.cpp	(revision 165037)
+++ lib/Transforms/InstCombine/InstCombineCalls.cpp	(working copy)
@@ -13,7 +13,7 @@
 
 #include "InstCombine.h"
 #include "llvm/Support/CallSite.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/Transforms/Utils/BuildLibCalls.h"
 #include "llvm/Transforms/Utils/Local.h"
@@ -754,7 +754,7 @@
 /// passed through the varargs area, we can eliminate the use of the cast.
 static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                          const CastInst * const CI,
-                                         const TargetData * const TD,
+                                         const DataLayout * const TD,
                                          const int ix) {
   if (!CI->isLosslessCast())
     return false;
@@ -812,7 +812,7 @@
 // Currently we're only working with the checking functions, memcpy_chk,
 // mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
 // strcat_chk and strncat_chk.
-Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
+Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *TD) {
   if (CI->getCalledFunction() == 0) return 0;
 
   InstCombineFortifiedLibCalls Simplifier(this);
@@ -984,7 +984,7 @@
     Changed = true;
   }
 
-  // Try to optimize the call if possible, we require TargetData for most of
+  // Try to optimize the call if possible, we require DataLayout for most of
   // this.  None of these calls are seen as possibly dead so go ahead and
   // delete the instruction now.
   if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
Index: lib/Transforms/InstCombine/InstCombineCasts.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCasts.cpp	(revision 165037)
+++ lib/Transforms/InstCombine/InstCombineCasts.cpp	(working copy)
@@ -13,7 +13,7 @@
 
 #include "InstCombine.h"
 #include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Support/PatternMatch.h"
 using namespace llvm;
@@ -78,7 +78,7 @@
 /// try to eliminate the cast by moving the type information into the alloc.
 Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
                                                    AllocaInst &AI) {
-  // This requires TargetData to get the alloca alignment and size information.
+  // This requires DataLayout to get the alloca alignment and size information.
   if (!TD) return 0;
 
   PointerType *PTy = cast<PointerType>(CI.getType());
@@ -229,7 +229,7 @@
   const CastInst *CI, ///< The first cast instruction
   unsigned opcode,       ///< The opcode of the second cast instruction
   Type *DstTy,     ///< The target type for the second cast instruction
-  TargetData *TD         ///< The target data for pointer size
+  DataLayout *TD         ///< The target data for pointer size
 ) {
 
   Type *SrcTy = CI->getOperand(0)->getType();   // A from above
Index: lib/Transforms/InstCombine/InstCombineCompares.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCompares.cpp	(revision 165037)
+++ lib/Transforms/InstCombine/InstCombineCompares.cpp	(working copy)
@@ -16,7 +16,7 @@
 #include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Support/ConstantRange.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
@@ -474,7 +474,7 @@
 /// If we can't emit an optimized form for this expression, this returns null.
 ///
 static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
-  TargetData &TD = *IC.getTargetData();
+  DataLayout &TD = *IC.getDataLayout();
   gep_type_iterator GTI = gep_type_begin(GEP);
 
   // Check to see if this gep only has a single variable index.  If so, and if
Index: lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp	(revision 165037)
+++ lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp	(working copy)
@@ -14,7 +14,7 @@
 #include "InstCombine.h"
 #include "llvm/IntrinsicInst.h"
 #include "llvm/Analysis/Loads.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/ADT/Statistic.h"
@@ -152,7 +152,7 @@
 
 /// getPointeeAlignment - Compute the minimum alignment of the value pointed
 /// to by the given pointer.
-static unsigned getPointeeAlignment(Value *V, const TargetData &TD) {
+static unsigned getPointeeAlignment(Value *V, const DataLayout &TD) {
   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
     if (CE->getOpcode() == Instruction::BitCast ||
         (CE->getOpcode() == Instruction::GetElementPtr &&
@@ -297,7 +297,7 @@
 
 /// InstCombineLoadCast - Fold 'load (cast P)' -> cast (load P)' when possible.
 static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
-                                        const TargetData *TD) {
+                                        const DataLayout *TD) {
   User *CI = cast<User>(LI.getOperand(0));
   Value *CastOp = CI->getOperand(0);
 
@@ -327,14 +327,14 @@
             SrcPTy = SrcTy->getElementType();
           }
 
-      if (IC.getTargetData() &&
+      if (IC.getDataLayout() &&
           (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() || 
             SrcPTy->isVectorTy()) &&
           // Do not allow turning this into a load of an integer, which is then
           // casted to a pointer, this pessimizes pointer analysis a lot.
           (SrcPTy->isPointerTy() == LI.getType()->isPointerTy()) &&
-          IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
-               IC.getTargetData()->getTypeSizeInBits(DestPTy)) {
+          IC.getDataLayout()->getTypeSizeInBits(SrcPTy) ==
+               IC.getDataLayout()->getTypeSizeInBits(DestPTy)) {
 
         // Okay, we are casting from one integer or pointer type to another of
         // the same size.  Instead of casting the pointer before the load, cast
@@ -512,11 +512,11 @@
   
   // If the pointers point into different address spaces or if they point to
   // values with different sizes, we can't do the transformation.
-  if (!IC.getTargetData() ||
+  if (!IC.getDataLayout() ||
       SrcTy->getAddressSpace() != 
         cast<PointerType>(CI->getType())->getAddressSpace() ||
-      IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
-      IC.getTargetData()->getTypeSizeInBits(DestPTy))
+      IC.getDataLayout()->getTypeSizeInBits(SrcPTy) !=
+      IC.getDataLayout()->getTypeSizeInBits(DestPTy))
     return 0;
 
   // Okay, we are casting from one integer or pointer type to another of
Index: lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineMulDivRem.cpp	(revision 165037)
+++ lib/Transforms/InstCombine/InstCombineMulDivRem.cpp	(working copy)
@@ -37,7 +37,7 @@
   if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(PowerOf2), m_Value(A))),
                       m_Value(B))) &&
       // The "1" can be any value known to be a power of 2.
-      isPowerOfTwo(PowerOf2, IC.getTargetData())) {
+      isPowerOfTwo(PowerOf2, IC.getDataLayout())) {
     A = IC.Builder->CreateSub(A, B);
     return IC.Builder->CreateShl(PowerOf2, A);
   }
@@ -46,7 +46,7 @@
   // inexact.  Similarly for <<.
   if (BinaryOperator *I = dyn_cast<BinaryOperator>(V))
     if (I->isLogicalShift() &&
-        isPowerOfTwo(I->getOperand(0), IC.getTargetData())) {
+        isPowerOfTwo(I->getOperand(0), IC.getDataLayout())) {
       // We know that this is an exact/nuw shift and that the input is a
       // non-zero context as well.
       if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC)) {
Index: lib/Transforms/InstCombine/InstCombinePHI.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombinePHI.cpp	(revision 165037)
+++ lib/Transforms/InstCombine/InstCombinePHI.cpp	(working copy)
@@ -13,7 +13,7 @@
 
 #include "InstCombine.h"
 #include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/STLExtras.h"
 using namespace llvm;
Index: lib/Transforms/InstCombine/InstCombineSelect.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineSelect.cpp	(revision 165037)
+++ lib/Transforms/InstCombine/InstCombineSelect.cpp	(working copy)
@@ -287,7 +287,7 @@
 /// SimplifyWithOpReplaced - See if V simplifies when its operand Op is
 /// replaced with RepOp.
 static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
-                                     const TargetData *TD,
+                                     const DataLayout *TD,
                                      const TargetLibraryInfo *TLI) {
   // Trivial replacement.
   if (V == Op)
Index: lib/Transforms/InstCombine/InstCombineShifts.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineShifts.cpp	(revision 165037)
+++ lib/Transforms/InstCombine/InstCombineShifts.cpp	(working copy)
@@ -190,7 +190,7 @@
       V = IC.Builder->CreateLShr(C, NumBits);
     // If we got a constantexpr back, try to simplify it with TD info.
     if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
-      V = ConstantFoldConstantExpression(CE, IC.getTargetData(),
+      V = ConstantFoldConstantExpression(CE, IC.getDataLayout(),
                                          IC.getTargetLibraryInfo());
     return V;
   }
Index: lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp	(revision 165037)
+++ lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp	(working copy)
@@ -14,7 +14,7 @@
 
 
 #include "InstCombine.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/IntrinsicInst.h"
 
 using namespace llvm;
Index: lib/Transforms/InstCombine/InstructionCombining.cpp
===================================================================
--- lib/Transforms/InstCombine/InstructionCombining.cpp	(revision 165037)
+++ lib/Transforms/InstCombine/InstructionCombining.cpp	(working copy)
@@ -40,7 +40,7 @@
 #include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Support/CFG.h"
@@ -88,7 +88,7 @@
 
 
 Value *InstCombiner::EmitGEPOffset(User *GEP) {
-  return llvm::EmitGEPOffset(Builder, *getTargetData(), GEP);
+  return llvm::EmitGEPOffset(Builder, *getDataLayout(), GEP);
 }
 
 /// ShouldChangeType - Return true if it is desirable to convert a computation
@@ -1854,7 +1854,7 @@
 static bool AddReachableCodeToWorklist(BasicBlock *BB,
                                        SmallPtrSet &Visited,
                                        InstCombiner &IC,
-                                       const TargetData *TD,
+                                       const DataLayout *TD,
                                        const TargetLibraryInfo *TLI) {
   bool MadeIRChange = false;
   SmallVector Worklist;
@@ -2120,7 +2120,7 @@
 
 
 bool InstCombiner::runOnFunction(Function &F) {
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
 
   /// Builder - This is an IRBuilder that automatically inserts new
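Every pass touched above follows the same lookup pattern: it now includes "llvm/Support/DataLayout.h", keeps a DataLayout pointer member, and asks the pass manager for the (optional) DataLayout at the top of runOnFunction/runOnModule. A minimal sketch of that optional-lookup pattern, assuming a hypothetical pass name and a placeholder body that are not part of this patch:

    #include "llvm/Pass.h"
    #include "llvm/Function.h"
    #include "llvm/Support/DataLayout.h"
    using namespace llvm;

    namespace {
      // Hypothetical pass, shown only to illustrate the DataLayout lookup.
      struct ExamplePass : public FunctionPass {
        static char ID;
        DataLayout *TD;
        ExamplePass() : FunctionPass(ID), TD(0) {}

        virtual bool runOnFunction(Function &F) {
          // DataLayout is an optional ImmutablePass; tolerate its absence,
          // exactly as the transforms above do with "if (!TD) return false;".
          TD = getAnalysisIfAvailable<DataLayout>();
          if (!TD)
            return false;
          // ... use TD->getTypeSizeInBits(...), TD->getPointerSize(), etc.
          return false;
        }
      };
    }
    char ExamplePass::ID = 0;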
Index: lib/Transforms/Instrumentation/AddressSanitizer.cpp
===================================================================
--- lib/Transforms/Instrumentation/AddressSanitizer.cpp	(revision 165037)
+++ lib/Transforms/Instrumentation/AddressSanitizer.cpp	(working copy)
@@ -35,7 +35,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/system_error.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Transforms/Instrumentation.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -207,7 +207,7 @@
   bool HasDynamicInitializer(GlobalVariable *G);
 
   LLVMContext *C;
-  TargetData *TD;
+  DataLayout *TD;
   uint64_t MappingOffset;
   int MappingScale;
   size_t RedzoneSize;
@@ -736,7 +736,7 @@
 // virtual
 bool AddressSanitizer::runOnModule(Module &M) {
   // Initialize the private fields. No one has accessed them before.
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   if (!TD)
     return false;
   BL.reset(new BlackList(ClBlackListFile));
Index: lib/Transforms/Instrumentation/BoundsChecking.cpp
===================================================================
--- lib/Transforms/Instrumentation/BoundsChecking.cpp	(revision 165037)
+++ lib/Transforms/Instrumentation/BoundsChecking.cpp	(working copy)
@@ -23,7 +23,7 @@
 #include "llvm/Support/InstIterator.h"
 #include "llvm/Support/TargetFolder.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Transforms/Instrumentation.h"
 using namespace llvm;
@@ -48,12 +48,12 @@
     virtual bool runOnFunction(Function &F);
 
     virtual void getAnalysisUsage(AnalysisUsage &AU) const {
-      AU.addRequired<TargetData>();
+      AU.addRequired<DataLayout>();
       AU.addRequired<TargetLibraryInfo>();
     }
 
   private:
-    const TargetData *TD;
+    const DataLayout *TD;
     const TargetLibraryInfo *TLI;
     ObjectSizeOffsetEvaluator *ObjSizeEval;
     BuilderTy *Builder;
@@ -168,7 +168,7 @@
 }
 
 bool BoundsChecking::runOnFunction(Function &F) {
-  TD = &getAnalysis<TargetData>();
+  TD = &getAnalysis<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
 
   TrapBB = 0;
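BoundsChecking is the one pass in these hunks that takes DataLayout as a hard requirement: getAnalysisUsage() calls addRequired<DataLayout>() and runOnFunction() uses getAnalysis<DataLayout>(), which asserts the analysis is present, instead of the optional getAnalysisIfAvailable<DataLayout>(). A minimal sketch of that required-analysis form, with a hypothetical pass name (not from this patch):

    #include "llvm/Pass.h"
    #include "llvm/Function.h"
    #include "llvm/Support/DataLayout.h"
    using namespace llvm;

    namespace {
      // Hypothetical pass illustrating the required (non-optional) lookup.
      struct RequiresLayout : public FunctionPass {
        static char ID;
        const DataLayout *TD;
        RequiresLayout() : FunctionPass(ID), TD(0) {}

        virtual void getAnalysisUsage(AnalysisUsage &AU) const {
          // Declaring the dependency lets the pass manager schedule it and
          // makes the getAnalysis<DataLayout>() call below safe.
          AU.addRequired<DataLayout>();
        }

        virtual bool runOnFunction(Function &F) {
          TD = &getAnalysis<DataLayout>();  // asserts if the dependency is missing
          return false;
        }
      };
    }
    char RequiresLayout::ID = 0;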
Index: lib/Transforms/Instrumentation/ThreadSanitizer.cpp
===================================================================
--- lib/Transforms/Instrumentation/ThreadSanitizer.cpp	(revision 165037)
+++ lib/Transforms/Instrumentation/ThreadSanitizer.cpp	(working copy)
@@ -38,7 +38,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Transforms/Instrumentation.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/ModuleUtils.h"
@@ -76,7 +76,7 @@
   bool addrPointsToConstantData(Value *Addr);
   int getMemoryAccessFuncIndex(Value *Addr);
 
-  TargetData *TD;
+  DataLayout *TD;
   OwningPtr<BlackList> BL;
   IntegerType *OrdTy;
   // Callbacks to run-time library are computed in doInitialization.
@@ -118,7 +118,7 @@
 }
 
 bool ThreadSanitizer::doInitialization(Module &M) {
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   if (!TD)
     return false;
   BL.reset(new BlackList(ClBlackListFile));
Index: lib/Transforms/IPO/ConstantMerge.cpp
===================================================================
--- lib/Transforms/IPO/ConstantMerge.cpp	(revision 165037)
+++ lib/Transforms/IPO/ConstantMerge.cpp	(working copy)
@@ -23,7 +23,7 @@
 #include "llvm/DerivedTypes.h"
 #include "llvm/Module.h"
 #include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/PointerIntPair.h"
 #include "llvm/ADT/SmallPtrSet.h"
@@ -50,7 +50,7 @@
     // alignment to a concrete value.
     unsigned getAlignment(GlobalVariable *GV) const;
 
-    const TargetData *TD;
+    const DataLayout *TD;
   };
 }
 
@@ -98,7 +98,7 @@
 }
 
 bool ConstantMerge::runOnModule(Module &M) {
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
 
   // Find all the globals that are marked "used".  These cannot be merged.
   SmallPtrSet UsedGlobals;
@@ -107,7 +107,7 @@
   
   // Map unique  pairs to globals.  We don't
   // want to merge globals of unknown alignment with those of explicit
-  // alignment.  If we have TargetData, we always know the alignment.
+  // alignment.  If we have DataLayout, we always know the alignment.
   DenseMap, GlobalVariable*> CMap;
 
   // Replacements - This vector contains a list of replacements to perform.
Index: lib/Transforms/IPO/GlobalOpt.cpp
===================================================================
--- lib/Transforms/IPO/GlobalOpt.cpp	(revision 165037)
+++ lib/Transforms/IPO/GlobalOpt.cpp	(working copy)
@@ -25,7 +25,7 @@
 #include "llvm/Pass.h"
 #include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Support/CallSite.h"
 #include "llvm/Support/Debug.h"
@@ -83,7 +83,7 @@
                                const GlobalStatus &GS);
     bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
 
-    TargetData *TD;
+    DataLayout *TD;
     TargetLibraryInfo *TLI;
   };
 }
@@ -464,7 +464,7 @@
 /// quick scan over the use list to clean up the easy and obvious cruft.  This
 /// returns true if it made a change.
 static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
-                                       TargetData *TD, TargetLibraryInfo *TLI) {
+                                       DataLayout *TD, TargetLibraryInfo *TLI) {
   bool Changed = false;
   for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
     User *U = *UI++;
@@ -656,7 +656,7 @@
 /// behavior of the program in a more fine-grained way.  We have determined that
 /// this transformation is safe already.  We return the first global variable we
 /// insert so that the caller can reprocess it.
-static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
+static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
   // Make sure this global only has simple uses that we can SRA.
   if (!GlobalUsersSafeToSRA(GV))
     return 0;
@@ -932,7 +932,7 @@
 /// if the loaded value is dynamically null, then we know that they cannot be
 /// reachable with a null optimize away the load.
 static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
-                                            TargetData *TD,
+                                            DataLayout *TD,
                                             TargetLibraryInfo *TLI) {
   bool Changed = false;
 
@@ -996,7 +996,7 @@
 /// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
 /// instructions that are foldable.
 static void ConstantPropUsersOf(Value *V,
-                                TargetData *TD, TargetLibraryInfo *TLI) {
+                                DataLayout *TD, TargetLibraryInfo *TLI) {
   for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
     if (Instruction *I = dyn_cast<Instruction>(*UI++))
       if (Constant *NewC = ConstantFoldInstruction(I, TD, TLI)) {
@@ -1019,7 +1019,7 @@
                                                      CallInst *CI,
                                                      Type *AllocTy,
                                                      ConstantInt *NElements,
-                                                     TargetData *TD,
+                                                     DataLayout *TD,
                                                      TargetLibraryInfo *TLI) {
   DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI << '\n');
 
@@ -1468,7 +1468,7 @@
 /// PerformHeapAllocSRoA - CI is an allocation of an array of structures.  Break
 /// it up into multiple allocations of arrays of the fields.
 static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
-                                            Value *NElems, TargetData *TD,
+                                            Value *NElems, DataLayout *TD,
                                             const TargetLibraryInfo *TLI) {
   DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *CI << '\n');
   Type *MAT = getMallocAllocatedType(CI, TLI);
@@ -1660,7 +1660,7 @@
                                                Type *AllocTy,
                                                AtomicOrdering Ordering,
                                                Module::global_iterator &GVI,
-                                               TargetData *TD,
+                                               DataLayout *TD,
                                                TargetLibraryInfo *TLI) {
   if (!TD)
     return false;
@@ -1759,7 +1759,7 @@
 static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
                                      AtomicOrdering Ordering,
                                      Module::global_iterator &GVI,
-                                     TargetData *TD, TargetLibraryInfo *TLI) {
+                                     DataLayout *TD, TargetLibraryInfo *TLI) {
   // Ignore no-op GEPs and bitcasts.
   StoredOnceVal = StoredOnceVal->stripPointerCasts();
 
@@ -2002,7 +2002,7 @@
     ++NumMarked;
     return true;
   } else if (!GV->getInitializer()->getType()->isSingleValueType()) {
-    if (TargetData *TD = getAnalysisIfAvailable<TargetData>())
+    if (DataLayout *TD = getAnalysisIfAvailable<DataLayout>())
       if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) {
         GVI = FirstNewGV;  // Don't skip the newly produced globals!
         return true;
@@ -2256,7 +2256,7 @@
 static inline bool 
 isSimpleEnoughValueToCommit(Constant *C,
                             SmallPtrSet &SimpleConstants,
-                            const TargetData *TD);
+                            const DataLayout *TD);
 
 
 /// isSimpleEnoughValueToCommit - Return true if the specified constant can be
@@ -2269,7 +2269,7 @@
 /// time.
 static bool isSimpleEnoughValueToCommitHelper(Constant *C,
                                    SmallPtrSet &SimpleConstants,
-                                   const TargetData *TD) {
+                                   const DataLayout *TD) {
   // Simple integer, undef, constant aggregate zero, global addresses, etc are
   // all supported.
   if (C->getNumOperands() == 0 || isa(C) ||
@@ -2324,7 +2324,7 @@
 static inline bool 
 isSimpleEnoughValueToCommit(Constant *C,
                             SmallPtrSet &SimpleConstants,
-                            const TargetData *TD) {
+                            const DataLayout *TD) {
   // If we already checked this constant, we win.
   if (!SimpleConstants.insert(C)) return true;
   // Check the constant.
@@ -2455,7 +2455,7 @@
 /// Once an evaluation call fails, the evaluation object should not be reused.
 class Evaluator {
 public:
-  Evaluator(const TargetData *TD, const TargetLibraryInfo *TLI)
+  Evaluator(const DataLayout *TD, const TargetLibraryInfo *TLI)
     : TD(TD), TLI(TLI) {
     ValueStack.push_back(new DenseMap<Value*, Constant*>);
   }
@@ -2536,7 +2536,7 @@
   /// simple enough to live in a static initializer of a global.
   SmallPtrSet SimpleConstants;
 
-  const TargetData *TD;
+  const DataLayout *TD;
   const TargetLibraryInfo *TLI;
 };
 
@@ -2874,7 +2874,7 @@
 
 /// EvaluateStaticConstructor - Evaluate static constructors in the function, if
 /// we can.  Return true if we can, false otherwise.
-static bool EvaluateStaticConstructor(Function *F, const TargetData *TD,
+static bool EvaluateStaticConstructor(Function *F, const DataLayout *TD,
                                       const TargetLibraryInfo *TLI) {
   // Call the function.
   Evaluator Eval(TD, TLI);
@@ -3115,7 +3115,7 @@
 bool GlobalOpt::runOnModule(Module &M) {
   bool Changed = false;
 
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
 
   // Try to find the llvm.globalctors list.
Index: lib/Transforms/IPO/InlineAlways.cpp
===================================================================
--- lib/Transforms/IPO/InlineAlways.cpp	(revision 165037)
+++ lib/Transforms/IPO/InlineAlways.cpp	(working copy)
@@ -23,7 +23,7 @@
 #include "llvm/Support/CallSite.h"
 #include "llvm/Transforms/IPO.h"
 #include "llvm/Transforms/IPO/InlinerPass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/ADT/SmallPtrSet.h"
 
 using namespace llvm;
Index: lib/Transforms/IPO/Inliner.cpp
===================================================================
--- lib/Transforms/IPO/Inliner.cpp	(revision 165037)
+++ lib/Transforms/IPO/Inliner.cpp	(working copy)
@@ -19,7 +19,7 @@
 #include "llvm/IntrinsicInst.h"
 #include "llvm/Analysis/CallGraph.h"
 #include "llvm/Analysis/InlineCost.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Transforms/IPO/InlinerPass.h"
 #include "llvm/Transforms/Utils/Cloning.h"
@@ -340,7 +340,7 @@
 
 bool Inliner::runOnSCC(CallGraphSCC &SCC) {
   CallGraph &CG = getAnalysis<CallGraph>();
-  const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+  const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
   const TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
 
   SmallPtrSet SCCFunctions;
Index: lib/Transforms/IPO/InlineSimple.cpp
===================================================================
--- lib/Transforms/IPO/InlineSimple.cpp	(revision 165037)
+++ lib/Transforms/IPO/InlineSimple.cpp	(working copy)
@@ -22,7 +22,7 @@
 #include "llvm/Support/CallSite.h"
 #include "llvm/Transforms/IPO.h"
 #include "llvm/Transforms/IPO/InlinerPass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 
 using namespace llvm;
 
@@ -62,7 +62,7 @@
 // doInitialization - Initializes the vector of functions that have been
 // annotated with the noinline attribute.
 bool SimpleInliner::doInitialization(CallGraph &CG) {
-  CA.setTargetData(getAnalysisIfAvailable<TargetData>());
+  CA.setDataLayout(getAnalysisIfAvailable<DataLayout>());
   return false;
 }
 
Index: lib/Transforms/IPO/MergeFunctions.cpp
===================================================================
--- lib/Transforms/IPO/MergeFunctions.cpp	(revision 165037)
+++ lib/Transforms/IPO/MergeFunctions.cpp	(working copy)
@@ -63,7 +63,7 @@
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/ValueHandle.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include 
 using namespace llvm;
 
@@ -92,19 +92,19 @@
 namespace {
 
 /// ComparableFunction - A struct that pairs together functions with a
-/// TargetData so that we can keep them together as elements in the DenseSet.
+/// DataLayout so that we can keep them together as elements in the DenseSet.
 class ComparableFunction {
 public:
   static const ComparableFunction EmptyKey;
   static const ComparableFunction TombstoneKey;
-  static TargetData * const LookupOnly;
+  static DataLayout * const LookupOnly;
 
-  ComparableFunction(Function *Func, TargetData *TD)
+  ComparableFunction(Function *Func, DataLayout *TD)
     : Func(Func), Hash(profileFunction(Func)), TD(TD) {}
 
   Function *getFunc() const { return Func; }
   unsigned getHash() const { return Hash; }
-  TargetData *getTD() const { return TD; }
+  DataLayout *getTD() const { return TD; }
 
   // Drops AssertingVH reference to the function. Outside of debug mode, this
   // does nothing.
@@ -120,13 +120,13 @@
 
   AssertingVH<Function> Func;
   unsigned Hash;
-  TargetData *TD;
+  DataLayout *TD;
 };
 
 const ComparableFunction ComparableFunction::EmptyKey = ComparableFunction(0);
 const ComparableFunction ComparableFunction::TombstoneKey =
     ComparableFunction(1);
-TargetData *const ComparableFunction::LookupOnly = (TargetData*)(-1);
+DataLayout *const ComparableFunction::LookupOnly = (DataLayout*)(-1);
 
 }
 
@@ -150,12 +150,12 @@
 namespace {
 
 /// FunctionComparator - Compares two functions to determine whether or not
-/// they will generate machine code with the same behaviour. TargetData is
+/// they will generate machine code with the same behaviour. DataLayout is
 /// used if available. The comparator always fails conservatively (erring on the
 /// side of claiming that two functions are different).
 class FunctionComparator {
 public:
-  FunctionComparator(const TargetData *TD, const Function *F1,
+  FunctionComparator(const DataLayout *TD, const Function *F1,
                      const Function *F2)
     : F1(F1), F2(F2), TD(TD) {}
 
@@ -190,7 +190,7 @@
   // The two functions undergoing comparison.
   const Function *F1, *F2;
 
-  const TargetData *TD;
+  const DataLayout *TD;
 
   DenseMap id_map;
   DenseSet seen_values;
@@ -591,8 +591,8 @@
   /// to modify it.
   FnSetType FnSet;
 
-  /// TargetData for more accurate GEP comparisons. May be NULL.
-  TargetData *TD;
+  /// DataLayout for more accurate GEP comparisons. May be NULL.
+  DataLayout *TD;
 
   /// Whether or not the target supports global aliases.
   bool HasGlobalAliases;
@@ -609,7 +609,7 @@
 
 bool MergeFunctions::runOnModule(Module &M) {
   bool Changed = false;
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
 
   for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
     if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
Index: lib/Transforms/Scalar/CodeGenPrepare.cpp
===================================================================
--- lib/Transforms/Scalar/CodeGenPrepare.cpp	(revision 165037)
+++ lib/Transforms/Scalar/CodeGenPrepare.cpp	(working copy)
@@ -38,7 +38,7 @@
 #include "llvm/Support/PatternMatch.h"
 #include "llvm/Support/ValueHandle.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Transforms/Utils/AddrModeMatcher.h"
@@ -622,7 +622,7 @@
     // happens.
     WeakVH IterHandle(CurInstIterator);
 
-    replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getTargetData() : 0,
+    replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getDataLayout() : 0,
                                   TLInfo, ModifiedDT ? 0 : DT);
 
     // If the iterator instruction was recursively deleted, start over at the
@@ -646,8 +646,8 @@
   // From here on out we're working with named functions.
   if (CI->getCalledFunction() == 0) return false;
 
-  // We'll need TargetData from here on out.
-  const TargetData *TD = TLI ? TLI->getTargetData() : 0;
+  // We'll need DataLayout from here on out.
+  const DataLayout *TD = TLI ? TLI->getDataLayout() : 0;
   if (!TD) return false;
 
   // Lower all default uses of _chk calls.  This is very similar
@@ -929,7 +929,7 @@
     DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                  << *MemoryInst);
     Type *IntPtrTy =
-          TLI->getTargetData()->getIntPtrType(AccessTy->getContext());
+          TLI->getDataLayout()->getIntPtrType(AccessTy->getContext());
 
     Value *Result = 0;
 
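CodeGenPrepare (and GlobalMerge below) reaches the layout through TargetLowering rather than through the pass manager, so the accessor it calls is renamed from getTargetData() to getDataLayout(). A small sketch of that usage, with an illustrative helper name that is not part of this patch:

    #include "llvm/LLVMContext.h"
    #include "llvm/Type.h"
    #include "llvm/Support/DataLayout.h"
    #include "llvm/Target/TargetLowering.h"
    using namespace llvm;

    // Illustrative helper: the integer type wide enough to hold a pointer,
    // obtained through TargetLowering's renamed accessor.
    static Type *getIntPtrTypeFor(const TargetLowering *TLI, LLVMContext &Ctx) {
      const DataLayout *TD = TLI ? TLI->getDataLayout() : 0;
      if (!TD)
        return 0;                     // no layout information available
      return TD->getIntPtrType(Ctx);  // same call as in the hunk above
    }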
Index: lib/Transforms/Scalar/ConstantProp.cpp
===================================================================
--- lib/Transforms/Scalar/ConstantProp.cpp	(revision 165037)
+++ lib/Transforms/Scalar/ConstantProp.cpp	(working copy)
@@ -24,7 +24,7 @@
 #include "llvm/Constant.h"
 #include "llvm/Instruction.h"
 #include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Support/InstIterator.h"
 #include "llvm/ADT/Statistic.h"
@@ -67,7 +67,7 @@
       WorkList.insert(&*i);
   }
   bool Changed = false;
-  TargetData *TD = getAnalysisIfAvailable<TargetData>();
+  DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
   TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
 
   while (!WorkList.empty()) {
Index: lib/Transforms/Scalar/DeadStoreElimination.cpp
===================================================================
--- lib/Transforms/Scalar/DeadStoreElimination.cpp	(revision 165037)
+++ lib/Transforms/Scalar/DeadStoreElimination.cpp	(working copy)
@@ -29,9 +29,9 @@
 #include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
 #include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/Statistic.h"
@@ -199,7 +199,7 @@
     // If we don't have target data around, an unknown size in Location means
     // that we should use the size of the pointee type.  This isn't valid for
     // memset/memcpy, which writes more than an i8.
-    if (Loc.Size == AliasAnalysis::UnknownSize && AA.getTargetData() == 0)
+    if (Loc.Size == AliasAnalysis::UnknownSize && AA.getDataLayout() == 0)
       return AliasAnalysis::Location();
     return Loc;
   }
@@ -213,7 +213,7 @@
     // If we don't have target data around, an unknown size in Location means
     // that we should use the size of the pointee type.  This isn't valid for
     // init.trampoline, which writes more than an i8.
-    if (AA.getTargetData() == 0) return AliasAnalysis::Location();
+    if (AA.getDataLayout() == 0) return AliasAnalysis::Location();
 
     // FIXME: We don't know the size of the trampoline, so we can't really
     // handle it here.
@@ -318,7 +318,7 @@
 
 static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
   uint64_t Size;
-  if (getObjectSize(V, Size, AA.getTargetData(), AA.getTargetLibraryInfo()))
+  if (getObjectSize(V, Size, AA.getDataLayout(), AA.getTargetLibraryInfo()))
     return Size;
   return AliasAnalysis::UnknownSize;
 }
@@ -351,10 +351,10 @@
     // comparison.
     if (Later.Size == AliasAnalysis::UnknownSize ||
         Earlier.Size == AliasAnalysis::UnknownSize) {
-      // If we have no TargetData information around, then the size of the store
+      // If we have no DataLayout information around, then the size of the store
       // is inferrable from the pointee type.  If they are the same type, then
       // we know that the store is safe.
-      if (AA.getTargetData() == 0 &&
+      if (AA.getDataLayout() == 0 &&
           Later.Ptr->getType() == Earlier.Ptr->getType())
         return OverwriteComplete;
 
@@ -370,13 +370,13 @@
   // larger than the earlier one.
   if (Later.Size == AliasAnalysis::UnknownSize ||
       Earlier.Size == AliasAnalysis::UnknownSize ||
-      AA.getTargetData() == 0)
+      AA.getDataLayout() == 0)
     return OverwriteUnknown;
 
   // Check to see if the later store is to the entire object (either a global,
   // an alloca, or a byval argument).  If so, then it clearly overwrites any
   // other store to the same object.
-  const TargetData &TD = *AA.getTargetData();
+  const DataLayout &TD = *AA.getDataLayout();
 
   const Value *UO1 = GetUnderlyingObject(P1, &TD),
               *UO2 = GetUnderlyingObject(P2, &TD);
Index: lib/Transforms/Scalar/EarlyCSE.cpp
===================================================================
--- lib/Transforms/Scalar/EarlyCSE.cpp	(revision 165037)
+++ lib/Transforms/Scalar/EarlyCSE.cpp	(working copy)
@@ -18,7 +18,7 @@
 #include "llvm/Pass.h"
 #include "llvm/Analysis/Dominators.h"
 #include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Support/Debug.h"
@@ -216,7 +216,7 @@
 /// cases.
 class EarlyCSE : public FunctionPass {
 public:
-  const TargetData *TD;
+  const DataLayout *TD;
   const TargetLibraryInfo *TLI;
   DominatorTree *DT;
   typedef RecyclingAllocator nodesToProcess;
 
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
   DT = &getAnalysis<DominatorTree>();
 
Index: lib/Transforms/Scalar/GlobalMerge.cpp
===================================================================
--- lib/Transforms/Scalar/GlobalMerge.cpp	(revision 165037)
+++ lib/Transforms/Scalar/GlobalMerge.cpp	(working copy)
@@ -62,7 +62,7 @@
 #include "llvm/Intrinsics.h"
 #include "llvm/Module.h"
 #include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetLoweringObjectFile.h"
 #include "llvm/ADT/Statistic.h"
@@ -98,9 +98,9 @@
     }
 
     struct GlobalCmp {
-      const TargetData *TD;
+      const DataLayout *TD;
 
-      GlobalCmp(const TargetData *td) : TD(td) { }
+      GlobalCmp(const DataLayout *td) : TD(td) { }
 
       bool operator()(const GlobalVariable *GV1, const GlobalVariable *GV2) {
         Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
@@ -119,7 +119,7 @@
 
 bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
                              Module &M, bool isConst) const {
-  const TargetData *TD = TLI->getTargetData();
+  const DataLayout *TD = TLI->getDataLayout();
 
   // FIXME: Infer the maximum possible offset depending on the actual users
   // (these max offsets are different for the users inside Thumb or ARM
@@ -170,7 +170,7 @@
 
 bool GlobalMerge::doInitialization(Module &M) {
   SmallVector Globals, ConstGlobals, BSSGlobals;
-  const TargetData *TD = TLI->getTargetData();
+  const DataLayout *TD = TLI->getDataLayout();
   unsigned MaxOffset = TLI->getMaximalGlobalOffset();
   bool Changed = false;
 
Index: lib/Transforms/Scalar/GVN.cpp
===================================================================
--- lib/Transforms/Scalar/GVN.cpp	(revision 165037)
+++ lib/Transforms/Scalar/GVN.cpp	(working copy)
@@ -41,7 +41,7 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/PatternMatch.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -503,7 +503,7 @@
     bool NoLoads;
     MemoryDependenceAnalysis *MD;
     DominatorTree *DT;
-    const TargetData *TD;
+    const DataLayout *TD;
     const TargetLibraryInfo *TLI;
 
     ValueTable VN;
@@ -535,7 +535,7 @@
       InstrsToErase.push_back(I);
     }
 
-    const TargetData *getTargetData() const { return TD; }
+    const DataLayout *getDataLayout() const { return TD; }
     DominatorTree &getDominatorTree() const { return *DT; }
     AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
     MemoryDependenceAnalysis &getMemDep() const { return *MD; }
@@ -730,7 +730,7 @@
 /// CoerceAvailableValueToLoadType will succeed.
 static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                             Type *LoadTy,
-                                            const TargetData &TD) {
+                                            const DataLayout &TD) {
   // If the loaded or stored value is an first class array or struct, don't try
   // to transform them.  We need to be able to bitcast to integer.
   if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
@@ -756,7 +756,7 @@
 static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                              Type *LoadedTy,
                                              Instruction *InsertPt,
-                                             const TargetData &TD) {
+                                             const DataLayout &TD) {
   if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
     return 0;
 
@@ -842,7 +842,7 @@
 static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
                                           Value *WritePtr,
                                           uint64_t WriteSizeInBits,
-                                          const TargetData &TD) {
+                                          const DataLayout &TD) {
   // If the loaded or stored value is a first class array or struct, don't try
   // to transform them.  We need to be able to bitcast to integer.
   if (LoadTy->isStructTy() || LoadTy->isArrayTy())
@@ -915,7 +915,7 @@
 /// memdep query of a load that ends up being a clobbering store.
 static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
                                           StoreInst *DepSI,
-                                          const TargetData &TD) {
+                                          const DataLayout &TD) {
   // Cannot handle reading from store of first-class aggregate yet.
   if (DepSI->getValueOperand()->getType()->isStructTy() ||
       DepSI->getValueOperand()->getType()->isArrayTy())
@@ -931,7 +931,7 @@
 /// memdep query of a load that ends up being clobbered by another load.  See if
 /// the other load can feed into the second load.
 static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
-                                         LoadInst *DepLI, const TargetData &TD){
+                                         LoadInst *DepLI, const DataLayout &TD){
   // Cannot handle reading from store of first-class aggregate yet.
   if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
     return -1;
@@ -959,7 +959,7 @@
 
 static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
                                             MemIntrinsic *MI,
-                                            const TargetData &TD) {
+                                            const DataLayout &TD) {
   // If the mem operation is a non-constant size, we can't handle it.
   ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
   if (SizeCst == 0) return -1;
@@ -1009,7 +1009,7 @@
 /// before we give up.
 static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                    Type *LoadTy,
-                                   Instruction *InsertPt, const TargetData &TD){
+                                   Instruction *InsertPt, const DataLayout &TD){
   LLVMContext &Ctx = SrcVal->getType()->getContext();
 
   uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
@@ -1048,7 +1048,7 @@
 static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
                                   Type *LoadTy, Instruction *InsertPt,
                                   GVN &gvn) {
-  const TargetData &TD = *gvn.getTargetData();
+  const DataLayout &TD = *gvn.getDataLayout();
   // If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
   // widen SrcVal out to a larger load.
   unsigned SrcValSize = TD.getTypeStoreSize(SrcVal->getType());
@@ -1107,7 +1107,7 @@
 /// memdep query of a load that ends up being a clobbering mem intrinsic.
 static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                      Type *LoadTy, Instruction *InsertPt,
-                                     const TargetData &TD){
+                                     const DataLayout &TD){
   LLVMContext &Ctx = LoadTy->getContext();
   uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
 
@@ -1231,7 +1231,7 @@
     if (isSimpleValue()) {
       Res = getSimpleValue();
       if (Res->getType() != LoadTy) {
-        const TargetData *TD = gvn.getTargetData();
+        const DataLayout *TD = gvn.getDataLayout();
         assert(TD && "Need target data to handle type mismatch case");
         Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                    *TD);
@@ -1253,7 +1253,7 @@
                      << *Res << '\n' << "\n\n\n");
       }
     } else {
-      const TargetData *TD = gvn.getTargetData();
+      const DataLayout *TD = gvn.getDataLayout();
       assert(TD && "Need target data to handle type mismatch case");
       Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                    LoadTy, BB->getTerminator(), *TD);
@@ -2295,7 +2295,7 @@
   if (!NoLoads)
     MD = &getAnalysis<MemoryDependenceAnalysis>();
   DT = &getAnalysis<DominatorTree>();
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
   VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
   VN.setMemDep(MD);
Index: lib/Transforms/Scalar/IndVarSimplify.cpp
===================================================================
--- lib/Transforms/Scalar/IndVarSimplify.cpp	(revision 165037)
+++ lib/Transforms/Scalar/IndVarSimplify.cpp	(working copy)
@@ -43,7 +43,7 @@
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/SimplifyIndVar.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
@@ -68,7 +68,7 @@
     LoopInfo        *LI;
     ScalarEvolution *SE;
     DominatorTree   *DT;
-    TargetData      *TD;
+    DataLayout      *TD;
     TargetLibraryInfo *TLI;
 
     SmallVector DeadInsts;
@@ -597,13 +597,13 @@
 
   class WideIVVisitor : public IVVisitor {
     ScalarEvolution *SE;
-    const TargetData *TD;
+    const DataLayout *TD;
 
   public:
     WideIVInfo WI;
 
     WideIVVisitor(PHINode *NarrowIV, ScalarEvolution *SCEV,
-                  const TargetData *TData) :
+                  const DataLayout *TData) :
       SE(SCEV), TD(TData) { WI.NarrowIV = NarrowIV; }
 
     // Implement the interface used by simplifyUsersOfIV.
@@ -1341,7 +1341,7 @@
 /// could at least handle constant BECounts.
 static PHINode *
 FindLoopCounter(Loop *L, const SCEV *BECount,
-                ScalarEvolution *SE, DominatorTree *DT, const TargetData *TD) {
+                ScalarEvolution *SE, DominatorTree *DT, const DataLayout *TD) {
   uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());
 
   Value *Cond =
@@ -1698,7 +1698,7 @@
   LI = &getAnalysis<LoopInfo>();
   SE = &getAnalysis<ScalarEvolution>();
   DT = &getAnalysis<DominatorTree>();
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
 
   DeadInsts.clear();
Index: lib/Transforms/Scalar/JumpThreading.cpp
===================================================================
--- lib/Transforms/Scalar/JumpThreading.cpp	(revision 165037)
+++ lib/Transforms/Scalar/JumpThreading.cpp	(working copy)
@@ -23,7 +23,7 @@
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/SSAUpdater.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseSet.h"
@@ -75,7 +75,7 @@
   /// revectored to the false side of the second if.
   ///
   class JumpThreading : public FunctionPass {
-    TargetData *TD;
+    DataLayout *TD;
     TargetLibraryInfo *TLI;
     LazyValueInfo *LVI;
 #ifdef NDEBUG
@@ -147,7 +147,7 @@
 ///
 bool JumpThreading::runOnFunction(Function &F) {
   DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
   LVI = &getAnalysis<LazyValueInfo>();
 
Index: lib/Transforms/Scalar/LICM.cpp
===================================================================
--- lib/Transforms/Scalar/LICM.cpp	(revision 165037)
+++ lib/Transforms/Scalar/LICM.cpp	(working copy)
@@ -46,7 +46,7 @@
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/SSAUpdater.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Support/CFG.h"
 #include "llvm/Support/CommandLine.h"
@@ -100,7 +100,7 @@
     LoopInfo      *LI;       // Current LoopInfo
     DominatorTree *DT;       // Dominator Tree for the current Loop.
 
-    TargetData *TD;          // TargetData for constant folding.
+    DataLayout *TD;          // DataLayout for constant folding.
     TargetLibraryInfo *TLI;  // TargetLibraryInfo for constant folding.
 
     // State that is updated as we process loops.
@@ -207,7 +207,7 @@
   AA = &getAnalysis<AliasAnalysis>();
   DT = &getAnalysis<DominatorTree>();
 
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
 
   CurAST = new AliasSetTracker(*AA);
Index: lib/Transforms/Scalar/LoopIdiomRecognize.cpp
===================================================================
--- lib/Transforms/Scalar/LoopIdiomRecognize.cpp	(revision 165037)
+++ lib/Transforms/Scalar/LoopIdiomRecognize.cpp	(working copy)
@@ -54,7 +54,7 @@
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Transforms/Utils/Local.h"
 using namespace llvm;
@@ -65,7 +65,7 @@
 namespace {
   class LoopIdiomRecognize : public LoopPass {
     Loop *CurLoop;
-    const TargetData *TD;
+    const DataLayout *TD;
     DominatorTree *DT;
     ScalarEvolution *SE;
     TargetLibraryInfo *TLI;
@@ -199,7 +199,7 @@
       return false;
 
   // We require target data for now.
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   if (TD == 0) return false;
 
   DT = &getAnalysis<DominatorTree>();
@@ -408,7 +408,7 @@
 ///
 /// Note that we don't ever attempt to use memset_pattern8 or 4, because these
 /// just replicate their input array and then pass on to memset_pattern16.
-static Constant *getMemSetPatternValue(Value *V, const TargetData &TD) {
+static Constant *getMemSetPatternValue(Value *V, const DataLayout &TD) {
   // If the value isn't a constant, we can't promote it to being in a constant
   // array.  We could theoretically do a store to an alloca or something, but
   // that doesn't seem worthwhile.
Index: lib/Transforms/Scalar/LoopInstSimplify.cpp
===================================================================
--- lib/Transforms/Scalar/LoopInstSimplify.cpp	(revision 165037)
+++ lib/Transforms/Scalar/LoopInstSimplify.cpp	(working copy)
@@ -18,7 +18,7 @@
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils/Local.h"
@@ -66,7 +66,7 @@
 bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
   DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
   LoopInfo *LI = &getAnalysis<LoopInfo>();
-  const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+  const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
   const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
 
   SmallVector ExitBlocks;
Index: lib/Transforms/Scalar/LoopUnrollPass.cpp
===================================================================
--- lib/Transforms/Scalar/LoopUnrollPass.cpp	(revision 165037)
+++ lib/Transforms/Scalar/LoopUnrollPass.cpp	(working copy)
@@ -22,7 +22,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/Utils/UnrollLoop.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include 
 
 using namespace llvm;
@@ -113,7 +113,7 @@
 
 /// ApproximateLoopSize - Approximate the size of the loop.
 static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls,
-                                    const TargetData *TD) {
+                                    const DataLayout *TD) {
   CodeMetrics Metrics;
   for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
        I != E; ++I)
@@ -178,7 +178,7 @@
 
   // Enforce the threshold.
   if (Threshold != NoThreshold) {
-    const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+    const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
     unsigned NumInlineCandidates;
     unsigned LoopSize = ApproximateLoopSize(L, NumInlineCandidates, TD);
     DEBUG(dbgs() << "  Loop Size = " << LoopSize << "\n");
Index: lib/Transforms/Scalar/MemCpyOptimizer.cpp
===================================================================
--- lib/Transforms/Scalar/MemCpyOptimizer.cpp	(revision 165037)
+++ lib/Transforms/Scalar/MemCpyOptimizer.cpp	(working copy)
@@ -27,7 +27,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include <list>
@@ -39,7 +39,7 @@
 STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");
 
 static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
-                                  bool &VariableIdxFound, const TargetData &TD){
+                                  bool &VariableIdxFound, const DataLayout &TD){
   // Skip over the first indices.
   gep_type_iterator GTI = gep_type_begin(GEP);
   for (unsigned i = 1; i != Idx; ++i, ++GTI)
@@ -72,7 +72,7 @@
 /// constant offset, and return that constant offset.  For example, Ptr1 might
 /// be &A[42], and Ptr2 might be &A[40].  In this case offset would be -8.
 static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
-                            const TargetData &TD) {
+                            const DataLayout &TD) {
   Ptr1 = Ptr1->stripPointerCasts();
   Ptr2 = Ptr2->stripPointerCasts();
   GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
@@ -141,12 +141,12 @@
   /// TheStores - The actual stores that make up this range.
   SmallVector TheStores;
 
-  bool isProfitableToUseMemset(const TargetData &TD) const;
+  bool isProfitableToUseMemset(const DataLayout &TD) const;
 
 };
 } // end anon namespace
 
-bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
+bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const {
   // If we found more than 4 stores to merge or 16 bytes, use memset.
   if (TheStores.size() >= 4 || End-Start >= 16) return true;
 
@@ -192,9 +192,9 @@
   /// because each element is relatively large and expensive to copy.
   std::list<MemsetRange> Ranges;
   typedef std::list<MemsetRange>::iterator range_iterator;
-  const TargetData &TD;
+  const DataLayout &TD;
 public:
-  MemsetRanges(const TargetData &td) : TD(td) {}
+  MemsetRanges(const DataLayout &td) : TD(td) {}
 
   typedef std::list<MemsetRange>::const_iterator const_iterator;
   const_iterator begin() const { return Ranges.begin(); }
@@ -302,7 +302,7 @@
   class MemCpyOpt : public FunctionPass {
     MemoryDependenceAnalysis *MD;
     TargetLibraryInfo *TLI;
-    const TargetData *TD;
+    const DataLayout *TD;
   public:
     static char ID; // Pass identification, replacement for typeid
     MemCpyOpt() : FunctionPass(ID) {
@@ -974,7 +974,7 @@
 bool MemCpyOpt::runOnFunction(Function &F) {
   bool MadeChange = false;
   MD = &getAnalysis<MemoryDependenceAnalysis>();
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
 
   // If we don't have at least memset and memcpy, there is little point of doing
Index: lib/Transforms/Scalar/Scalar.cpp
===================================================================
--- lib/Transforms/Scalar/Scalar.cpp	(revision 165037)
+++ lib/Transforms/Scalar/Scalar.cpp	(working copy)
@@ -19,7 +19,7 @@
 #include "llvm/PassManager.h"
 #include "llvm/Analysis/Passes.h"
 #include "llvm/Analysis/Verifier.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Transforms/Scalar.h"
 
 using namespace llvm;
Index: lib/Transforms/Scalar/ScalarReplAggregates.cpp
===================================================================
--- lib/Transforms/Scalar/ScalarReplAggregates.cpp	(revision 165037)
+++ lib/Transforms/Scalar/ScalarReplAggregates.cpp	(working copy)
@@ -46,7 +46,7 @@
 #include "llvm/Support/GetElementPtrTypeIterator.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
 #include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -87,7 +87,7 @@
 
   private:
     bool HasDomTree;
-    TargetData *TD;
+    DataLayout *TD;
 
     /// DeadInsts - Keep track of instructions we have made dead, so that
     /// we can remove them after we are done working.
@@ -258,7 +258,7 @@
 class ConvertToScalarInfo {
   /// AllocaSize - The size of the alloca being considered in bytes.
   unsigned AllocaSize;
-  const TargetData &TD;
+  const DataLayout &TD;
   unsigned ScalarLoadThreshold;
 
   /// IsNotTrivial - This is set to true if there is some access to the object
@@ -301,7 +301,7 @@
   bool HadDynamicAccess;
 
 public:
-  explicit ConvertToScalarInfo(unsigned Size, const TargetData &td,
+  explicit ConvertToScalarInfo(unsigned Size, const DataLayout &td,
                                unsigned SLT)
     : AllocaSize(Size), TD(td), ScalarLoadThreshold(SLT), IsNotTrivial(false),
     ScalarKind(Unknown), VectorTy(0), HadNonMemTransferAccess(false),
@@ -1020,11 +1020,11 @@
 
 
 bool SROA::runOnFunction(Function &F) {
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
 
   bool Changed = performPromotion(F);
 
-  // FIXME: ScalarRepl currently depends on TargetData more than it
+  // FIXME: ScalarRepl currently depends on DataLayout more than it
   // theoretically needs to. It should be refactored in order to support
   // target-independent IR. Until this is done, just skip the actual
   // scalar-replacement portion of this pass.
@@ -1134,7 +1134,7 @@
 ///
 /// We can do this to a select if its only uses are loads and if the operand to
 /// the select can be loaded unconditionally.
-static bool isSafeSelectToSpeculate(SelectInst *SI, const TargetData *TD) {
+static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *TD) {
   bool TDerefable = SI->getTrueValue()->isDereferenceablePointer();
   bool FDerefable = SI->getFalseValue()->isDereferenceablePointer();
 
@@ -1172,7 +1172,7 @@
 ///
 /// We can do this to a select if its only uses are loads and if the operand to
 /// the select can be loaded unconditionally.
-static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) {
+static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *TD) {
   // For now, we can only do this promotion if the load is in the same block as
   // the PHI, and if there are no stores between the phi and load.
   // TODO: Allow recursive phi users.
@@ -1236,7 +1236,7 @@
 /// direct (non-volatile) loads and stores to it.  If the alloca is close but
 /// not quite there, this will transform the code to allow promotion.  As such,
 /// it is a non-pure predicate.
-static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
+static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
   SetVector,
             SmallPtrSet > InstsToRewrite;
 
@@ -2537,7 +2537,7 @@
 /// HasPadding - Return true if the specified type has any structure or
 /// alignment padding in between the elements that would be split apart
 /// by SROA; return false otherwise.
-static bool HasPadding(Type *Ty, const TargetData &TD) {
+static bool HasPadding(Type *Ty, const DataLayout &TD) {
   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     Ty = ATy->getElementType();
     return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
Index: lib/Transforms/Scalar/SCCP.cpp
===================================================================
--- lib/Transforms/Scalar/SCCP.cpp	(revision 165037)
+++ lib/Transforms/Scalar/SCCP.cpp	(working copy)
@@ -26,7 +26,7 @@
 #include "llvm/Pass.h"
 #include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Support/CallSite.h"
 #include "llvm/Support/Debug.h"
@@ -153,7 +153,7 @@
 /// Constant Propagation.
 ///
 class SCCPSolver : public InstVisitor<SCCPSolver> {
-  const TargetData *TD;
+  const DataLayout *TD;
   const TargetLibraryInfo *TLI;
   SmallPtrSet BBExecutable; // The BBs that are executable.
   DenseMap ValueState;  // The state each value is in.
@@ -205,7 +205,7 @@
   typedef std::pair<BasicBlock*, BasicBlock*> Edge;
   DenseSet<Edge> KnownFeasibleEdges;
 public:
-  SCCPSolver(const TargetData *td, const TargetLibraryInfo *tli)
+  SCCPSolver(const DataLayout *td, const TargetLibraryInfo *tli)
     : TD(td), TLI(tli) {}
 
   /// MarkBlockExecutable - This method can be used by clients to mark all of
@@ -1564,7 +1564,7 @@
 //
 bool SCCP::runOnFunction(Function &F) {
   DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
-  const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+  const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
   const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
   SCCPSolver Solver(TD, TLI);
 
@@ -1693,7 +1693,7 @@
 }
 
 bool IPSCCP::runOnModule(Module &M) {
-  const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+  const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
   const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
   SCCPSolver Solver(TD, TLI);
 
Index: lib/Transforms/Scalar/SimplifyCFGPass.cpp
===================================================================
--- lib/Transforms/Scalar/SimplifyCFGPass.cpp	(revision 165037)
+++ lib/Transforms/Scalar/SimplifyCFGPass.cpp	(working copy)
@@ -31,7 +31,7 @@
 #include "llvm/Attributes.h"
 #include "llvm/Support/CFG.h"
 #include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/Statistic.h"
@@ -293,7 +293,7 @@
 
 /// iterativelySimplifyCFG - Call SimplifyCFG on all the blocks in the function,
 /// iterating until no more changes are made.
-static bool iterativelySimplifyCFG(Function &F, const TargetData *TD) {
+static bool iterativelySimplifyCFG(Function &F, const DataLayout *TD) {
   bool Changed = false;
   bool LocalChange = true;
   while (LocalChange) {
@@ -316,7 +316,7 @@
 // simplify the CFG.
 //
 bool CFGSimplifyPass::runOnFunction(Function &F) {
-  const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+  const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
   bool EverChanged = removeUnreachableBlocksFromFn(F);
   EverChanged |= mergeEmptyReturnBlocks(F);
   EverChanged |= iterativelySimplifyCFG(F, TD);
Index: lib/Transforms/Scalar/SimplifyLibCalls.cpp
===================================================================
--- lib/Transforms/Scalar/SimplifyLibCalls.cpp	(revision 165037)
+++ lib/Transforms/Scalar/SimplifyLibCalls.cpp	(working copy)
@@ -31,7 +31,7 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Config/config.h"            // FIXME: Shouldn't depend on host!
 using namespace llvm;
@@ -53,7 +53,7 @@
 class LibCallOptimization {
 protected:
   Function *Caller;
-  const TargetData *TD;
+  const DataLayout *TD;
   const TargetLibraryInfo *TLI;
   LLVMContext* Context;
 public:
@@ -68,7 +68,7 @@
   virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B)
     =0;
 
-  Value *OptimizeCall(CallInst *CI, const TargetData *TD,
+  Value *OptimizeCall(CallInst *CI, const DataLayout *TD,
                       const TargetLibraryInfo *TLI, IRBuilder<> &B) {
     Caller = CI->getParent()->getParent();
     this->TD = TD;
@@ -159,7 +159,7 @@
     if (Len == 0)
       return Dst;
 
-    // These optimizations require TargetData.
+    // These optimizations require DataLayout.
     if (!TD) return 0;
 
     return EmitStrLenMemCpy(Src, Dst, Len, B);
@@ -220,7 +220,7 @@
     // strncat(x,  c, 0) -> x
     if (SrcLen == 0 || Len == 0) return Dst;
 
-    // These optimizations require TargetData.
+    // These optimizations require DataLayout.
     if (!TD) return 0;
 
     // We don't optimize this case
@@ -251,7 +251,7 @@
     // of the input string and turn this into memchr.
     ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
     if (CharC == 0) {
-      // These optimizations require TargetData.
+      // These optimizations require DataLayout.
       if (!TD) return 0;
 
       uint64_t Len = GetStringLength(SrcStr);
@@ -356,7 +356,7 @@
     uint64_t Len1 = GetStringLength(Str1P);
     uint64_t Len2 = GetStringLength(Str2P);
     if (Len1 && Len2) {
-      // These optimizations require TargetData.
+      // These optimizations require DataLayout.
       if (!TD) return 0;
 
       return EmitMemCmp(Str1P, Str2P,
@@ -444,7 +444,7 @@
     if (Dst == Src)      // strcpy(x,x)  -> x
       return Src;
 
-    // These optimizations require TargetData.
+    // These optimizations require DataLayout.
     if (!TD) return 0;
 
     // See if we can get the length of the input string.
@@ -481,7 +481,7 @@
         FT->getParamType(0) != B.getInt8PtrTy())
       return 0;
 
-    // These optimizations require TargetData.
+    // These optimizations require DataLayout.
     if (!TD) return 0;
 
     Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
@@ -543,7 +543,7 @@
 
     if (Len == 0) return Dst; // strncpy(x, y, 0) -> x
 
-    // These optimizations require TargetData.
+    // These optimizations require DataLayout.
     if (!TD) return 0;
 
     // Let strncpy handle the zero padding
@@ -832,7 +832,7 @@
 
 struct MemCpyOpt : public LibCallOptimization {
   virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
-    // These optimizations require TargetData.
+    // These optimizations require DataLayout.
     if (!TD) return 0;
 
     FunctionType *FT = Callee->getFunctionType();
@@ -854,7 +854,7 @@
 
 struct MemMoveOpt : public LibCallOptimization {
   virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
-    // These optimizations require TargetData.
+    // These optimizations require DataLayout.
     if (!TD) return 0;
 
     FunctionType *FT = Callee->getFunctionType();
@@ -876,7 +876,7 @@
 
 struct MemSetOpt : public LibCallOptimization {
   virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
-    // These optimizations require TargetData.
+    // These optimizations require DataLayout.
     if (!TD) return 0;
 
     FunctionType *FT = Callee->getFunctionType();
@@ -1304,7 +1304,7 @@
         if (FormatStr[i] == '%')
           return 0; // we found a format specifier, bail out.
 
-      // These optimizations require TargetData.
+      // These optimizations require DataLayout.
       if (!TD) return 0;
 
       // sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
@@ -1334,7 +1334,7 @@
     }
 
     if (FormatStr[1] == 's') {
-      // These optimizations require TargetData.
+      // These optimizations require DataLayout.
       if (!TD) return 0;
 
       // sprintf(dest, "%s", str) -> llvm.memcpy(dest, str, strlen(str)+1, 1)
@@ -1422,7 +1422,7 @@
 
 struct FPutsOpt : public LibCallOptimization {
   virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
-    // These optimizations require TargetData.
+    // These optimizations require DataLayout.
     if (!TD) return 0;
 
     // Require two pointers.  Also, we can't optimize if return value is used.
@@ -1459,7 +1459,7 @@
         if (FormatStr[i] == '%')  // Could handle %% -> % if we cared.
           return 0; // We found a format specifier.
 
-      // These optimizations require TargetData.
+      // These optimizations require DataLayout.
       if (!TD) return 0;
 
       Value *NewCI = EmitFWrite(CI->getArgOperand(1),
@@ -1749,7 +1749,7 @@
   if (Optimizations.empty())
     InitOptimizations();
 
-  const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+  const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
 
   IRBuilder<> Builder(F.getContext());
 
Index: lib/Transforms/Scalar/SROA.cpp
===================================================================
--- lib/Transforms/Scalar/SROA.cpp	(revision 165037)
+++ lib/Transforms/Scalar/SROA.cpp	(working copy)
@@ -51,7 +51,7 @@
 #include "llvm/Support/InstVisitor.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
 #include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -166,7 +166,7 @@
   ///
   /// Construction does most of the work for partitioning the alloca. This
   /// performs the necessary walks of users and builds a partitioning from it.
-  AllocaPartitioning(const TargetData &TD, AllocaInst &AI);
+  AllocaPartitioning(const DataLayout &TD, AllocaInst &AI);
 
   /// \brief Test whether a pointer to the allocation escapes our analysis.
   ///
@@ -386,7 +386,7 @@
 class AllocaPartitioning::BuilderBase
     : public InstVisitor<DerivedT, RetT> {
 public:
-  BuilderBase(const TargetData &TD, AllocaInst &AI, AllocaPartitioning &P)
+  BuilderBase(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
       : TD(TD),
         AllocSize(TD.getTypeAllocSize(AI.getAllocatedType())),
         P(P) {
@@ -394,7 +394,7 @@
   }
 
 protected:
-  const TargetData &TD;
+  const DataLayout &TD;
   const uint64_t AllocSize;
   AllocaPartitioning &P;
 
@@ -495,7 +495,7 @@
   SmallDenseMap<Instruction *, unsigned> MemTransferPartitionMap;
 
 public:
-  PartitionBuilder(const TargetData &TD, AllocaInst &AI, AllocaPartitioning &P)
+  PartitionBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
       : BuilderBase(TD, AI, P) {}
 
   /// \brief Run the builder over the allocation.
@@ -808,7 +808,7 @@
   SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
 
 public:
-  UseBuilder(const TargetData &TD, AllocaInst &AI, AllocaPartitioning &P)
+  UseBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
       : BuilderBase(TD, AI, P) {}
 
   /// \brief Run the builder over the allocation.
@@ -1067,7 +1067,7 @@
   Partitions.erase(Partitions.end() - NumDeadPartitions, Partitions.end());
 }
 
-AllocaPartitioning::AllocaPartitioning(const TargetData &TD, AllocaInst &AI)
+AllocaPartitioning::AllocaPartitioning(const DataLayout &TD, AllocaInst &AI)
     :
 #ifndef NDEBUG
       AI(AI),
@@ -1295,7 +1295,7 @@
   const bool RequiresDomTree;
 
   LLVMContext *C;
-  const TargetData *TD;
+  const DataLayout *TD;
   DominatorTree *DT;
 
   /// \brief Worklist of alloca instructions to simplify.
@@ -1363,7 +1363,7 @@
 /// If the provided GEP is all-constant, the total byte offset formed by the
 /// GEP is computed and Offset is set to it. If the GEP has any non-constant
 /// operands, the function returns false and the value of Offset is unmodified.
-static bool accumulateGEPOffsets(const TargetData &TD, GEPOperator &GEP,
+static bool accumulateGEPOffsets(const DataLayout &TD, GEPOperator &GEP,
                                  APInt &Offset) {
   APInt GEPOffset(Offset.getBitWidth(), 0);
   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
@@ -1423,7 +1423,7 @@
 /// TargetTy. If we can't find one with the same type, we at least try to use
 /// one with the same size. If none of that works, we just produce the GEP as
 /// indicated by Indices to have the correct offset.
-static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const TargetData &TD,
+static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD,
                                     Value *BasePtr, Type *Ty, Type *TargetTy,
                                     SmallVectorImpl<Value *> &Indices,
                                     const Twine &Prefix) {
@@ -1458,7 +1458,7 @@
 ///
 /// This is the recursive step for getNaturalGEPWithOffset that walks down the
 /// element types adding appropriate indices for the GEP.
-static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const TargetData &TD,
+static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD,
                                        Value *Ptr, Type *Ty, APInt &Offset,
                                        Type *TargetTy,
                                        SmallVectorImpl<Value *> &Indices,
@@ -1529,7 +1529,7 @@
 /// Indices, and setting Ty to the result subtype.
 ///
 /// If no natural GEP can be constructed, this function returns null.
-static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const TargetData &TD,
+static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const DataLayout &TD,
                                       Value *Ptr, APInt Offset, Type *TargetTy,
                                       SmallVectorImpl<Value *> &Indices,
                                       const Twine &Prefix) {
@@ -1569,7 +1569,7 @@
 /// properities. The algorithm tries to fold as many constant indices into
 /// a single GEP as possible, thus making each GEP more independent of the
 /// surrounding code.
-static Value *getAdjustedPtr(IRBuilder<> &IRB, const TargetData &TD,
+static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD,
                              Value *Ptr, APInt Offset, Type *PointerTy,
                              const Twine &Prefix) {
   // Even though we don't look through PHI nodes, we could be called on an
@@ -1665,7 +1665,7 @@
 /// SSA value. We only can ensure this for a limited set of operations, and we
 /// don't want to do the rewrites unless we are confident that the result will
 /// be promotable, so we have an early test here.
-static bool isVectorPromotionViable(const TargetData &TD,
+static bool isVectorPromotionViable(const DataLayout &TD,
                                     Type *AllocaTy,
                                     AllocaPartitioning &P,
                                     uint64_t PartitionBeginOffset,
@@ -1735,7 +1735,7 @@
 /// promotion to an SSA value. We only can ensure this for a limited set of
 /// operations, and we don't want to do the rewrites unless we are confident
 /// that the result will be promotable, so we have an early test here.
-static bool isIntegerPromotionViable(const TargetData &TD,
+static bool isIntegerPromotionViable(const DataLayout &TD,
                                      Type *AllocaTy,
                                      AllocaPartitioning &P,
                                      AllocaPartitioning::const_use_iterator I,
@@ -1784,12 +1784,12 @@
   // Befriend the base class so it can delegate to private visit methods.
   friend class llvm::InstVisitor<PHIOrSelectSpeculator>;
 
-  const TargetData &TD;
+  const DataLayout &TD;
   AllocaPartitioning &P;
   SROA &Pass;
 
 public:
-  PHIOrSelectSpeculator(const TargetData &TD, AllocaPartitioning &P, SROA &Pass)
+  PHIOrSelectSpeculator(const DataLayout &TD, AllocaPartitioning &P, SROA &Pass)
     : TD(TD), P(P), Pass(Pass) {}
 
   /// \brief Visit the users of an alloca partition and rewrite them.
@@ -2071,7 +2071,7 @@
   // Befriend the base class so it can delegate to private visit methods.
   friend class llvm::InstVisitor<AllocaPartitionRewriter, bool>;
 
-  const TargetData &TD;
+  const DataLayout &TD;
   AllocaPartitioning &P;
   SROA &Pass;
   AllocaInst &OldAI, &NewAI;
@@ -2105,7 +2105,7 @@
   std::string NamePrefix;
 
 public:
-  AllocaPartitionRewriter(const TargetData &TD, AllocaPartitioning &P,
+  AllocaPartitionRewriter(const DataLayout &TD, AllocaPartitioning &P,
                           AllocaPartitioning::iterator PI,
                           SROA &Pass, AllocaInst &OldAI, AllocaInst &NewAI,
                           uint64_t NewBeginOffset, uint64_t NewEndOffset)
@@ -2682,7 +2682,7 @@
   // Befriend the base class so it can delegate to private visit methods.
   friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>;
 
-  const TargetData &TD;
+  const DataLayout &TD;
 
   /// Queue of pointer uses to analyze and potentially rewrite.
   SmallVector<Use *, 8> Queue;
@@ -2695,7 +2695,7 @@
   Use *U;
 
 public:
-  AggLoadStoreRewriter(const TargetData &TD) : TD(TD) {}
+  AggLoadStoreRewriter(const DataLayout &TD) : TD(TD) {}
 
   /// Rewrite loads and stores through a pointer and all pointers derived from
   /// it.
@@ -2895,7 +2895,7 @@
 /// when the size or offset cause either end of type-based partition to be off.
 /// Also, this is a best-effort routine. It is reasonable to give up and not
 /// return a type if necessary.
-static Type *getTypePartition(const TargetData &TD, Type *Ty,
+static Type *getTypePartition(const DataLayout &TD, Type *Ty,
                               uint64_t Offset, uint64_t Size) {
   if (Offset == 0 && TD.getTypeAllocSize(Ty) == Size)
     return Ty;
@@ -3281,7 +3281,7 @@
 bool SROA::runOnFunction(Function &F) {
   DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
   C = &F.getContext();
-  TD = getAnalysisIfAvailable<TargetData>();
+  TD = getAnalysisIfAvailable<DataLayout>();
   if (!TD) {
     DEBUG(dbgs() << "  Skipping SROA -- no target data!\n");
     return false;
Index: lib/Transforms/Utils/AddrModeMatcher.cpp
===================================================================
--- lib/Transforms/Utils/AddrModeMatcher.cpp	(revision 165037)
+++ lib/Transforms/Utils/AddrModeMatcher.cpp	(working copy)
@@ -16,7 +16,7 @@
 #include "llvm/GlobalValue.h"
 #include "llvm/Instruction.h"
 #include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
 #include "llvm/Support/PatternMatch.h"
@@ -221,7 +221,7 @@
     unsigned VariableScale = 0;
     
     int64_t ConstantOffset = 0;
-    const TargetData *TD = TLI.getTargetData();
+    const DataLayout *TD = TLI.getDataLayout();
     gep_type_iterator GTI = gep_type_begin(AddrInst);
     for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
       if (StructType *STy = dyn_cast<StructType>(*GTI)) {
Index: lib/Transforms/Utils/BasicBlockUtils.cpp
===================================================================
--- lib/Transforms/Utils/BasicBlockUtils.cpp	(revision 165037)
+++ lib/Transforms/Utils/BasicBlockUtils.cpp	(working copy)
@@ -22,7 +22,7 @@
 #include "llvm/Analysis/Dominators.h"
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Support/ErrorHandling.h"
Index: lib/Transforms/Utils/BuildLibCalls.cpp
===================================================================
--- lib/Transforms/Utils/BuildLibCalls.cpp	(revision 165037)
+++ lib/Transforms/Utils/BuildLibCalls.cpp	(working copy)
@@ -22,7 +22,7 @@
 #include "llvm/Module.h"
 #include "llvm/Type.h"
 #include "llvm/ADT/SmallString.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 
 using namespace llvm;
@@ -34,7 +34,7 @@
 
 /// EmitStrLen - Emit a call to the strlen function to the builder, for the
 /// specified pointer.  This always returns an integer value of size intptr_t.
-Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD,
+Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
                         const TargetLibraryInfo *TLI) {
   if (!TLI->has(LibFunc::strlen))
     return 0;
@@ -61,7 +61,7 @@
 /// specified pointer.  Ptr is required to be some pointer type, MaxLen must
 /// be of size_t type, and the return value has 'intptr_t' type.
 Value *llvm::EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
-                         const TargetData *TD, const TargetLibraryInfo *TLI) {
+                         const DataLayout *TD, const TargetLibraryInfo *TLI) {
   if (!TLI->has(LibFunc::strnlen))
     return 0;
 
@@ -88,7 +88,7 @@
 /// specified pointer and character.  Ptr is required to be some pointer type,
 /// and the return value has 'i8*' type.
 Value *llvm::EmitStrChr(Value *Ptr, char C, IRBuilder<> &B,
-                        const TargetData *TD, const TargetLibraryInfo *TLI) {
+                        const DataLayout *TD, const TargetLibraryInfo *TLI) {
   if (!TLI->has(LibFunc::strchr))
     return 0;
 
@@ -109,7 +109,7 @@
 
 /// EmitStrNCmp - Emit a call to the strncmp function to the builder.
 Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len,
-                         IRBuilder<> &B, const TargetData *TD,
+                         IRBuilder<> &B, const DataLayout *TD,
                          const TargetLibraryInfo *TLI) {
   if (!TLI->has(LibFunc::strncmp))
     return 0;
@@ -139,7 +139,7 @@
 /// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
 /// specified pointer arguments.
 Value *llvm::EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
-                        const TargetData *TD, const TargetLibraryInfo *TLI,
+                        const DataLayout *TD, const TargetLibraryInfo *TLI,
                         StringRef Name) {
   if (!TLI->has(LibFunc::strcpy))
     return 0;
@@ -161,7 +161,7 @@
 /// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the
 /// specified pointer arguments.
 Value *llvm::EmitStrNCpy(Value *Dst, Value *Src, Value *Len,
-                         IRBuilder<> &B, const TargetData *TD,
+                         IRBuilder<> &B, const DataLayout *TD,
                          const TargetLibraryInfo *TLI, StringRef Name) {
   if (!TLI->has(LibFunc::strncpy))
     return 0;
@@ -185,7 +185,7 @@
 /// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
 /// are pointers.
 Value *llvm::EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
-                           IRBuilder<> &B, const TargetData *TD,
+                           IRBuilder<> &B, const DataLayout *TD,
                            const TargetLibraryInfo *TLI) {
   if (!TLI->has(LibFunc::memcpy_chk))
     return 0;
@@ -212,7 +212,7 @@
 /// EmitMemChr - Emit a call to the memchr function.  This assumes that Ptr is
 /// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
 Value *llvm::EmitMemChr(Value *Ptr, Value *Val,
-                        Value *Len, IRBuilder<> &B, const TargetData *TD,
+                        Value *Len, IRBuilder<> &B, const DataLayout *TD,
                         const TargetLibraryInfo *TLI) {
   if (!TLI->has(LibFunc::memchr))
     return 0;
@@ -237,7 +237,7 @@
 
 /// EmitMemCmp - Emit a call to the memcmp function.
 Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2,
-                        Value *Len, IRBuilder<> &B, const TargetData *TD,
+                        Value *Len, IRBuilder<> &B, const DataLayout *TD,
                         const TargetLibraryInfo *TLI) {
   if (!TLI->has(LibFunc::memcmp))
     return 0;
@@ -294,7 +294,7 @@
 
 /// EmitPutChar - Emit a call to the putchar function.  This assumes that Char
 /// is an integer.
-Value *llvm::EmitPutChar(Value *Char, IRBuilder<> &B, const TargetData *TD,
+Value *llvm::EmitPutChar(Value *Char, IRBuilder<> &B, const DataLayout *TD,
                          const TargetLibraryInfo *TLI) {
   if (!TLI->has(LibFunc::putchar))
     return 0;
@@ -316,7 +316,7 @@
 
 /// EmitPutS - Emit a call to the puts function.  This assumes that Str is
 /// some pointer.
-Value *llvm::EmitPutS(Value *Str, IRBuilder<> &B, const TargetData *TD,
+Value *llvm::EmitPutS(Value *Str, IRBuilder<> &B, const DataLayout *TD,
                       const TargetLibraryInfo *TLI) {
   if (!TLI->has(LibFunc::puts))
     return 0;
@@ -339,7 +339,7 @@
 /// EmitFPutC - Emit a call to the fputc function.  This assumes that Char is
 /// an integer and File is a pointer to FILE.
 Value *llvm::EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
-                       const TargetData *TD, const TargetLibraryInfo *TLI) {
+                       const DataLayout *TD, const TargetLibraryInfo *TLI) {
   if (!TLI->has(LibFunc::fputc))
     return 0;
 
@@ -370,7 +370,7 @@
 /// EmitFPutS - Emit a call to the puts function.  Str is required to be a
 /// pointer and File is a pointer to FILE.
 Value *llvm::EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
-                       const TargetData *TD, const TargetLibraryInfo *TLI) {
+                       const DataLayout *TD, const TargetLibraryInfo *TLI) {
   if (!TLI->has(LibFunc::fputs))
     return 0;
 
@@ -400,7 +400,7 @@
 /// EmitFWrite - Emit a call to the fwrite function.  This assumes that Ptr is
 /// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
 Value *llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File,
-                        IRBuilder<> &B, const TargetData *TD,
+                        IRBuilder<> &B, const DataLayout *TD,
                         const TargetLibraryInfo *TLI) {
   if (!TLI->has(LibFunc::fwrite))
     return 0;
@@ -436,9 +436,9 @@
 
 SimplifyFortifiedLibCalls::~SimplifyFortifiedLibCalls() { }
 
-bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD,
+bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,
                                      const TargetLibraryInfo *TLI) {
-  // We really need TargetData for later.
+  // We really need DataLayout for later.
   if (!TD) return false;
   
   this->CI = CI;
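The BuildLibCalls helpers above keep their shape and only swap the TargetData parameter for DataLayout. A small hypothetical usage sketch (emitLengthOf is illustrative, not part of this patch), assuming the usual BuildLibCalls.h header:

#include "llvm/IRBuilder.h"
#include "llvm/Instruction.h"
#include "llvm/Support/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;

// Emit strlen(Str) immediately before Inst.  Returns the intptr_t-typed length
// value, or 0 if strlen is unavailable on this target or no DataLayout exists.
static Value *emitLengthOf(Value *Str, Instruction *Inst,
                           const DataLayout *TD, const TargetLibraryInfo *TLI) {
  if (!TD)
    return 0;                          // EmitStrLen derives intptr_t from DataLayout
  IRBuilder<> B(Inst);                 // insert point: right before Inst
  return EmitStrLen(Str, B, TD, TLI);  // 0 when TLI says strlen is not available
}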
Index: lib/Transforms/Utils/CloneFunction.cpp
===================================================================
--- lib/Transforms/Utils/CloneFunction.cpp	(revision 165037)
+++ lib/Transforms/Utils/CloneFunction.cpp	(working copy)
@@ -202,14 +202,14 @@
     bool ModuleLevelChanges;
     const char *NameSuffix;
     ClonedCodeInfo *CodeInfo;
-    const TargetData *TD;
+    const DataLayout *TD;
   public:
     PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
                           ValueToValueMapTy &valueMap,
                           bool moduleLevelChanges,
                           const char *nameSuffix, 
                           ClonedCodeInfo *codeInfo,
-                          const TargetData *td)
+                          const DataLayout *td)
     : NewFunc(newFunc), OldFunc(oldFunc),
       VMap(valueMap), ModuleLevelChanges(moduleLevelChanges),
       NameSuffix(nameSuffix), CodeInfo(codeInfo), TD(td) {
@@ -365,7 +365,7 @@
                                      SmallVectorImpl<ReturnInst*> &Returns,
                                      const char *NameSuffix, 
                                      ClonedCodeInfo *CodeInfo,
-                                     const TargetData *TD,
+                                     const DataLayout *TD,
                                      Instruction *TheCall) {
   assert(NameSuffix && "NameSuffix cannot be null!");
   
Index: lib/Transforms/Utils/InlineFunction.cpp
===================================================================
--- lib/Transforms/Utils/InlineFunction.cpp	(revision 165037)
+++ lib/Transforms/Utils/InlineFunction.cpp	(working copy)
@@ -27,7 +27,7 @@
 #include "llvm/Analysis/CallGraph.h"
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Support/CallSite.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Transforms/Utils/Local.h"
 using namespace llvm;
 
@@ -357,7 +357,7 @@
 
   Type *VoidPtrTy = Type::getInt8PtrTy(Context);
   
-  // Create the alloca.  If we have TargetData, use nice alignment.
+  // Create the alloca.  If we have DataLayout, use nice alignment.
   unsigned Align = 1;
   if (IFI.TD)
     Align = IFI.TD->getPrefTypeAlignment(AggTy);
Index: lib/Transforms/Utils/Local.cpp
===================================================================
--- lib/Transforms/Utils/Local.cpp	(revision 165037)
+++ lib/Transforms/Utils/Local.cpp	(working copy)
@@ -39,7 +39,7 @@
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/ValueHandle.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 using namespace llvm;
 
 //===----------------------------------------------------------------------===//
@@ -397,7 +397,7 @@
 ///
 /// This returns true if it changed the code, note that it can delete
 /// instructions in other blocks as well in this block.
-bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD,
+bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD,
                                        const TargetLibraryInfo *TLI) {
   bool MadeChange = false;
 
@@ -445,7 +445,7 @@
 /// .. and delete the predecessor corresponding to the '1', this will attempt to
 /// recursively fold the and to 0.
 void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
-                                        TargetData *TD) {
+                                        DataLayout *TD) {
   // This only adjusts blocks with PHI nodes.
   if (!isa<PHINode>(BB->begin()))
     return;
@@ -760,7 +760,7 @@
 /// their preferred alignment from the beginning.
 ///
 static unsigned enforceKnownAlignment(Value *V, unsigned Align,
-                                      unsigned PrefAlign, const TargetData *TD) {
+                                      unsigned PrefAlign, const DataLayout *TD) {
   V = V->stripPointerCasts();
 
   if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
@@ -803,7 +803,7 @@
 /// and it is more than the alignment of the ultimate object, see if we can
 /// increase the alignment of the ultimate object, making this check succeed.
 unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
-                                          const TargetData *TD) {
+                                          const DataLayout *TD) {
   assert(V->getType()->isPointerTy() &&
          "getOrEnforceKnownAlignment expects a pointer!");
   unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
Index: lib/Transforms/Utils/SimplifyCFG.cpp
===================================================================
--- lib/Transforms/Utils/SimplifyCFG.cpp	(revision 165037)
+++ lib/Transforms/Utils/SimplifyCFG.cpp	(working copy)
@@ -39,7 +39,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/NoFolder.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include <algorithm>
 #include <set>
@@ -79,7 +79,7 @@
   };
 
 class SimplifyCFGOpt {
-  const TargetData *const TD;
+  const DataLayout *const TD;
 
   Value *isValueEqualityComparison(TerminatorInst *TI);
   BasicBlock *GetValueEqualityComparisonCases(TerminatorInst *TI,
@@ -99,7 +99,7 @@
   bool SimplifyCondBranch(BranchInst *BI, IRBuilder <>&Builder);
 
 public:
-  explicit SimplifyCFGOpt(const TargetData *td) : TD(td) {}
+  explicit SimplifyCFGOpt(const DataLayout *td) : TD(td) {}
   bool run(BasicBlock *BB);
 };
 }
@@ -382,7 +382,7 @@
 
 /// GetConstantInt - Extract ConstantInt from value, looking through IntToPtr
 /// and PointerNullValue. Return NULL if value is not a constant int.
-static ConstantInt *GetConstantInt(Value *V, const TargetData *TD) {
+static ConstantInt *GetConstantInt(Value *V, const DataLayout *TD) {
   // Normal constant int.
   ConstantInt *CI = dyn_cast<ConstantInt>(V);
   if (CI || !TD || !isa<Constant>(V) || !V->getType()->isPointerTy())
@@ -416,7 +416,7 @@
 /// Values vector.
 static Value *
 GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
-                       const TargetData *TD, bool isEQ, unsigned &UsedICmps) {
+                       const DataLayout *TD, bool isEQ, unsigned &UsedICmps) {
   Instruction *I = dyn_cast<Instruction>(V);
   if (I == 0) return 0;
 
@@ -982,7 +982,7 @@
       Builder.SetInsertPoint(PTI);
       // Convert pointer to int before we switch.
       if (CV->getType()->isPointerTy()) {
-        assert(TD && "Cannot switch on pointer without TargetData");
+        assert(TD && "Cannot switch on pointer without DataLayout");
         CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getContext()),
                                     "magicptr");
       }
@@ -1510,7 +1510,7 @@
 /// that is defined in the same block as the branch and if any PHI entries are
 /// constants, thread edges corresponding to that entry to be branches to their
 /// ultimate destination.
-static bool FoldCondBranchOnPHI(BranchInst *BI, const TargetData *TD) {
+static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) {
   BasicBlock *BB = BI->getParent();
   PHINode *PN = dyn_cast<PHINode>(BI->getCondition());
   // NOTE: we currently cannot transform this case if the PHI node is used
@@ -1606,7 +1606,7 @@
 
 /// FoldTwoEntryPHINode - Given a BB that starts with the specified two-entry
 /// PHI node, see if we can eliminate it.
-static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) {
+static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *TD) {
   // Ok, this is a two entry PHI node.  Check to see if this is a simple "if
   // statement", which has a very simple dominance structure.  Basically, we
   // are trying to find the condition that is being branched on, which
@@ -2522,7 +2522,7 @@
 /// We prefer to split the edge to 'end' so that there is a true/false entry to
 /// the PHI, merging the third icmp into the switch.
 static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI,
-                                                  const TargetData *TD,
+                                                  const DataLayout *TD,
                                                   IRBuilder<> &Builder) {
   BasicBlock *BB = ICI->getParent();
 
@@ -2628,7 +2628,7 @@
 /// SimplifyBranchOnICmpChain - The specified branch is a conditional branch.
 /// Check to see if it is branching on an or/and chain of icmp instructions, and
 /// fold it into a switch instruction if so.
-static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD,
+static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD,
                                       IRBuilder<> &Builder) {
   Instruction *Cond = dyn_cast<Instruction>(BI->getCondition());
   if (Cond == 0) return false;
@@ -2709,7 +2709,7 @@
   Builder.SetInsertPoint(BI);
   // Convert pointer to int before we switch.
   if (CompVal->getType()->isPointerTy()) {
-    assert(TD && "Cannot switch on pointer without TargetData");
+    assert(TD && "Cannot switch on pointer without DataLayout");
     CompVal = Builder.CreatePtrToInt(CompVal,
                                      TD->getIntPtrType(CompVal->getContext()),
                                      "magicptr");
@@ -3258,7 +3258,7 @@
                       ConstantInt *Offset,
                const SmallVector<std::pair<ConstantInt*, Constant*>, 4>& Values,
                       Constant *DefaultValue,
-                      const TargetData *TD);
+                      const DataLayout *TD);
 
     /// BuildLookup - Build instructions with Builder to retrieve the value at
     /// the position given by Index in the lookup table.
@@ -3266,7 +3266,7 @@
 
     /// WouldFitInRegister - Return true if a table with TableSize elements of
     /// type ElementType would fit in a target-legal register.
-    static bool WouldFitInRegister(const TargetData *TD,
+    static bool WouldFitInRegister(const DataLayout *TD,
                                    uint64_t TableSize,
                                    const Type *ElementType);
 
@@ -3305,7 +3305,7 @@
                                      ConstantInt *Offset,
                const SmallVector<std::pair<ConstantInt*, Constant*>, 4>& Values,
                                      Constant *DefaultValue,
-                                     const TargetData *TD) {
+                                     const DataLayout *TD) {
   assert(Values.size() && "Can't build lookup table without values!");
   assert(TableSize >= Values.size() && "Can't fit values in table!");
 
@@ -3411,7 +3411,7 @@
   llvm_unreachable("Unknown lookup table kind!");
 }
 
-bool SwitchLookupTable::WouldFitInRegister(const TargetData *TD,
+bool SwitchLookupTable::WouldFitInRegister(const DataLayout *TD,
                                            uint64_t TableSize,
                                            const Type *ElementType) {
   if (!TD)
@@ -3433,7 +3433,7 @@
 /// types of the results.
 static bool ShouldBuildLookupTable(SwitchInst *SI,
                                    uint64_t TableSize,
-                                   const TargetData *TD,
+                                   const DataLayout *TD,
                             const SmallDenseMap<PHINode*, Type*>& ResultTypes) {
   // The table density should be at least 40%. This is the same criterion as for
   // jump tables, see SelectionDAGBuilder::handleJTSwitchCase.
@@ -3457,7 +3457,7 @@
 /// replace the switch with lookup tables.
 static bool SwitchToLookupTable(SwitchInst *SI,
                                 IRBuilder<> &Builder,
-                                const TargetData* TD) {
+                                const DataLayout* TD) {
   assert(SI->getNumCases() > 1 && "Degenerate switch?");
   // FIXME: Handle unreachable cases.
 
@@ -3915,6 +3915,6 @@
 /// eliminates unreachable basic blocks, and does other "peephole" optimization
 /// of the CFG.  It returns true if a modification was made.
 ///
-bool llvm::SimplifyCFG(BasicBlock *BB, const TargetData *TD) {
+bool llvm::SimplifyCFG(BasicBlock *BB, const DataLayout *TD) {
   return SimplifyCFGOpt(TD).run(BB);
 }
Index: lib/Transforms/Utils/SimplifyIndVar.cpp
===================================================================
--- lib/Transforms/Utils/SimplifyIndVar.cpp	(revision 165037)
+++ lib/Transforms/Utils/SimplifyIndVar.cpp	(working copy)
@@ -24,7 +24,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/Utils/SimplifyIndVar.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 
@@ -44,7 +44,7 @@
     Loop             *L;
     LoopInfo         *LI;
     ScalarEvolution  *SE;
-    const TargetData *TD; // May be NULL
+    const DataLayout *TD; // May be NULL
 
     SmallVectorImpl<WeakVH> &DeadInsts;
 
@@ -56,7 +56,7 @@
       L(Loop),
       LI(LPM->getAnalysisIfAvailable<LoopInfo>()),
       SE(SE),
-      TD(LPM->getAnalysisIfAvailable<TargetData>()),
+      TD(LPM->getAnalysisIfAvailable<DataLayout>()),
       DeadInsts(Dead),
       Changed(false) {
       assert(LI && "IV simplification requires LoopInfo");
Index: lib/Transforms/Utils/SimplifyInstructions.cpp
===================================================================
--- lib/Transforms/Utils/SimplifyInstructions.cpp	(revision 165037)
+++ lib/Transforms/Utils/SimplifyInstructions.cpp	(working copy)
@@ -23,7 +23,7 @@
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/Dominators.h"
 #include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils/Local.h"
@@ -46,7 +46,7 @@
     /// runOnFunction - Remove instructions that simplify.
     bool runOnFunction(Function &F) {
       const DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
-      const TargetData *TD = getAnalysisIfAvailable<TargetData>();
+      const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
       const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
       SmallPtrSet<const Instruction*, 8> S1, S2, *ToSimplify = &S1, *Next = &S2;
       bool Changed = false;
Index: lib/Transforms/Vectorize/BBVectorize.cpp
===================================================================
--- lib/Transforms/Vectorize/BBVectorize.cpp	(revision 165037)
+++ lib/Transforms/Vectorize/BBVectorize.cpp	(working copy)
@@ -41,7 +41,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/ValueHandle.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Vectorize.h"
 #include <algorithm>
@@ -178,7 +178,7 @@
       : BasicBlockPass(ID), Config(C) {
       AA = &P->getAnalysis<AliasAnalysis>();
       SE = &P->getAnalysis<ScalarEvolution>();
-      TD = P->getAnalysisIfAvailable<TargetData>();
+      TD = P->getAnalysisIfAvailable<DataLayout>();
     }
 
     typedef std::pair<Value *, Value *> ValuePair;
@@ -192,7 +192,7 @@
 
     AliasAnalysis *AA;
     ScalarEvolution *SE;
-    TargetData *TD;
+    DataLayout *TD;
 
     // FIXME: const correct?
 
@@ -364,7 +364,7 @@
     virtual bool runOnBasicBlock(BasicBlock &BB) {
       AA = &getAnalysis<AliasAnalysis>();
       SE = &getAnalysis<ScalarEvolution>();
-      TD = getAnalysisIfAvailable<TargetData>();
+      TD = getAnalysisIfAvailable<DataLayout>();
 
       return vectorizeBB(BB);
     }
Index: lib/VMCore/AutoUpgrade.cpp
===================================================================
--- lib/VMCore/AutoUpgrade.cpp	(revision 165037)
+++ lib/VMCore/AutoUpgrade.cpp	(working copy)
@@ -22,6 +22,8 @@
 #include "llvm/Support/CFG.h"
 #include "llvm/Support/CallSite.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/DataLayout.h"
+#include "llvm/IRBuilder.h"
 #include <cstring>
 using namespace llvm;
 
Index: lib/VMCore/CMakeLists.txt
===================================================================
--- lib/VMCore/CMakeLists.txt	(revision 165037)
+++ lib/VMCore/CMakeLists.txt	(working copy)
@@ -8,6 +8,7 @@
   ConstantFold.cpp
   Constants.cpp
   Core.cpp
+  DataLayout.cpp
   DebugInfo.cpp
   DebugLoc.cpp
   DIBuilder.cpp
Index: lib/VMCore/ConstantFold.cpp
===================================================================
--- lib/VMCore/ConstantFold.cpp	(revision 165037)
+++ lib/VMCore/ConstantFold.cpp	(working copy)
@@ -12,7 +12,7 @@
 // ConstantExpr::get* methods to automatically fold constants when possible.
 //
 // The current constant folding implementation is implemented in two pieces: the
-// pieces that don't need TargetData, and the pieces that do. This is to avoid
+// pieces that don't need DataLayout, and the pieces that do. This is to avoid
 // a dependence in VMCore on Target.
 //
 //===----------------------------------------------------------------------===//
Index: lib/VMCore/DataLayout.cpp
===================================================================
--- lib/VMCore/DataLayout.cpp	(revision 0)
+++ lib/VMCore/DataLayout.cpp	(working copy)
@@ -0,0 +1,720 @@
+//===-- DataLayout.cpp - Data size & alignment routines --------------------==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines target properties related to datatype size/offset/alignment
+// information.
+//
+// This structure should be created once, filled in if the defaults are not
+// correct and then passed around by const&.  None of the members functions
+// require modification to the object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/DataLayout.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/ADT/DenseMap.h"
+#include <algorithm>
+#include <cstdlib>
+using namespace llvm;
+
+// Handle the Pass registration stuff necessary to use DataLayout's.
+
+// Register the default SparcV9 implementation...
+INITIALIZE_PASS(DataLayout, "targetdata", "Target Data Layout", false, true)
+char DataLayout::ID = 0;
+
+//===----------------------------------------------------------------------===//
+// Support for StructLayout
+//===----------------------------------------------------------------------===//
+
+StructLayout::StructLayout(StructType *ST, const DataLayout &TD) {
+  assert(!ST->isOpaque() && "Cannot get layout of opaque structs");
+  StructAlignment = 0;
+  StructSize = 0;
+  NumElements = ST->getNumElements();
+
+  // Loop over each of the elements, placing them in memory.
+  for (unsigned i = 0, e = NumElements; i != e; ++i) {
+    Type *Ty = ST->getElementType(i);
+    unsigned TyAlign = ST->isPacked() ? 1 : TD.getABITypeAlignment(Ty);
+
+    // Add padding if necessary to align the data element properly.
+    if ((StructSize & (TyAlign-1)) != 0)
+      StructSize = DataLayout::RoundUpAlignment(StructSize, TyAlign);
+
+    // Keep track of maximum alignment constraint.
+    StructAlignment = std::max(TyAlign, StructAlignment);
+
+    MemberOffsets[i] = StructSize;
+    StructSize += TD.getTypeAllocSize(Ty); // Consume space for this data item
+  }
+
+  // Empty structures have alignment of 1 byte.
+  if (StructAlignment == 0) StructAlignment = 1;
+
+  // Add padding to the end of the struct so that it could be put in an array
+  // and all array elements would be aligned correctly.
+  if ((StructSize & (StructAlignment-1)) != 0)
+    StructSize = DataLayout::RoundUpAlignment(StructSize, StructAlignment);
+}
+
+
+/// getElementContainingOffset - Given a valid offset into the structure,
+/// return the structure index that contains it.
+unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const {
+  const uint64_t *SI =
+    std::upper_bound(&MemberOffsets[0], &MemberOffsets[NumElements], Offset);
+  assert(SI != &MemberOffsets[0] && "Offset not in structure type!");
+  --SI;
+  assert(*SI <= Offset && "upper_bound didn't work");
+  assert((SI == &MemberOffsets[0] || *(SI-1) <= Offset) &&
+         (SI+1 == &MemberOffsets[NumElements] || *(SI+1) > Offset) &&
+         "Upper bound didn't work!");
+
+  // Multiple fields can have the same offset if any of them are zero sized.
+  // For example, in { i32, [0 x i32], i32 }, searching for offset 4 will stop
+  // at the i32 element, because it is the last element at that offset.  This is
+  // the right one to return, because anything after it will have a higher
+  // offset, implying that this element is non-empty.
+  return SI-&MemberOffsets[0];
+}
+
+//===----------------------------------------------------------------------===//
+// TargetAlignElem, TargetAlign support
+//===----------------------------------------------------------------------===//
+
+TargetAlignElem
+TargetAlignElem::get(AlignTypeEnum align_type, unsigned abi_align,
+                     unsigned pref_align, uint32_t bit_width) {
+  assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
+  TargetAlignElem retval;
+  retval.AlignType = align_type;
+  retval.ABIAlign = abi_align;
+  retval.PrefAlign = pref_align;
+  retval.TypeBitWidth = bit_width;
+  return retval;
+}
+
+bool
+TargetAlignElem::operator==(const TargetAlignElem &rhs) const {
+  return (AlignType == rhs.AlignType
+          && ABIAlign == rhs.ABIAlign
+          && PrefAlign == rhs.PrefAlign
+          && TypeBitWidth == rhs.TypeBitWidth);
+}
+
+const TargetAlignElem
+DataLayout::InvalidAlignmentElem = TargetAlignElem::get((AlignTypeEnum) -1, 0, 0, 0);
+
+//===----------------------------------------------------------------------===//
+// PointerAlignElem, PointerAlign support
+//===----------------------------------------------------------------------===//
+
+PointerAlignElem
+PointerAlignElem::get(uint32_t addr_space, unsigned abi_align,
+                     unsigned pref_align, uint32_t bit_width) {
+  assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
+  PointerAlignElem retval;
+  retval.AddressSpace = addr_space;
+  retval.ABIAlign = abi_align;
+  retval.PrefAlign = pref_align;
+  retval.TypeBitWidth = bit_width;
+  return retval;
+}
+
+bool
+PointerAlignElem::operator==(const PointerAlignElem &rhs) const {
+  return (ABIAlign == rhs.ABIAlign
+          && AddressSpace == rhs.AddressSpace
+          && PrefAlign == rhs.PrefAlign
+          && TypeBitWidth == rhs.TypeBitWidth);
+}
+
+const PointerAlignElem
+DataLayout::InvalidPointerElem = PointerAlignElem::get(~0U, 0U, 0U, 0U);
+
+//===----------------------------------------------------------------------===//
+//                       DataLayout Class Implementation
+//===----------------------------------------------------------------------===//
+
+/// getInt - Get an integer ignoring errors.
+static int getInt(StringRef R) {
+  int Result = 0;
+  R.getAsInteger(10, Result);
+  return Result;
+}
+
+void DataLayout::init() {
+  initializeDataLayoutPass(*PassRegistry::getPassRegistry());
+
+  LayoutMap = 0;
+  LittleEndian = false;
+  StackNaturalAlign = 0;
+
+  // Default alignments
+  setAlignment(INTEGER_ALIGN,   1,  1, 1);   // i1
+  setAlignment(INTEGER_ALIGN,   1,  1, 8);   // i8
+  setAlignment(INTEGER_ALIGN,   2,  2, 16);  // i16
+  setAlignment(INTEGER_ALIGN,   4,  4, 32);  // i32
+  setAlignment(INTEGER_ALIGN,   4,  8, 64);  // i64
+  setAlignment(FLOAT_ALIGN,     2,  2, 16);  // half
+  setAlignment(FLOAT_ALIGN,     4,  4, 32);  // float
+  setAlignment(FLOAT_ALIGN,     8,  8, 64);  // double
+  setAlignment(FLOAT_ALIGN,    16, 16, 128); // ppcf128, quad, ...
+  setAlignment(VECTOR_ALIGN,    8,  8, 64);  // v2i32, v1i64, ...
+  setAlignment(VECTOR_ALIGN,   16, 16, 128); // v16i8, v8i16, v4i32, ...
+  setAlignment(AGGREGATE_ALIGN, 0,  8,  0);  // struct
+  setPointerAlignment(0, 8, 8, 8);
+}
+
+std::string DataLayout::parseSpecifier(StringRef Desc, DataLayout *td) {
+
+  if (td)
+    td->init();
+
+  while (!Desc.empty()) {
+    std::pair<StringRef, StringRef> Split = Desc.split('-');
+    StringRef Token = Split.first;
+    Desc = Split.second;
+
+    if (Token.empty())
+      continue;
+
+    Split = Token.split(':');
+    StringRef Specifier = Split.first;
+    Token = Split.second;
+
+    assert(!Specifier.empty() && "Can't be empty here");
+
+    switch (Specifier[0]) {
+    case 'E':
+      if (td)
+        td->LittleEndian = false;
+      break;
+    case 'e':
+      if (td)
+        td->LittleEndian = true;
+      break;
+    case 'p': {
+      int AddrSpace = 0;
+      if (Specifier.size() > 1) {
+        AddrSpace = getInt(Specifier.substr(1));
+      }
+      Split = Token.split(':');
+      int PointerMemSizeBits = getInt(Split.first);
+      if (PointerMemSizeBits < 0 || PointerMemSizeBits % 8 != 0)
+        return "invalid pointer size, must be a positive 8-bit multiple";
+
+      // Pointer ABI alignment.
+      Split = Split.second.split(':');
+      int PointerABIAlignBits = getInt(Split.first);
+      if (PointerABIAlignBits < 0 || PointerABIAlignBits % 8 != 0) {
+        return "invalid pointer ABI alignment, "
+               "must be a positive 8-bit multiple";
+      }
+
+      // Pointer preferred alignment.
+      Split = Split.second.split(':');
+      int PointerPrefAlignBits = getInt(Split.first);
+      if (PointerPrefAlignBits < 0 || PointerPrefAlignBits % 8 != 0) {
+        return "invalid pointer preferred alignment, "
+               "must be a positive 8-bit multiple";
+      }
+
+      if (PointerPrefAlignBits == 0)
+        PointerPrefAlignBits = PointerABIAlignBits;
+      if (td)
+        td->setPointerAlignment(AddrSpace, PointerABIAlignBits/8,
+            PointerPrefAlignBits/8, PointerMemSizeBits/8);
+      break;
+    }
+    case 'i':
+    case 'v':
+    case 'f':
+    case 'a':
+    case 's': {
+      AlignTypeEnum AlignType;
+      char field = Specifier[0];
+      switch (field) {
+      default:
+      case 'i': AlignType = INTEGER_ALIGN; break;
+      case 'v': AlignType = VECTOR_ALIGN; break;
+      case 'f': AlignType = FLOAT_ALIGN; break;
+      case 'a': AlignType = AGGREGATE_ALIGN; break;
+      case 's': AlignType = STACK_ALIGN; break;
+      }
+      int Size = getInt(Specifier.substr(1));
+      if (Size < 0) {
+        return std::string("invalid ") + field + "-size field, "
+               "must be positive";
+      }
+
+      Split = Token.split(':');
+      int ABIAlignBits = getInt(Split.first);
+      if (ABIAlignBits < 0 || ABIAlignBits % 8 != 0) {
+        return std::string("invalid ") + field +"-abi-alignment field, "
+               "must be a positive 8-bit multiple";
+      }
+      unsigned ABIAlign = ABIAlignBits / 8;
+
+      Split = Split.second.split(':');
+
+      int PrefAlignBits = getInt(Split.first);
+      if (PrefAlignBits < 0 || PrefAlignBits % 8 != 0) {
+        return std::string("invalid ") + field +"-preferred-alignment field, "
+               "must be a positive 8-bit multiple";
+      }
+      unsigned PrefAlign = PrefAlignBits / 8;
+      if (PrefAlign == 0)
+        PrefAlign = ABIAlign;
+     
+      if (td)
+        td->setAlignment(AlignType, ABIAlign, PrefAlign, Size);
+      break;
+    }
+    case 'n':  // Native integer types.
+      Specifier = Specifier.substr(1);
+      do {
+        int Width = getInt(Specifier);
+        if (Width <= 0) {
+          return std::string("invalid native integer size \'") + Specifier.str() +
+                 "\', must be a positive integer.";
+        }
+        if (td && Width != 0)
+          td->LegalIntWidths.push_back(Width);
+        Split = Token.split(':');
+        Specifier = Split.first;
+        Token = Split.second;
+      } while (!Specifier.empty() || !Token.empty());
+      break;
+    case 'S': { // Stack natural alignment.
+      int StackNaturalAlignBits = getInt(Specifier.substr(1));
+      if (StackNaturalAlignBits < 0 || StackNaturalAlignBits % 8 != 0) {
+        return "invalid natural stack alignment (S-field), "
+               "must be a positive 8-bit multiple";
+      }
+      if (td)
+        td->StackNaturalAlign = StackNaturalAlignBits / 8;
+      break;
+    }
+    default:
+      break;
+    }
+  }
+
+  return "";
+}
+
+/// Default ctor.
+///
+/// @note This has to exist, because this is a pass, but it should never be
+/// used.
+DataLayout::DataLayout() : ImmutablePass(ID) {
+  report_fatal_error("Bad DataLayout ctor used.  "
+                    "Tool did not specify a DataLayout to use?");
+}
+
+DataLayout::DataLayout(const Module *M)
+  : ImmutablePass(ID) {
+  std::string errMsg = parseSpecifier(M->getDataLayout(), this);
+  assert(errMsg == "" && "Module M has malformed target data layout string.");
+  (void)errMsg;
+}
+
+void
+DataLayout::setAlignment(AlignTypeEnum align_type, unsigned abi_align,
+                         unsigned pref_align, uint32_t bit_width) {
+  assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
+  for (unsigned i = 0, e = Alignments.size(); i != e; ++i) {
+    if (Alignments[i].AlignType == align_type &&
+        Alignments[i].TypeBitWidth == bit_width) {
+      // Update the abi, preferred alignments.
+      Alignments[i].ABIAlign = abi_align;
+      Alignments[i].PrefAlign = pref_align;
+      return;
+    }
+  }
+
+  Alignments.push_back(TargetAlignElem::get(align_type, abi_align,
+                                            pref_align, bit_width));
+}
+
+void
+DataLayout::setPointerAlignment(uint32_t addr_space, unsigned abi_align,
+                         unsigned pref_align, uint32_t bit_width) {
+  assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
+  DenseMap<unsigned, PointerAlignElem>::iterator val = Pointers.find(addr_space);
+  if (val == Pointers.end()) {
+    Pointers[addr_space] = PointerAlignElem::get(addr_space,
+          abi_align, pref_align, bit_width);
+  } else {
+    val->second.ABIAlign = abi_align;
+    val->second.PrefAlign = pref_align;
+    val->second.TypeBitWidth = bit_width;
+  }
+}
+
+/// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or
+/// preferred if ABIInfo = false) the target wants for the specified datatype.
+unsigned DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
+                                      uint32_t BitWidth, bool ABIInfo,
+                                      Type *Ty) const {
+  // Check to see if we have an exact match and remember the best match we see.
+  int BestMatchIdx = -1;
+  int LargestInt = -1;
+  for (unsigned i = 0, e = Alignments.size(); i != e; ++i) {
+    if (Alignments[i].AlignType == AlignType &&
+        Alignments[i].TypeBitWidth == BitWidth)
+      return ABIInfo ? Alignments[i].ABIAlign : Alignments[i].PrefAlign;
+
+    // The best match so far depends on what we're looking for.
+     if (AlignType == INTEGER_ALIGN &&
+         Alignments[i].AlignType == INTEGER_ALIGN) {
+      // The "best match" for integers is the smallest size that is larger than
+      // the BitWidth requested.
+      if (Alignments[i].TypeBitWidth > BitWidth && (BestMatchIdx == -1 ||
+           Alignments[i].TypeBitWidth < Alignments[BestMatchIdx].TypeBitWidth))
+        BestMatchIdx = i;
+      // However, if there isn't one that's larger, then we must use the
+      // largest one we have (see below)
+      if (LargestInt == -1 ||
+          Alignments[i].TypeBitWidth > Alignments[LargestInt].TypeBitWidth)
+        LargestInt = i;
+    }
+  }
+
+  // Okay, we didn't find an exact solution.  Fall back here depending on what
+  // is being looked for.
+  if (BestMatchIdx == -1) {
+    // If we didn't find an integer alignment, fall back on most conservative.
+    if (AlignType == INTEGER_ALIGN) {
+      BestMatchIdx = LargestInt;
+    } else {
+      assert(AlignType == VECTOR_ALIGN && "Unknown alignment type!");
+
+      // By default, use natural alignment for vector types. This is consistent
+      // with what clang and llvm-gcc do.
+      unsigned Align = getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
+      Align *= cast<VectorType>(Ty)->getNumElements();
+      // If the alignment is not a power of 2, round up to the next power of 2.
+      // This happens for non-power-of-2 length vectors.
+      if (Align & (Align-1))
+        Align = NextPowerOf2(Align);
+      return Align;
+    }
+  }
+
+  // Since we got a "best match" index, just return it.
+  return ABIInfo ? Alignments[BestMatchIdx].ABIAlign
+                 : Alignments[BestMatchIdx].PrefAlign;
+}
+
+namespace {
+
+class StructLayoutMap {
+  typedef DenseMap<StructType*, StructLayout*> LayoutInfoTy;
+  LayoutInfoTy LayoutInfo;
+
+public:
+  virtual ~StructLayoutMap() {
+    // Remove any layouts.
+    for (LayoutInfoTy::iterator I = LayoutInfo.begin(), E = LayoutInfo.end();
+         I != E; ++I) {
+      StructLayout *Value = I->second;
+      Value->~StructLayout();
+      free(Value);
+    }
+  }
+
+  StructLayout *&operator[](StructType *STy) {
+    return LayoutInfo[STy];
+  }
+
+  // for debugging...
+  virtual void dump() const {}
+};
+
+} // end anonymous namespace
+
+DataLayout::~DataLayout() {
+  delete static_cast<StructLayoutMap*>(LayoutMap);
+}
+
+const StructLayout *DataLayout::getStructLayout(StructType *Ty) const {
+  if (!LayoutMap)
+    LayoutMap = new StructLayoutMap();
+
+  StructLayoutMap *STM = static_cast<StructLayoutMap*>(LayoutMap);
+  StructLayout *&SL = (*STM)[Ty];
+  if (SL) return SL;
+
+  // Otherwise, create the struct layout.  Because it is variable length, we
+  // malloc it, then use placement new.
+  int NumElts = Ty->getNumElements();
+  StructLayout *L =
+    (StructLayout *)malloc(sizeof(StructLayout)+(NumElts-1) * sizeof(uint64_t));
+
+  // Set SL before calling StructLayout's ctor.  The ctor could cause other
+  // entries to be added to the map, invalidating our reference.
+  SL = L;
+
+  new (L) StructLayout(Ty, *this);
+
+  return L;
+}
+
+std::string DataLayout::getStringRepresentation() const {
+  std::string Result;
+  raw_string_ostream OS(Result);
+
+  OS << (LittleEndian ? "e" : "E");
+  for (DenseMap<unsigned,PointerAlignElem>::const_iterator
+      pib = Pointers.begin(), pie = Pointers.end();
+      pib != pie; ++pib) {
+    const PointerAlignElem &PI = pib->second;
+    OS << "-p";
+    if (PI.AddressSpace) {
+      OS << PI.AddressSpace;
+    }
+    OS << ":" << PI.TypeBitWidth*8 << ':' << PI.ABIAlign*8
+       << ':' << PI.PrefAlign*8;
+  }
+  OS << "-S" << StackNaturalAlign*8;
+
+  for (unsigned i = 0, e = Alignments.size(); i != e; ++i) {
+    const TargetAlignElem &AI = Alignments[i];
+    OS << '-' << (char)AI.AlignType << AI.TypeBitWidth << ':'
+       << AI.ABIAlign*8 << ':' << AI.PrefAlign*8;
+  }
+
+  if (!LegalIntWidths.empty()) {
+    OS << "-n" << (unsigned)LegalIntWidths[0];
+
+    for (unsigned i = 1, e = LegalIntWidths.size(); i != e; ++i)
+      OS << ':' << (unsigned)LegalIntWidths[i];
+  }
+  return OS.str();
+}
+
+
+uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const {
+  assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
+  switch (Ty->getTypeID()) {
+  case Type::LabelTyID:
+    return getPointerSizeInBits(0);
+  case Type::PointerTyID: {
+    unsigned AS = dyn_cast<PointerType>(Ty)->getAddressSpace();
+    return getPointerSizeInBits(AS);
+    }
+  case Type::ArrayTyID: {
+    ArrayType *ATy = cast<ArrayType>(Ty);
+    return getTypeAllocSizeInBits(ATy->getElementType())*ATy->getNumElements();
+  }
+  case Type::StructTyID:
+    // Get the layout annotation... which is lazily created on demand.
+    return getStructLayout(cast<StructType>(Ty))->getSizeInBits();
+  case Type::IntegerTyID:
+    return cast<IntegerType>(Ty)->getBitWidth();
+  case Type::VoidTyID:
+    return 8;
+  case Type::HalfTyID:
+    return 16;
+  case Type::FloatTyID:
+    return 32;
+  case Type::DoubleTyID:
+  case Type::X86_MMXTyID:
+    return 64;
+  case Type::PPC_FP128TyID:
+  case Type::FP128TyID:
+    return 128;
+  // In memory objects this is always aligned to a higher boundary, but
+  // only 80 bits contain information.
+  case Type::X86_FP80TyID:
+    return 80;
+  case Type::VectorTyID:
+    return cast<VectorType>(Ty)->getBitWidth();
+  default:
+    llvm_unreachable("DataLayout::getTypeSizeInBits(): Unsupported type");
+  }
+}
+
+/*!
+  \param abi_or_pref Flag that determines which alignment is returned. true
+  returns the ABI alignment, false returns the preferred alignment.
+  \param Ty The underlying type for which alignment is determined.
+
+  Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref
+  == false) for the requested type \a Ty.
+ */
+unsigned DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
+  int AlignType = -1;
+
+  assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
+  switch (Ty->getTypeID()) {
+  // Early escape for the non-numeric types.
+  case Type::LabelTyID:
+    return (abi_or_pref
+            ? getPointerABIAlignment(0)
+            : getPointerPrefAlignment(0));
+  case Type::PointerTyID: {
+    unsigned AS = dyn_cast<PointerType>(Ty)->getAddressSpace();
+    return (abi_or_pref
+            ? getPointerABIAlignment(AS)
+            : getPointerPrefAlignment(AS));
+    }
+  case Type::ArrayTyID:
+    return getAlignment(cast<ArrayType>(Ty)->getElementType(), abi_or_pref);
+
+  case Type::StructTyID: {
+    // Packed structure types always have an ABI alignment of one.
+    if (cast<StructType>(Ty)->isPacked() && abi_or_pref)
+      return 1;
+
+    // Get the layout annotation... which is lazily created on demand.
+    const StructLayout *Layout = getStructLayout(cast<StructType>(Ty));
+    unsigned Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty);
+    return std::max(Align, Layout->getAlignment());
+  }
+  case Type::IntegerTyID:
+  case Type::VoidTyID:
+    AlignType = INTEGER_ALIGN;
+    break;
+  case Type::HalfTyID:
+  case Type::FloatTyID:
+  case Type::DoubleTyID:
+  // PPC_FP128TyID and FP128TyID have different data contents, but the
+  // same size and alignment, so they look the same here.
+  case Type::PPC_FP128TyID:
+  case Type::FP128TyID:
+  case Type::X86_FP80TyID:
+    AlignType = FLOAT_ALIGN;
+    break;
+  case Type::X86_MMXTyID:
+  case Type::VectorTyID:
+    AlignType = VECTOR_ALIGN;
+    break;
+  default:
+    llvm_unreachable("Bad type for getAlignment!!!");
+  }
+
+  return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSizeInBits(Ty),
+                          abi_or_pref, Ty);
+}
+
+unsigned DataLayout::getABITypeAlignment(Type *Ty) const {
+  return getAlignment(Ty, true);
+}
+
+/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
+/// an integer type of the specified bitwidth.
+unsigned DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
+  return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, 0);
+}
+
+
+unsigned DataLayout::getCallFrameTypeAlignment(Type *Ty) const {
+  for (unsigned i = 0, e = Alignments.size(); i != e; ++i)
+    if (Alignments[i].AlignType == STACK_ALIGN)
+      return Alignments[i].ABIAlign;
+
+  return getABITypeAlignment(Ty);
+}
+
+unsigned DataLayout::getPrefTypeAlignment(Type *Ty) const {
+  return getAlignment(Ty, false);
+}
+
+unsigned DataLayout::getPreferredTypeAlignmentShift(Type *Ty) const {
+  unsigned Align = getPrefTypeAlignment(Ty);
+  assert(!(Align & (Align-1)) && "Alignment is not a power of two!");
+  return Log2_32(Align);
+}
+
+/// getIntPtrType - Return an unsigned integer type with the same bit width as
+/// a pointer in the specified address space.
+IntegerType *DataLayout::getIntPtrType(LLVMContext &C,
+                                       unsigned AddressSpace) const {
+  return IntegerType::get(C, getPointerSizeInBits(AddressSpace));
+}
+
+
+uint64_t DataLayout::getIndexedOffset(Type *ptrTy,
+                                      ArrayRef<Value *> Indices) const {
+  Type *Ty = ptrTy;
+  assert(Ty->isPointerTy() && "Illegal argument for getIndexedOffset()");
+  uint64_t Result = 0;
+
+  generic_gep_type_iterator<Value* const*>
+    TI = gep_type_begin(ptrTy, Indices);
+  for (unsigned CurIDX = 0, EndIDX = Indices.size(); CurIDX != EndIDX;
+       ++CurIDX, ++TI) {
+    if (StructType *STy = dyn_cast<StructType>(*TI)) {
+      assert(Indices[CurIDX]->getType() ==
+             Type::getInt32Ty(ptrTy->getContext()) &&
+             "Illegal struct idx");
+      unsigned FieldNo = cast<ConstantInt>(Indices[CurIDX])->getZExtValue();
+
+      // Get structure layout information...
+      const StructLayout *Layout = getStructLayout(STy);
+
+      // Add in the offset, as calculated by the structure layout info...
+      Result += Layout->getElementOffset(FieldNo);
+
+      // Update Ty to refer to current element
+      Ty = STy->getElementType(FieldNo);
+    } else {
+      // Update Ty to refer to current element
+      Ty = cast<SequentialType>(Ty)->getElementType();
+
+      // Get the array index and the size of each array element.
+      if (int64_t arrayIdx = cast<ConstantInt>(Indices[CurIDX])->getSExtValue())
+        Result += (uint64_t)arrayIdx * getTypeAllocSize(Ty);
+    }
+  }
+
+  return Result;
+}
+
+/// getPreferredAlignment - Return the preferred alignment of the specified
+/// global.  This includes an explicitly requested alignment (if the global
+/// has one).
+unsigned DataLayout::getPreferredAlignment(const GlobalVariable *GV) const {
+  Type *ElemType = GV->getType()->getElementType();
+  unsigned Alignment = getPrefTypeAlignment(ElemType);
+  unsigned GVAlignment = GV->getAlignment();
+  if (GVAlignment >= Alignment) {
+    Alignment = GVAlignment;
+  } else if (GVAlignment != 0) {
+    Alignment = std::max(GVAlignment, getABITypeAlignment(ElemType));
+  }
+
+  if (GV->hasInitializer() && GVAlignment == 0) {
+    if (Alignment < 16) {
+      // If the global is not external, see if it is large.  If so, give it a
+      // larger alignment.
+      if (getTypeSizeInBits(ElemType) > 128)
+        Alignment = 16;    // 16-byte alignment.
+    }
+  }
+  return Alignment;
+}
+
+/// getPreferredAlignmentLog - Return the preferred alignment of the
+/// specified global, returned in log form.  This includes an explicitly
+/// requested alignment (if the global has one).
+unsigned DataLayout::getPreferredAlignmentLog(const GlobalVariable *GV) const {
+  return Log2_32(getPreferredAlignment(GV));
+}
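
A reference sketch (not part of this patch): how client code can exercise the renamed class once it lands, using only methods defined or inherited here. The main() harness and the field layout are illustrative; the layout string is the same one the InstructionsTest hunk below passes to the DataLayout constructor.

// Illustrative sketch only; not added by this patch.
#include "llvm/LLVMContext.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Support/DataLayout.h"   // was: llvm/Target/TargetData.h
#include "llvm/Support/raw_ostream.h"
#include <vector>
using namespace llvm;

int main() {
  LLVMContext Ctx;
  // Same layout string the InstructionsTest hunk below exercises.
  DataLayout DL("e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32"
                ":32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80"
                ":128:128-n8:16:32:64-S128");

  // Build { i32, double } and query the renamed API.
  std::vector<Type*> Fields;
  Fields.push_back(Type::getInt32Ty(Ctx));
  Fields.push_back(Type::getDoubleTy(Ctx));
  StructType *ST = StructType::get(Ctx, Fields);

  errs() << "i32 size in bits:     " << DL.getTypeSizeInBits(Fields[0]) << "\n";
  errs() << "struct alloc size:    " << DL.getTypeAllocSize(ST) << "\n";
  errs() << "struct ABI alignment: " << DL.getABITypeAlignment(ST) << "\n";
  errs() << "offset of field 1:    "
         << DL.getStructLayout(ST)->getElementOffset(1) << "\n";
  errs() << "intptr width in bits: "
         << DL.getIntPtrType(Ctx, 0)->getBitWidth() << "\n";
  return 0;
}

With this layout string the struct should report an alloc size of 16, ABI alignment 8, and the double at offset 8, since f64 carries 64-bit ABI alignment.
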
Index: lib/VMCore/Value.cpp
===================================================================
--- lib/VMCore/Value.cpp	(revision 165037)
+++ lib/VMCore/Value.cpp	(working copy)
@@ -394,7 +394,7 @@
   // It's also not always safe to follow a bitcast, for example:
   //   bitcast i8* (alloca i8) to i32*
   // would result in a 4-byte load from a 1-byte alloca. Some cases could
-  // be handled using TargetData to check sizes and alignments though.
+  // be handled using DataLayout to check sizes and alignments though.
 
   // These are obviously ok.
   if (isa<AllocaInst>(V)) return true;
Index: lib/VMCore/Verifier.cpp
===================================================================
--- lib/VMCore/Verifier.cpp	(revision 165037)
+++ lib/VMCore/Verifier.cpp	(working copy)
@@ -71,6 +71,7 @@
 #include "llvm/Support/ConstantRange.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/DataLayout.h"
 #include 
 #include 
 using namespace llvm;
Index: tools/bugpoint/ExtractFunction.cpp
===================================================================
--- tools/bugpoint/ExtractFunction.cpp	(revision 165037)
+++ tools/bugpoint/ExtractFunction.cpp	(working copy)
@@ -25,7 +25,7 @@
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/Transforms/Utils/Cloning.h"
 #include "llvm/Transforms/Utils/CodeExtractor.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/FileUtilities.h"
Index: tools/bugpoint/OptimizerDriver.cpp
===================================================================
--- tools/bugpoint/OptimizerDriver.cpp	(revision 165037)
+++ tools/bugpoint/OptimizerDriver.cpp	(working copy)
@@ -20,11 +20,11 @@
 #include "llvm/PassManager.h"
 #include "llvm/Analysis/Verifier.h"
 #include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Support/FileUtilities.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/SystemUtils.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/ToolOutputFile.h"
 #include "llvm/Support/Path.h"
 #include "llvm/Support/Program.h"
Index: tools/llc/llc.cpp
===================================================================
--- tools/llc/llc.cpp	(revision 165037)
+++ tools/llc/llc.cpp	(working copy)
@@ -34,7 +34,7 @@
 #include "llvm/Support/Signals.h"
 #include "llvm/Support/TargetRegistry.h"
 #include "llvm/Support/TargetSelect.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include 
@@ -506,10 +506,10 @@
   PM.add(TLI);
 
   // Add the target data from the target machine, if it exists, or the module.
-  if (const TargetData *TD = Target.getTargetData())
-    PM.add(new TargetData(*TD));
+  if (const DataLayout *TD = Target.getDataLayout())
+    PM.add(new DataLayout(*TD));
   else
-    PM.add(new TargetData(mod));
+    PM.add(new DataLayout(mod));
 
   // Override default to generate verbose assembly.
   Target.setAsmVerbosityDefault(true);
Index: tools/llvm-extract/llvm-extract.cpp
===================================================================
--- tools/llvm-extract/llvm-extract.cpp	(revision 165037)
+++ tools/llvm-extract/llvm-extract.cpp	(working copy)
@@ -18,7 +18,6 @@
 #include "llvm/Assembly/PrintModulePass.h"
 #include "llvm/Bitcode/ReaderWriter.h"
 #include "llvm/Transforms/IPO.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/IRReader.h"
 #include "llvm/Support/ManagedStatic.h"
@@ -27,6 +26,7 @@
 #include "llvm/Support/SystemUtils.h"
 #include "llvm/Support/Signals.h"
 #include "llvm/Support/Regex.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SetVector.h"
 #include 
@@ -206,7 +206,7 @@
   // In addition to deleting all other functions, we also want to spiff it
   // up a little bit.  Do this now.
   PassManager Passes;
-  Passes.add(new TargetData(M.get())); // Use correct TargetData
+  Passes.add(new DataLayout(M.get())); // Use correct DataLayout
 
   std::vector<GlobalValue*> Gvs(GVs.begin(), GVs.end());
 
Index: tools/lto/LTOCodeGenerator.cpp
===================================================================
--- tools/lto/LTOCodeGenerator.cpp	(revision 165037)
+++ tools/lto/LTOCodeGenerator.cpp	(working copy)
@@ -29,7 +29,6 @@
 #include "llvm/MC/SubtargetFeature.h"
 #include "llvm/Target/Mangler.h"
 #include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Transforms/IPO.h"
@@ -40,6 +39,7 @@
 #include "llvm/Support/ToolOutputFile.h"
 #include "llvm/Support/Host.h"
 #include "llvm/Support/Signals.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/TargetRegistry.h"
 #include "llvm/Support/TargetSelect.h"
 #include "llvm/Support/system_error.h"
Index: tools/opt/opt.cpp
===================================================================
--- tools/opt/opt.cpp	(revision 165037)
+++ tools/opt/opt.cpp	(working copy)
@@ -23,7 +23,6 @@
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/RegionPass.h"
 #include "llvm/Analysis/CallGraph.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/ADT/StringSet.h"
@@ -36,6 +35,7 @@
 #include "llvm/Support/PluginLoader.h"
 #include "llvm/Support/PrettyStackTrace.h"
 #include "llvm/Support/SystemUtils.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/ToolOutputFile.h"
 #include "llvm/LinkAllPasses.h"
 #include "llvm/LinkAllVMCore.h"
@@ -568,13 +568,13 @@
     TLI->disableAllFunctions();
   Passes.add(TLI);
 
-  // Add an appropriate TargetData instance for this module.
-  TargetData *TD = 0;
+  // Add an appropriate DataLayout instance for this module.
+  DataLayout *TD = 0;
   const std::string &ModuleDataLayout = M.get()->getDataLayout();
   if (!ModuleDataLayout.empty())
-    TD = new TargetData(ModuleDataLayout);
+    TD = new DataLayout(ModuleDataLayout);
   else if (!DefaultDataLayout.empty())
-    TD = new TargetData(DefaultDataLayout);
+    TD = new DataLayout(DefaultDataLayout);
 
   if (TD)
     Passes.add(TD);
@@ -583,7 +583,7 @@
   if (OptLevelO1 || OptLevelO2 || OptLevelOs || OptLevelOz || OptLevelO3) {
     FPasses.reset(new FunctionPassManager(M.get()));
     if (TD)
-      FPasses->add(new TargetData(*TD));
+      FPasses->add(new DataLayout(*TD));
   }
 
   if (PrintBreakpoints) {
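
A reference sketch (not part of this patch): the same fallback logic the opt.cpp hunk above now uses, pulled into a standalone helper. The name addDataLayoutPass and the DefaultLayout parameter are hypothetical, introduced only for the example.

// Illustrative sketch only; not added by this patch.
#include "llvm/Module.h"
#include "llvm/PassManager.h"
#include "llvm/Support/DataLayout.h"   // was: llvm/Target/TargetData.h
#include <string>
using namespace llvm;

// Hypothetical helper: prefer the module's own layout string, fall back to a
// caller-supplied default, and add no pass when neither is available.
static void addDataLayoutPass(PassManager &PM, Module &M,
                              const std::string &DefaultLayout) {
  DataLayout *TD = 0;                        // was: TargetData *TD = 0;
  if (!M.getDataLayout().empty())
    TD = new DataLayout(M.getDataLayout());
  else if (!DefaultLayout.empty())
    TD = new DataLayout(DefaultLayout);
  if (TD)
    PM.add(TD);  // the PassManager takes ownership of the DataLayout pass
}
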
Index: unittests/VMCore/InstructionsTest.cpp
===================================================================
--- unittests/VMCore/InstructionsTest.cpp	(revision 165037)
+++ unittests/VMCore/InstructionsTest.cpp	(working copy)
@@ -17,7 +17,7 @@
 #include "llvm/Operator.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "gtest/gtest.h"
 
 namespace llvm {
@@ -183,7 +183,7 @@
   EXPECT_NE(S3, Gep3);
 
   int64_t Offset;
-  TargetData TD("e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3"
+  DataLayout TD("e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3"
                 "2:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80"
                 ":128:128-n8:16:32:64-S128");
   // Make sure we don't crash
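
A reference sketch (not part of this patch): what the rename looks like from an out-of-tree pass, mirroring the getAnalysisIfAvailable<DataLayout>() updates in the PassManagerTest hunks below. The pass name MyAlignmentCheck and its registration string are made up for the example.

// Illustrative sketch only; not added by this patch.
#include "llvm/Pass.h"
#include "llvm/Function.h"
#include "llvm/Support/DataLayout.h"   // was: llvm/Target/TargetData.h
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
struct MyAlignmentCheck : public FunctionPass {
  static char ID;
  MyAlignmentCheck() : FunctionPass(ID) {}

  virtual bool runOnFunction(Function &F) {
    // was: getAnalysisIfAvailable<TargetData>()
    if (const DataLayout *TD = getAnalysisIfAvailable<DataLayout>())
      errs() << F.getName() << ": pointers are "
             << TD->getPointerSizeInBits(0) << " bits\n";
    return false;  // analysis only, nothing is modified
  }
};
} // end anonymous namespace

char MyAlignmentCheck::ID = 0;
static RegisterPass<MyAlignmentCheck>
  X("my-align-check", "DataLayout rename example pass", false, false);
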
Index: unittests/VMCore/PassManagerTest.cpp
===================================================================
--- unittests/VMCore/PassManagerTest.cpp	(revision 165037)
+++ unittests/VMCore/PassManagerTest.cpp	(working copy)
@@ -14,7 +14,7 @@
 #include "llvm/Pass.h"
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/CallGraphSCCPass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/Support/DataLayout.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/Constants.h"
@@ -94,7 +94,7 @@
         initializeModuleNDMPass(*PassRegistry::getPassRegistry());
       }
       virtual bool runOnModule(Module &M) {
-        EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
+        EXPECT_TRUE(getAnalysisIfAvailable<DataLayout>());
         run++;
         return false;
       }
@@ -167,7 +167,7 @@
         initializeCGPassPass(*PassRegistry::getPassRegistry());
       }
       virtual bool runOnSCC(CallGraphSCC &SCMM) {
-        EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
+        EXPECT_TRUE(getAnalysisIfAvailable<DataLayout>());
         run();
         return false;
       }
@@ -177,7 +177,7 @@
     public:
       virtual bool runOnFunction(Function &F) {
         // FIXME: PR4112
-        // EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
+        // EXPECT_TRUE(getAnalysisIfAvailable<DataLayout>());
         run();
         return false;
       }
@@ -204,7 +204,7 @@
         return false;
       }
       virtual bool runOnLoop(Loop *L, LPPassManager &LPM) {
-        EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
+        EXPECT_TRUE(getAnalysisIfAvailable<DataLayout>());
         run();
         return false;
       }
@@ -241,7 +241,7 @@
         return false;
       }
       virtual bool runOnBasicBlock(BasicBlock &BB) {
-        EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
+        EXPECT_TRUE(getAnalysisIfAvailable<DataLayout>());
         run();
         return false;
       }
@@ -266,7 +266,7 @@
         initializeFPassPass(*PassRegistry::getPassRegistry());
       }
       virtual bool runOnModule(Module &M) {
-        EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
+        EXPECT_TRUE(getAnalysisIfAvailable<DataLayout>());
         for (Module::iterator I=M.begin(),E=M.end(); I != E; ++I) {
           Function &F = *I;
           {
@@ -292,7 +292,7 @@
       mNDM->run = mNDNM->run = mDNM->run = mNDM2->run = 0;
 
       PassManager Passes;
-      Passes.add(new TargetData(&M));
+      Passes.add(new DataLayout(&M));
       Passes.add(mNDM2);
       Passes.add(mNDM);
       Passes.add(mNDNM);
@@ -316,7 +316,7 @@
       mNDM->run = mNDNM->run = mDNM->run = mNDM2->run = 0;
 
       PassManager Passes;
-      Passes.add(new TargetData(&M));
+      Passes.add(new DataLayout(&M));
       Passes.add(mNDM);
       Passes.add(mNDNM);
       Passes.add(mNDM2);// invalidates mNDM needed by mDNM
@@ -338,7 +338,7 @@
       OwningPtr<Module> M(makeLLVMModule());
       T *P = new T();
       PassManager Passes;
-      Passes.add(new TargetData(M.get()));
+      Passes.add(new DataLayout(M.get()));
       Passes.add(P);
       Passes.run(*M);
       T::finishedOK(run);
@@ -349,7 +349,7 @@
       Module *M = makeLLVMModule();
       T *P = new T();
       PassManager Passes;
-      Passes.add(new TargetData(M));
+      Passes.add(new DataLayout(M));
       Passes.add(P);
       Passes.run(*M);
       T::finishedOK(run, N);
@@ -387,7 +387,7 @@
         SCOPED_TRACE("Running OnTheFlyTest");
         struct OnTheFlyTest *O = new OnTheFlyTest();
         PassManager Passes;
-        Passes.add(new TargetData(M));
+        Passes.add(new DataLayout(M));
         Passes.add(O);
         Passes.run(*M);
 
Index: utils/TableGen/CallingConvEmitter.cpp
===================================================================
--- utils/TableGen/CallingConvEmitter.cpp	(revision 165037)
+++ utils/TableGen/CallingConvEmitter.cpp	(working copy)
@@ -177,12 +177,12 @@
       if (Size)
         O << Size << ", ";
       else
-        O << "\n" << IndentStr << "  State.getTarget().getTargetData()"
+        O << "\n" << IndentStr << "  State.getTarget().getDataLayout()"
           "->getTypeAllocSize(EVT(LocVT).getTypeForEVT(State.getContext())), ";
       if (Align)
         O << Align;
       else
-        O << "\n" << IndentStr << "  State.getTarget().getTargetData()"
+        O << "\n" << IndentStr << "  State.getTarget().getDataLayout()"
           "->getABITypeAlignment(EVT(LocVT).getTypeForEVT(State.getContext()))";
       if (Action->isSubClassOf("CCAssignToStackWithShadow"))
         O << ", " << getQualifiedName(Action->getValueAsDef("ShadowReg"));