[llvm] r201827 - Rename many DataLayout variables from TD to DL.
Sean Silva
silvas at purdue.edu
Sat Mar 1 15:50:28 PST 2014
I woke up in the middle of last night and seemed to remember that nobody
said thanks for doing this long-needed fixup.
Well, Thanks!
-- Sean Silva
On Thu, Feb 20, 2014 at 7:06 PM, Rafael Espindola <rafael.espindola at gmail.com> wrote:
> Author: rafael
> Date: Thu Feb 20 18:06:31 2014
> New Revision: 201827
>
> URL: http://llvm.org/viewvc/llvm-project?rev=201827&view=rev
> Log:
> Rename many DataLayout variables from TD to DL.
>
> I am really sorry for the noise, but the current state where some parts of
> the code use TD (from the old name: TargetData) and other parts use DL makes
> it hard to write a patch that changes where those variables come from and
> how they are passed along.
>
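> The change itself is purely mechanical: every DataLayout pointer that was
> still spelled TD is now spelled DL. A minimal sketch of the before/after
> pattern (the helper function below is made up for illustration; it is not a
> function from this patch):
>
>   #include "llvm/IR/DataLayout.h"
>   using namespace llvm;
>
>   // Before the rename: the variable name was a leftover from the old
>   // TargetData class.
>   //   static unsigned pointerWidthInBits(const DataLayout *TD) {
>   //     return TD ? TD->getPointerSizeInBits() : 0;
>   //   }
>
>   // After the rename: same code, with the variable named after the
>   // DataLayout type it actually points to. Callers may still pass null
>   // when no DataLayout is available.
>   static unsigned pointerWidthInBits(const DataLayout *DL) {
>     return DL ? DL->getPointerSizeInBits() : 0;
>   }
>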
> Modified:
> llvm/trunk/lib/Analysis/IPA/InlineCost.cpp
> llvm/trunk/lib/Analysis/InstructionSimplify.cpp
> llvm/trunk/lib/Analysis/Lint.cpp
> llvm/trunk/lib/Transforms/IPO/ConstantMerge.cpp
> llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp
> llvm/trunk/lib/Transforms/IPO/MergeFunctions.cpp
> llvm/trunk/lib/Transforms/InstCombine/InstCombine.h
> llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp
> llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
> llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
> llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp
> llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
> llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
> llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
> llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp
> llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp
> llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp
> llvm/trunk/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
> llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
> llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp
> llvm/trunk/lib/Transforms/Instrumentation/BoundsChecking.cpp
> llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp
> llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
> llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp
> llvm/trunk/lib/Transforms/Scalar/GVN.cpp
> llvm/trunk/lib/Transforms/Scalar/GlobalMerge.cpp
> llvm/trunk/lib/Transforms/Scalar/IndVarSimplify.cpp
> llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp
> llvm/trunk/lib/Transforms/Scalar/LICM.cpp
> llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
> llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp
> llvm/trunk/lib/Transforms/Scalar/SCCP.cpp
> llvm/trunk/lib/Transforms/Scalar/ScalarReplAggregates.cpp
> llvm/trunk/lib/Transforms/Utils/CloneFunction.cpp
> llvm/trunk/lib/Transforms/Utils/SimplifyCFG.cpp
> llvm/trunk/lib/Transforms/Utils/SimplifyIndVar.cpp
> llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp
> llvm/trunk/lib/Transforms/Vectorize/BBVectorize.cpp
>
> Modified: llvm/trunk/lib/Analysis/IPA/InlineCost.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/IPA/InlineCost.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Analysis/IPA/InlineCost.cpp (original)
> +++ llvm/trunk/lib/Analysis/IPA/InlineCost.cpp Thu Feb 20 18:06:31 2014
> @@ -43,7 +43,7 @@ class CallAnalyzer : public InstVisitor<
> friend class InstVisitor<CallAnalyzer, bool>;
>
> // DataLayout if available, or null.
> - const DataLayout *const TD;
> + const DataLayout *const DL;
>
> /// The TargetTransformInfo available for this compilation.
> const TargetTransformInfo &TTI;
> @@ -142,9 +142,9 @@ class CallAnalyzer : public InstVisitor<
> bool visitUnreachableInst(UnreachableInst &I);
>
> public:
> - CallAnalyzer(const DataLayout *TD, const TargetTransformInfo &TTI,
> + CallAnalyzer(const DataLayout *DL, const TargetTransformInfo &TTI,
> Function &Callee, int Threshold)
> - : TD(TD), TTI(TTI), F(Callee), Threshold(Threshold), Cost(0),
> + : DL(DL), TTI(TTI), F(Callee), Threshold(Threshold), Cost(0),
> IsCallerRecursive(false), IsRecursiveCall(false),
> ExposesReturnsTwice(false), HasDynamicAlloca(false),
> ContainsNoDuplicateCall(false), HasReturn(false),
> HasIndirectBr(false),
> @@ -256,10 +256,10 @@ bool CallAnalyzer::isGEPOffsetConstant(G
> /// Returns false if unable to compute the offset for any reason. Respects any
> /// simplified values known during the analysis of this callsite.
> bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
> - if (!TD)
> + if (!DL)
> return false;
>
> - unsigned IntPtrWidth = TD->getPointerSizeInBits();
> + unsigned IntPtrWidth = DL->getPointerSizeInBits();
> assert(IntPtrWidth == Offset.getBitWidth());
>
> for (gep_type_iterator GTI = gep_type_begin(GEP), GTE =
> gep_type_end(GEP);
> @@ -275,12 +275,12 @@ bool CallAnalyzer::accumulateGEPOffset(G
> // Handle a struct index, which adds its field offset to the pointer.
> if (StructType *STy = dyn_cast<StructType>(*GTI)) {
> unsigned ElementIdx = OpC->getZExtValue();
> - const StructLayout *SL = TD->getStructLayout(STy);
> + const StructLayout *SL = DL->getStructLayout(STy);
> Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
> continue;
> }
>
> - APInt TypeSize(IntPtrWidth,
> TD->getTypeAllocSize(GTI.getIndexedType()));
> + APInt TypeSize(IntPtrWidth,
> DL->getTypeAllocSize(GTI.getIndexedType()));
> Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
> }
> return true;
> @@ -293,7 +293,7 @@ bool CallAnalyzer::visitAlloca(AllocaIns
> // Accumulate the allocated size.
> if (I.isStaticAlloca()) {
> Type *Ty = I.getAllocatedType();
> - AllocatedSize += (TD ? TD->getTypeAllocSize(Ty) :
> + AllocatedSize += (DL ? DL->getTypeAllocSize(Ty) :
> Ty->getPrimitiveSizeInBits());
> }
>
> @@ -330,7 +330,7 @@ bool CallAnalyzer::visitGetElementPtr(Ge
>
> // Try to fold GEPs of constant-offset call site argument pointers. This
> // requires target data and inbounds GEPs.
> - if (TD && I.isInBounds()) {
> + if (DL && I.isInBounds()) {
> // Check if we have a base + offset for the pointer.
> Value *Ptr = I.getPointerOperand();
> std::pair<Value *, APInt> BaseAndOffset =
> ConstantOffsetPtrs.lookup(Ptr);
> @@ -412,7 +412,7 @@ bool CallAnalyzer::visitPtrToInt(PtrToIn
> // Track base/offset pairs when converted to a plain integer provided the
> // integer is large enough to represent the pointer.
> unsigned IntegerSize = I.getType()->getScalarSizeInBits();
> - if (TD && IntegerSize >= TD->getPointerSizeInBits()) {
> + if (DL && IntegerSize >= DL->getPointerSizeInBits()) {
> std::pair<Value *, APInt> BaseAndOffset
> = ConstantOffsetPtrs.lookup(I.getOperand(0));
> if (BaseAndOffset.first)
> @@ -449,7 +449,7 @@ bool CallAnalyzer::visitIntToPtr(IntToPt
> // modifications provided the integer is not too large.
> Value *Op = I.getOperand(0);
> unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
> - if (TD && IntegerSize <= TD->getPointerSizeInBits()) {
> + if (DL && IntegerSize <= DL->getPointerSizeInBits()) {
> std::pair<Value *, APInt> BaseAndOffset =
> ConstantOffsetPtrs.lookup(Op);
> if (BaseAndOffset.first)
> ConstantOffsetPtrs[&I] = BaseAndOffset;
> @@ -488,7 +488,7 @@ bool CallAnalyzer::visitUnaryInstruction
> COp = SimplifiedValues.lookup(Operand);
> if (COp)
> if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
> - COp, TD)) {
> + COp, DL)) {
> SimplifiedValues[&I] = C;
> return true;
> }
> @@ -602,7 +602,7 @@ bool CallAnalyzer::visitBinaryOperator(B
> if (!isa<Constant>(RHS))
> if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
> RHS = SimpleRHS;
> - Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, TD);
> + Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);
> if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
> SimplifiedValues[&I] = C;
> return true;
> @@ -784,7 +784,7 @@ bool CallAnalyzer::visitCallSite(CallSit
> // during devirtualization and so we want to give it a hefty bonus for
> // inlining, but cap that bonus in the event that inlining wouldn't pan
> // out. Pretend to inline the function, with a custom threshold.
> - CallAnalyzer CA(TD, TTI, *F, InlineConstants::IndirectCallThreshold);
> + CallAnalyzer CA(DL, TTI, *F, InlineConstants::IndirectCallThreshold);
> if (CA.analyzeCall(CS)) {
> // We were able to inline the indirect call! Subtract the cost from the
> // bonus we want to apply, but don't go below zero.
> @@ -931,10 +931,10 @@ bool CallAnalyzer::analyzeBlock(BasicBlo
> /// returns 0 if V is not a pointer, and returns the constant '0' if there are
> /// no constant offsets applied.
> ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
> - if (!TD || !V->getType()->isPointerTy())
> + if (!DL || !V->getType()->isPointerTy())
> return 0;
>
> - unsigned IntPtrWidth = TD->getPointerSizeInBits();
> + unsigned IntPtrWidth = DL->getPointerSizeInBits();
> APInt Offset = APInt::getNullValue(IntPtrWidth);
>
> // Even though we don't look through PHI nodes, we could be called on an
> @@ -958,7 +958,7 @@ ConstantInt *CallAnalyzer::stripAndCompu
> assert(V->getType()->isPointerTy() && "Unexpected operand type!");
> } while (Visited.insert(V));
>
> - Type *IntPtrTy = TD->getIntPtrType(V->getContext());
> + Type *IntPtrTy = DL->getIntPtrType(V->getContext());
> return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
> }
>
> @@ -993,12 +993,12 @@ bool CallAnalyzer::analyzeCall(CallSite
> // Give out bonuses per argument, as the instructions setting them up will
> // be gone after inlining.
> for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
> - if (TD && CS.isByValArgument(I)) {
> + if (DL && CS.isByValArgument(I)) {
> // We approximate the number of loads and stores needed by dividing the
> // size of the byval type by the target's pointer size.
> PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
> - unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
> - unsigned PointerSize = TD->getPointerSizeInBits();
> + unsigned TypeSize = DL->getTypeSizeInBits(PTy->getElementType());
> + unsigned PointerSize = DL->getPointerSizeInBits();
> // Ceiling division.
> unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
>
>
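> A side note on the byval bonus hunk above: the "ceiling division" there just
> rounds the estimated store count up to the next whole store. A tiny
> standalone example of that arithmetic (not code from the patch):
>
>   #include <cstdio>
>
>   int main() {
>     // e.g. a 96-bit byval struct copied with 64-bit pointer-sized stores
>     unsigned TypeSize = 96, PointerSize = 64;
>     // Same expression as in CallAnalyzer::analyzeCall above.
>     unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
>     printf("NumStores = %u\n", NumStores); // prints 2
>     return 0;
>   }
>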
> Modified: llvm/trunk/lib/Analysis/InstructionSimplify.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/InstructionSimplify.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Analysis/InstructionSimplify.cpp (original)
> +++ llvm/trunk/lib/Analysis/InstructionSimplify.cpp Thu Feb 20 18:06:31 2014
> @@ -42,12 +42,12 @@ STATISTIC(NumFactor , "Number of factori
> STATISTIC(NumReassoc, "Number of reassociations");
>
> struct Query {
> - const DataLayout *TD;
> + const DataLayout *DL;
> const TargetLibraryInfo *TLI;
> const DominatorTree *DT;
>
> - Query(const DataLayout *td, const TargetLibraryInfo *tli,
> - const DominatorTree *dt) : TD(td), TLI(tli), DT(dt) {}
> + Query(const DataLayout *DL, const TargetLibraryInfo *tli,
> + const DominatorTree *dt) : DL(DL), TLI(tli), DT(dt) {}
> };
>
> static Value *SimplifyAndInst(Value *, Value *, const Query &, unsigned);
> @@ -595,7 +595,7 @@ static Value *SimplifyAddInst(Value *Op0
> if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
> Constant *Ops[] = { CLHS, CRHS };
> return ConstantFoldInstOperands(Instruction::Add, CLHS->getType(),
> Ops,
> - Q.TD, Q.TLI);
> + Q.DL, Q.TLI);
> }
>
> // Canonicalize the constant to the RHS.
> @@ -651,9 +651,9 @@ static Value *SimplifyAddInst(Value *Op0
> }
>
> Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool
> isNUW,
> - const DataLayout *TD, const
> TargetLibraryInfo *TLI,
> + const DataLayout *DL, const
> TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
> + return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> @@ -667,17 +667,17 @@ Value *llvm::SimplifyAddInst(Value *Op0,
> /// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
> /// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
> /// folding.
> -static Constant *stripAndComputeConstantOffsets(const DataLayout *TD,
> +static Constant *stripAndComputeConstantOffsets(const DataLayout *DL,
> Value *&V,
> bool AllowNonInbounds =
> false) {
> assert(V->getType()->getScalarType()->isPointerTy());
>
> // Without DataLayout, just be conservative for now. Theoretically, more could
> // be done in this case.
> - if (!TD)
> + if (!DL)
> return ConstantInt::get(IntegerType::get(V->getContext(), 64), 0);
>
> - Type *IntPtrTy = TD->getIntPtrType(V->getType())->getScalarType();
> + Type *IntPtrTy = DL->getIntPtrType(V->getType())->getScalarType();
> APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());
>
> // Even though we don't look through PHI nodes, we could be called on an
> @@ -687,7 +687,7 @@ static Constant *stripAndComputeConstant
> do {
> if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
> if ((!AllowNonInbounds && !GEP->isInBounds()) ||
> - !GEP->accumulateConstantOffset(*TD, Offset))
> + !GEP->accumulateConstantOffset(*DL, Offset))
> break;
> V = GEP->getPointerOperand();
> } else if (Operator::getOpcode(V) == Instruction::BitCast) {
> @@ -712,10 +712,10 @@ static Constant *stripAndComputeConstant
>
> /// \brief Compute the constant difference between two pointer values.
> /// If the difference is not a constant, returns zero.
> -static Constant *computePointerDifference(const DataLayout *TD,
> +static Constant *computePointerDifference(const DataLayout *DL,
> Value *LHS, Value *RHS) {
> - Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS);
> - Constant *RHSOffset = stripAndComputeConstantOffsets(TD, RHS);
> + Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
> + Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
>
> // If LHS and RHS are not related via constant offsets to the same base
> // value, there is nothing we can do here.
> @@ -737,7 +737,7 @@ static Value *SimplifySubInst(Value *Op0
> if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
> Constant *Ops[] = { CLHS, CRHS };
> return ConstantFoldInstOperands(Instruction::Sub, CLHS->getType(),
> - Ops, Q.TD, Q.TLI);
> + Ops, Q.DL, Q.TLI);
> }
>
> // X - undef -> undef
> @@ -831,7 +831,7 @@ static Value *SimplifySubInst(Value *Op0
> // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
> if (match(Op0, m_PtrToInt(m_Value(X))) &&
> match(Op1, m_PtrToInt(m_Value(Y))))
> - if (Constant *Result = computePointerDifference(Q.TD, X, Y))
> + if (Constant *Result = computePointerDifference(Q.DL, X, Y))
> return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);
>
> // Mul distributes over Sub. Try some generic simplifications based on this.
> @@ -857,9 +857,9 @@ static Value *SimplifySubInst(Value *Op0
> }
>
> Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool
> isNUW,
> - const DataLayout *TD, const
> TargetLibraryInfo *TLI,
> + const DataLayout *DL, const
> TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
> + return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> @@ -871,7 +871,7 @@ static Value *SimplifyFAddInst(Value *Op
> if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
> Constant *Ops[] = { CLHS, CRHS };
> return ConstantFoldInstOperands(Instruction::FAdd, CLHS->getType(),
> - Ops, Q.TD, Q.TLI);
> + Ops, Q.DL, Q.TLI);
> }
>
> // Canonicalize the constant to the RHS.
> @@ -913,7 +913,7 @@ static Value *SimplifyFSubInst(Value *Op
> if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
> Constant *Ops[] = { CLHS, CRHS };
> return ConstantFoldInstOperands(Instruction::FSub, CLHS->getType(),
> - Ops, Q.TD, Q.TLI);
> + Ops, Q.DL, Q.TLI);
> }
> }
>
> @@ -951,7 +951,7 @@ static Value *SimplifyFMulInst(Value *Op
> if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
> Constant *Ops[] = { CLHS, CRHS };
> return ConstantFoldInstOperands(Instruction::FMul, CLHS->getType(),
> - Ops, Q.TD, Q.TLI);
> + Ops, Q.DL, Q.TLI);
> }
>
> // Canonicalize the constant to the RHS.
> @@ -977,7 +977,7 @@ static Value *SimplifyMulInst(Value *Op0
> if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
> Constant *Ops[] = { CLHS, CRHS };
> return ConstantFoldInstOperands(Instruction::Mul, CLHS->getType(),
> - Ops, Q.TD, Q.TLI);
> + Ops, Q.DL, Q.TLI);
> }
>
> // Canonicalize the constant to the RHS.
> @@ -1035,29 +1035,29 @@ static Value *SimplifyMulInst(Value *Op0
> }
>
> Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
> - const DataLayout *TD, const
> TargetLibraryInfo *TLI,
> + const DataLayout *DL, const
> TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyFAddInst(Op0, Op1, FMF, Query (TD, TLI, DT),
> RecursionLimit);
> + return ::SimplifyFAddInst(Op0, Op1, FMF, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
> - const DataLayout *TD, const
> TargetLibraryInfo *TLI,
> + const DataLayout *DL, const
> TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyFSubInst(Op0, Op1, FMF, Query (TD, TLI, DT),
> RecursionLimit);
> + return ::SimplifyFSubInst(Op0, Op1, FMF, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1,
> FastMathFlags FMF,
> - const DataLayout *TD,
> + const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyFMulInst(Op0, Op1, FMF, Query (TD, TLI, DT),
> RecursionLimit);
> + return ::SimplifyFMulInst(Op0, Op1, FMF, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> -Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *TD,
> +Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyMulInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
> + return ::SimplifyMulInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
> }
>
> /// SimplifyDiv - Given operands for an SDiv or UDiv, see if we can
> @@ -1067,7 +1067,7 @@ static Value *SimplifyDiv(Instruction::B
> if (Constant *C0 = dyn_cast<Constant>(Op0)) {
> if (Constant *C1 = dyn_cast<Constant>(Op1)) {
> Constant *Ops[] = { C0, C1 };
> - return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.TD,
> Q.TLI);
> + return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL,
> Q.TLI);
> }
> }
>
> @@ -1142,10 +1142,10 @@ static Value *SimplifySDivInst(Value *Op
> return 0;
> }
>
> -Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout
> *TD,
> +Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout
> *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifySDivInst(Op0, Op1, Query (TD, TLI, DT),
> RecursionLimit);
> + return ::SimplifySDivInst(Op0, Op1, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> /// SimplifyUDivInst - Given operands for a UDiv, see if we can
> @@ -1158,10 +1158,10 @@ static Value *SimplifyUDivInst(Value *Op
> return 0;
> }
>
> -Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout
> *TD,
> +Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout
> *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyUDivInst(Op0, Op1, Query (TD, TLI, DT),
> RecursionLimit);
> + return ::SimplifyUDivInst(Op0, Op1, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const Query &Q,
> @@ -1177,10 +1177,10 @@ static Value *SimplifyFDivInst(Value *Op
> return 0;
> }
>
> -Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout
> *TD,
> +Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout
> *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyFDivInst(Op0, Op1, Query (TD, TLI, DT),
> RecursionLimit);
> + return ::SimplifyFDivInst(Op0, Op1, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> /// SimplifyRem - Given operands for an SRem or URem, see if we can
> @@ -1190,7 +1190,7 @@ static Value *SimplifyRem(Instruction::B
> if (Constant *C0 = dyn_cast<Constant>(Op0)) {
> if (Constant *C1 = dyn_cast<Constant>(Op1)) {
> Constant *Ops[] = { C0, C1 };
> - return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.TD,
> Q.TLI);
> + return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL,
> Q.TLI);
> }
> }
>
> @@ -1247,10 +1247,10 @@ static Value *SimplifySRemInst(Value *Op
> return 0;
> }
>
> -Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout
> *TD,
> +Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout
> *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifySRemInst(Op0, Op1, Query (TD, TLI, DT),
> RecursionLimit);
> + return ::SimplifySRemInst(Op0, Op1, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> /// SimplifyURemInst - Given operands for a URem, see if we can
> @@ -1263,10 +1263,10 @@ static Value *SimplifyURemInst(Value *Op
> return 0;
> }
>
> -Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout
> *TD,
> +Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout
> *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyURemInst(Op0, Op1, Query (TD, TLI, DT),
> RecursionLimit);
> + return ::SimplifyURemInst(Op0, Op1, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const Query &,
> @@ -1282,10 +1282,10 @@ static Value *SimplifyFRemInst(Value *Op
> return 0;
> }
>
> -Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout
> *TD,
> +Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout
> *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyFRemInst(Op0, Op1, Query (TD, TLI, DT),
> RecursionLimit);
> + return ::SimplifyFRemInst(Op0, Op1, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> /// isUndefShift - Returns true if a shift by \c Amount always yields undef.
> @@ -1322,7 +1322,7 @@ static Value *SimplifyShift(unsigned Opc
> if (Constant *C0 = dyn_cast<Constant>(Op0)) {
> if (Constant *C1 = dyn_cast<Constant>(Op1)) {
> Constant *Ops[] = { C0, C1 };
> - return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.TD,
> Q.TLI);
> + return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL,
> Q.TLI);
> }
> }
>
> @@ -1372,9 +1372,9 @@ static Value *SimplifyShlInst(Value *Op0
> }
>
> Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool
> isNUW,
> - const DataLayout *TD, const
> TargetLibraryInfo *TLI,
> + const DataLayout *DL, const
> TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
> + return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> @@ -1403,10 +1403,10 @@ static Value *SimplifyLShrInst(Value *Op
> }
>
> Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
> - const DataLayout *TD,
> + const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyLShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
> + return ::SimplifyLShrInst(Op0, Op1, isExact, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> @@ -1439,10 +1439,10 @@ static Value *SimplifyAShrInst(Value *Op
> }
>
> Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
> - const DataLayout *TD,
> + const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyAShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
> + return ::SimplifyAShrInst(Op0, Op1, isExact, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> @@ -1454,7 +1454,7 @@ static Value *SimplifyAndInst(Value *Op0
> if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
> Constant *Ops[] = { CLHS, CRHS };
> return ConstantFoldInstOperands(Instruction::And, CLHS->getType(),
> - Ops, Q.TD, Q.TLI);
> + Ops, Q.DL, Q.TLI);
> }
>
> // Canonicalize the constant to the RHS.
> @@ -1539,10 +1539,10 @@ static Value *SimplifyAndInst(Value *Op0
> return 0;
> }
>
> -Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *TD,
> +Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyAndInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
> + return ::SimplifyAndInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
> }
>
> /// SimplifyOrInst - Given operands for an Or, see if we can
> @@ -1553,7 +1553,7 @@ static Value *SimplifyOrInst(Value *Op0,
> if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
> Constant *Ops[] = { CLHS, CRHS };
> return ConstantFoldInstOperands(Instruction::Or, CLHS->getType(),
> - Ops, Q.TD, Q.TLI);
> + Ops, Q.DL, Q.TLI);
> }
>
> // Canonicalize the constant to the RHS.
> @@ -1633,10 +1633,10 @@ static Value *SimplifyOrInst(Value *Op0,
> return 0;
> }
>
> -Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *TD,
> +Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyOrInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
> + return ::SimplifyOrInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
> }
>
> /// SimplifyXorInst - Given operands for a Xor, see if we can
> @@ -1647,7 +1647,7 @@ static Value *SimplifyXorInst(Value *Op0
> if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
> Constant *Ops[] = { CLHS, CRHS };
> return ConstantFoldInstOperands(Instruction::Xor, CLHS->getType(),
> - Ops, Q.TD, Q.TLI);
> + Ops, Q.DL, Q.TLI);
> }
>
> // Canonicalize the constant to the RHS.
> @@ -1693,10 +1693,10 @@ static Value *SimplifyXorInst(Value *Op0
> return 0;
> }
>
> -Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *TD,
> +Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyXorInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
> + return ::SimplifyXorInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
> }
>
> static Type *GetCompareTy(Value *Op) {
> @@ -1751,7 +1751,7 @@ static Value *ExtractEquivalentCondition
> // If the C and C++ standards are ever made sufficiently restrictive in this
> // area, it may be possible to update LLVM's semantics accordingly and
> // reinstate this optimization.
> -static Constant *computePointerICmp(const DataLayout *TD,
> +static Constant *computePointerICmp(const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> CmpInst::Predicate Pred,
> Value *LHS, Value *RHS) {
> @@ -1793,8 +1793,8 @@ static Constant *computePointerICmp(cons
> // numerous hazards. AliasAnalysis and its utilities rely on special rules
> // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
> // doesn't need to guarantee pointer inequality when it says NoAlias.
> - Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS);
> - Constant *RHSOffset = stripAndComputeConstantOffsets(TD, RHS);
> + Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
> + Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
>
> // If LHS and RHS are related via constant offsets to the same base
> // value, we can replace it with an icmp which just compares the offsets.
> @@ -1838,8 +1838,8 @@ static Constant *computePointerICmp(cons
> ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
> uint64_t LHSSize, RHSSize;
> if (LHSOffsetCI && RHSOffsetCI &&
> - getObjectSize(LHS, LHSSize, TD, TLI) &&
> - getObjectSize(RHS, RHSSize, TD, TLI)) {
> + getObjectSize(LHS, LHSSize, DL, TLI) &&
> + getObjectSize(RHS, RHSSize, DL, TLI)) {
> const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
> const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
> if (!LHSOffsetValue.isNegative() &&
> @@ -1865,8 +1865,8 @@ static Constant *computePointerICmp(cons
> // equality comparisons concerning the result. We avoid walking the whole
> // chain again by starting where the last calls to
> // stripAndComputeConstantOffsets left off and accumulate the offsets.
> - Constant *LHSNoBound = stripAndComputeConstantOffsets(TD, LHS, true);
> - Constant *RHSNoBound = stripAndComputeConstantOffsets(TD, RHS, true);
> + Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
> + Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
> if (LHS == RHS)
> return ConstantExpr::getICmp(Pred,
> ConstantExpr::getAdd(LHSOffset,
> LHSNoBound),
> @@ -1886,7 +1886,7 @@ static Value *SimplifyICmpInst(unsigned
>
> if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
> if (Constant *CRHS = dyn_cast<Constant>(RHS))
> - return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.TD,
> Q.TLI);
> + return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL,
> Q.TLI);
>
> // If we have a constant, make sure it is on the RHS.
> std::swap(LHS, RHS);
> @@ -1950,40 +1950,40 @@ static Value *SimplifyICmpInst(unsigned
> return getTrue(ITy);
> case ICmpInst::ICMP_EQ:
> case ICmpInst::ICMP_ULE:
> - if (isKnownNonZero(LHS, Q.TD))
> + if (isKnownNonZero(LHS, Q.DL))
> return getFalse(ITy);
> break;
> case ICmpInst::ICMP_NE:
> case ICmpInst::ICMP_UGT:
> - if (isKnownNonZero(LHS, Q.TD))
> + if (isKnownNonZero(LHS, Q.DL))
> return getTrue(ITy);
> break;
> case ICmpInst::ICMP_SLT:
> - ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
> + ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
> if (LHSKnownNegative)
> return getTrue(ITy);
> if (LHSKnownNonNegative)
> return getFalse(ITy);
> break;
> case ICmpInst::ICMP_SLE:
> - ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
> + ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
> if (LHSKnownNegative)
> return getTrue(ITy);
> - if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.TD))
> + if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL))
> return getFalse(ITy);
> break;
> case ICmpInst::ICMP_SGE:
> - ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
> + ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
> if (LHSKnownNegative)
> return getFalse(ITy);
> if (LHSKnownNonNegative)
> return getTrue(ITy);
> break;
> case ICmpInst::ICMP_SGT:
> - ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
> + ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
> if (LHSKnownNegative)
> return getFalse(ITy);
> - if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.TD))
> + if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL))
> return getTrue(ITy);
> break;
> }
> @@ -2066,8 +2066,8 @@ static Value *SimplifyICmpInst(unsigned
>
> // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
> // if the integer type is the same size as the pointer type.
> - if (MaxRecurse && Q.TD && isa<PtrToIntInst>(LI) &&
> - Q.TD->getTypeSizeInBits(SrcTy) ==
> DstTy->getPrimitiveSizeInBits()) {
> + if (MaxRecurse && Q.DL && isa<PtrToIntInst>(LI) &&
> + Q.DL->getTypeSizeInBits(SrcTy) ==
> DstTy->getPrimitiveSizeInBits()) {
> if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
> // Transfer the cast to the constant.
> if (Value *V = SimplifyICmpInst(Pred, SrcOp,
> @@ -2287,7 +2287,7 @@ static Value *SimplifyICmpInst(unsigned
> break;
> case ICmpInst::ICMP_SGT:
> case ICmpInst::ICMP_SGE:
> - ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.TD);
> + ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL);
> if (!KnownNonNegative)
> break;
> // fall-through
> @@ -2297,7 +2297,7 @@ static Value *SimplifyICmpInst(unsigned
> return getFalse(ITy);
> case ICmpInst::ICMP_SLT:
> case ICmpInst::ICMP_SLE:
> - ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.TD);
> + ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL);
> if (!KnownNonNegative)
> break;
> // fall-through
> @@ -2316,7 +2316,7 @@ static Value *SimplifyICmpInst(unsigned
> break;
> case ICmpInst::ICMP_SGT:
> case ICmpInst::ICMP_SGE:
> - ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.TD);
> + ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL);
> if (!KnownNonNegative)
> break;
> // fall-through
> @@ -2326,7 +2326,7 @@ static Value *SimplifyICmpInst(unsigned
> return getTrue(ITy);
> case ICmpInst::ICMP_SLT:
> case ICmpInst::ICMP_SLE:
> - ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.TD);
> + ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL);
> if (!KnownNonNegative)
> break;
> // fall-through
> @@ -2569,7 +2569,7 @@ static Value *SimplifyICmpInst(unsigned
> // Simplify comparisons of related pointers using a powerful, recursive
> // GEP-walk when we have target data available..
> if (LHS->getType()->isPointerTy())
> - if (Constant *C = computePointerICmp(Q.TD, Q.TLI, Pred, LHS, RHS))
> + if (Constant *C = computePointerICmp(Q.DL, Q.TLI, Pred, LHS, RHS))
> return C;
>
> if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
> @@ -2609,10 +2609,10 @@ static Value *SimplifyICmpInst(unsigned
> }
>
> Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
> - const DataLayout *TD,
> + const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
> + return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> @@ -2625,7 +2625,7 @@ static Value *SimplifyFCmpInst(unsigned
>
> if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
> if (Constant *CRHS = dyn_cast<Constant>(RHS))
> - return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.TD,
> Q.TLI);
> + return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL,
> Q.TLI);
>
> // If we have a constant, make sure it is on the RHS.
> std::swap(LHS, RHS);
> @@ -2706,10 +2706,10 @@ static Value *SimplifyFCmpInst(unsigned
> }
>
> Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
> - const DataLayout *TD,
> + const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
> + return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> @@ -2745,10 +2745,10 @@ static Value *SimplifySelectInst(Value *
> }
>
> Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value
> *FalseVal,
> - const DataLayout *TD,
> + const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (TD, TLI,
> DT),
> + return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (DL, TLI,
> DT),
> RecursionLimit);
> }
>
> @@ -2776,9 +2776,9 @@ static Value *SimplifyGEPInst(ArrayRef<V
> if (match(Ops[1], m_Zero()))
> return Ops[0];
> // getelementptr P, N -> P if P points to a type of zero size.
> - if (Q.TD) {
> + if (Q.DL) {
> Type *Ty = PtrTy->getElementType();
> - if (Ty->isSized() && Q.TD->getTypeAllocSize(Ty) == 0)
> + if (Ty->isSized() && Q.DL->getTypeAllocSize(Ty) == 0)
> return Ops[0];
> }
> }
> @@ -2791,10 +2791,10 @@ static Value *SimplifyGEPInst(ArrayRef<V
> return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]),
> Ops.slice(1));
> }
>
> -Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD,
> +Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyGEPInst(Ops, Query (TD, TLI, DT), RecursionLimit);
> + return ::SimplifyGEPInst(Ops, Query (DL, TLI, DT), RecursionLimit);
> }
>
> /// SimplifyInsertValueInst - Given operands for an InsertValueInst, see if we
> @@ -2828,10 +2828,10 @@ static Value *SimplifyInsertValueInst(Va
>
> Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
> ArrayRef<unsigned> Idxs,
> - const DataLayout *TD,
> + const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (TD, TLI, DT),
> + return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> @@ -2871,15 +2871,15 @@ static Value *SimplifyPHINode(PHINode *P
>
> static Value *SimplifyTruncInst(Value *Op, Type *Ty, const Query &Q,
> unsigned) {
> if (Constant *C = dyn_cast<Constant>(Op))
> - return ConstantFoldInstOperands(Instruction::Trunc, Ty, C, Q.TD,
> Q.TLI);
> + return ConstantFoldInstOperands(Instruction::Trunc, Ty, C, Q.DL,
> Q.TLI);
>
> return 0;
> }
>
> -Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD,
> +Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyTruncInst(Op, Ty, Query (TD, TLI, DT), RecursionLimit);
> + return ::SimplifyTruncInst(Op, Ty, Query (DL, TLI, DT), RecursionLimit);
> }
>
> //=== Helper functions for higher up the class hierarchy.
> @@ -2924,7 +2924,7 @@ static Value *SimplifyBinOp(unsigned Opc
> if (Constant *CLHS = dyn_cast<Constant>(LHS))
> if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
> Constant *COps[] = {CLHS, CRHS};
> - return ConstantFoldInstOperands(Opcode, LHS->getType(), COps,
> Q.TD,
> + return ConstantFoldInstOperands(Opcode, LHS->getType(), COps,
> Q.DL,
> Q.TLI);
> }
>
> @@ -2950,9 +2950,9 @@ static Value *SimplifyBinOp(unsigned Opc
> }
>
> Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
> - const DataLayout *TD, const TargetLibraryInfo
> *TLI,
> + const DataLayout *DL, const TargetLibraryInfo
> *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyBinOp(Opcode, LHS, RHS, Query (TD, TLI, DT),
> RecursionLimit);
> + return ::SimplifyBinOp(Opcode, LHS, RHS, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> /// SimplifyCmpInst - Given operands for a CmpInst, see if we can
> @@ -2965,9 +2965,9 @@ static Value *SimplifyCmpInst(unsigned P
> }
>
> Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
> - const DataLayout *TD, const
> TargetLibraryInfo *TLI,
> + const DataLayout *DL, const
> TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
> + return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (DL, TLI, DT),
> RecursionLimit);
> }
>
> @@ -3040,136 +3040,136 @@ static Value *SimplifyCall(Value *V, Ite
> }
>
> Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin,
> - User::op_iterator ArgEnd, const DataLayout *TD,
> + User::op_iterator ArgEnd, const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(TD, TLI, DT),
> + return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(DL, TLI, DT),
> RecursionLimit);
> }
>
> Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args,
> - const DataLayout *TD, const TargetLibraryInfo
> *TLI,
> + const DataLayout *DL, const TargetLibraryInfo
> *TLI,
> const DominatorTree *DT) {
> - return ::SimplifyCall(V, Args.begin(), Args.end(), Query(TD, TLI, DT),
> + return ::SimplifyCall(V, Args.begin(), Args.end(), Query(DL, TLI, DT),
> RecursionLimit);
> }
>
> /// SimplifyInstruction - See if we can compute a simplified version of this
> /// instruction. If not, this returns null.
> -Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *TD,
> +Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> Value *Result;
>
> switch (I->getOpcode()) {
> default:
> - Result = ConstantFoldInstruction(I, TD, TLI);
> + Result = ConstantFoldInstruction(I, DL, TLI);
> break;
> case Instruction::FAdd:
> Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
> - I->getFastMathFlags(), TD, TLI, DT);
> + I->getFastMathFlags(), DL, TLI, DT);
> break;
> case Instruction::Add:
> Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
> cast<BinaryOperator>(I)->hasNoSignedWrap(),
> cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
> - TD, TLI, DT);
> + DL, TLI, DT);
> break;
> case Instruction::FSub:
> Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
> - I->getFastMathFlags(), TD, TLI, DT);
> + I->getFastMathFlags(), DL, TLI, DT);
> break;
> case Instruction::Sub:
> Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
> cast<BinaryOperator>(I)->hasNoSignedWrap(),
> cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
> - TD, TLI, DT);
> + DL, TLI, DT);
> break;
> case Instruction::FMul:
> Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
> - I->getFastMathFlags(), TD, TLI, DT);
> + I->getFastMathFlags(), DL, TLI, DT);
> break;
> case Instruction::Mul:
> - Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), TD, TLI,
> DT);
> + Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), DL, TLI,
> DT);
> break;
> case Instruction::SDiv:
> - Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), TD,
> TLI, DT);
> + Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), DL,
> TLI, DT);
> break;
> case Instruction::UDiv:
> - Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), TD,
> TLI, DT);
> + Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), DL,
> TLI, DT);
> break;
> case Instruction::FDiv:
> - Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), TD,
> TLI, DT);
> + Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), DL,
> TLI, DT);
> break;
> case Instruction::SRem:
> - Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), TD,
> TLI, DT);
> + Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), DL,
> TLI, DT);
> break;
> case Instruction::URem:
> - Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), TD,
> TLI, DT);
> + Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), DL,
> TLI, DT);
> break;
> case Instruction::FRem:
> - Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), TD,
> TLI, DT);
> + Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), DL,
> TLI, DT);
> break;
> case Instruction::Shl:
> Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
> cast<BinaryOperator>(I)->hasNoSignedWrap(),
> cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
> - TD, TLI, DT);
> + DL, TLI, DT);
> break;
> case Instruction::LShr:
> Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
> cast<BinaryOperator>(I)->isExact(),
> - TD, TLI, DT);
> + DL, TLI, DT);
> break;
> case Instruction::AShr:
> Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
> cast<BinaryOperator>(I)->isExact(),
> - TD, TLI, DT);
> + DL, TLI, DT);
> break;
> case Instruction::And:
> - Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), TD, TLI,
> DT);
> + Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), DL, TLI,
> DT);
> break;
> case Instruction::Or:
> - Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), TD, TLI,
> DT);
> + Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), DL, TLI,
> DT);
> break;
> case Instruction::Xor:
> - Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), TD, TLI,
> DT);
> + Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), DL, TLI,
> DT);
> break;
> case Instruction::ICmp:
> Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
> - I->getOperand(0), I->getOperand(1), TD,
> TLI, DT);
> + I->getOperand(0), I->getOperand(1), DL,
> TLI, DT);
> break;
> case Instruction::FCmp:
> Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
> - I->getOperand(0), I->getOperand(1), TD,
> TLI, DT);
> + I->getOperand(0), I->getOperand(1), DL,
> TLI, DT);
> break;
> case Instruction::Select:
> Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
> - I->getOperand(2), TD, TLI, DT);
> + I->getOperand(2), DL, TLI, DT);
> break;
> case Instruction::GetElementPtr: {
> SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
> - Result = SimplifyGEPInst(Ops, TD, TLI, DT);
> + Result = SimplifyGEPInst(Ops, DL, TLI, DT);
> break;
> }
> case Instruction::InsertValue: {
> InsertValueInst *IV = cast<InsertValueInst>(I);
> Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
> IV->getInsertedValueOperand(),
> - IV->getIndices(), TD, TLI, DT);
> + IV->getIndices(), DL, TLI, DT);
> break;
> }
> case Instruction::PHI:
> - Result = SimplifyPHINode(cast<PHINode>(I), Query (TD, TLI, DT));
> + Result = SimplifyPHINode(cast<PHINode>(I), Query (DL, TLI, DT));
> break;
> case Instruction::Call: {
> CallSite CS(cast<CallInst>(I));
> Result = SimplifyCall(CS.getCalledValue(), CS.arg_begin(),
> CS.arg_end(),
> - TD, TLI, DT);
> + DL, TLI, DT);
> break;
> }
> case Instruction::Trunc:
> - Result = SimplifyTruncInst(I->getOperand(0), I->getType(), TD, TLI,
> DT);
> + Result = SimplifyTruncInst(I->getOperand(0), I->getType(), DL, TLI,
> DT);
> break;
> }
>
> @@ -3191,7 +3191,7 @@ Value *llvm::SimplifyInstruction(Instruc
> /// This routine returns 'true' only when *it* simplifies something. The passed
> /// in simplified value does not count toward this.
> static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value
> *SimpleV,
> - const DataLayout *TD,
> + const DataLayout *DL,
> const TargetLibraryInfo
> *TLI,
> const DominatorTree *DT) {
> bool Simplified = false;
> @@ -3221,7 +3221,7 @@ static bool replaceAndRecursivelySimplif
> I = Worklist[Idx];
>
> // See if this instruction simplifies.
> - SimpleV = SimplifyInstruction(I, TD, TLI, DT);
> + SimpleV = SimplifyInstruction(I, DL, TLI, DT);
> if (!SimpleV)
> continue;
>
> @@ -3246,17 +3246,17 @@ static bool replaceAndRecursivelySimplif
> }
>
> bool llvm::recursivelySimplifyInstruction(Instruction *I,
> - const DataLayout *TD,
> + const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> - return replaceAndRecursivelySimplifyImpl(I, 0, TD, TLI, DT);
> + return replaceAndRecursivelySimplifyImpl(I, 0, DL, TLI, DT);
> }
>
> bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
> - const DataLayout *TD,
> + const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const DominatorTree *DT) {
> assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not
> valid!");
> assert(SimpleV && "Must provide a simplified value.");
> - return replaceAndRecursivelySimplifyImpl(I, SimpleV, TD, TLI, DT);
> + return replaceAndRecursivelySimplifyImpl(I, SimpleV, DL, TLI, DT);
> }
>
> Modified: llvm/trunk/lib/Analysis/Lint.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/Lint.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Analysis/Lint.cpp (original)
> +++ llvm/trunk/lib/Analysis/Lint.cpp Thu Feb 20 18:06:31 2014
> @@ -102,7 +102,7 @@ namespace {
> Module *Mod;
> AliasAnalysis *AA;
> DominatorTree *DT;
> - DataLayout *TD;
> + DataLayout *DL;
> TargetLibraryInfo *TLI;
>
> std::string Messages;
> @@ -176,7 +176,7 @@ bool Lint::runOnFunction(Function &F) {
> Mod = F.getParent();
> AA = &getAnalysis<AliasAnalysis>();
> DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
> - TD = getAnalysisIfAvailable<DataLayout>();
> + DL = getAnalysisIfAvailable<DataLayout>();
> TLI = &getAnalysis<TargetLibraryInfo>();
> visit(F);
> dbgs() << MessagesStr.str();
> @@ -247,7 +247,7 @@ void Lint::visitCallSite(CallSite CS) {
> Type *Ty =
> cast<PointerType>(Formal->getType())->getElementType();
> visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty),
> - TD ? TD->getABITypeAlignment(Ty) : 0,
> + DL ? DL->getABITypeAlignment(Ty) : 0,
> Ty, MemRef::Read | MemRef::Write);
> }
> }
> @@ -414,7 +414,7 @@ void Lint::visitMemoryReference(Instruct
> // Only handles memory references that read/write something simple like an
> // alloca instruction or a global variable.
> int64_t Offset = 0;
> - if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, TD)) {
> + if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL)) {
> // OK, so the access is to a constant offset from Ptr. Check that Ptr is
> // something we can handle and if so extract the size of this base object
> // along with its alignment.
> @@ -423,21 +423,21 @@ void Lint::visitMemoryReference(Instruct
>
> if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
> Type *ATy = AI->getAllocatedType();
> - if (TD && !AI->isArrayAllocation() && ATy->isSized())
> - BaseSize = TD->getTypeAllocSize(ATy);
> + if (DL && !AI->isArrayAllocation() && ATy->isSized())
> + BaseSize = DL->getTypeAllocSize(ATy);
> BaseAlign = AI->getAlignment();
> - if (TD && BaseAlign == 0 && ATy->isSized())
> - BaseAlign = TD->getABITypeAlignment(ATy);
> + if (DL && BaseAlign == 0 && ATy->isSized())
> + BaseAlign = DL->getABITypeAlignment(ATy);
> } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
> // If the global may be defined differently in another compilation unit
> // then don't warn about funky memory accesses.
> if (GV->hasDefinitiveInitializer()) {
> Type *GTy = GV->getType()->getElementType();
> - if (TD && GTy->isSized())
> - BaseSize = TD->getTypeAllocSize(GTy);
> + if (DL && GTy->isSized())
> + BaseSize = DL->getTypeAllocSize(GTy);
> BaseAlign = GV->getAlignment();
> - if (TD && BaseAlign == 0 && GTy->isSized())
> - BaseAlign = TD->getABITypeAlignment(GTy);
> + if (DL && BaseAlign == 0 && GTy->isSized())
> + BaseAlign = DL->getABITypeAlignment(GTy);
> }
> }
>
> @@ -450,8 +450,8 @@ void Lint::visitMemoryReference(Instruct
>
> // Accesses that say that the memory is more aligned than it is are not
> // defined.
> - if (TD && Align == 0 && Ty && Ty->isSized())
> - Align = TD->getABITypeAlignment(Ty);
> + if (DL && Align == 0 && Ty && Ty->isSized())
> + Align = DL->getABITypeAlignment(Ty);
> Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
> "Undefined behavior: Memory reference address is misaligned",
> &I);
> }
> @@ -542,22 +542,22 @@ static bool isZero(Value *V, DataLayout
> }
>
> void Lint::visitSDiv(BinaryOperator &I) {
> - Assert1(!isZero(I.getOperand(1), TD),
> + Assert1(!isZero(I.getOperand(1), DL),
> "Undefined behavior: Division by zero", &I);
> }
>
> void Lint::visitUDiv(BinaryOperator &I) {
> - Assert1(!isZero(I.getOperand(1), TD),
> + Assert1(!isZero(I.getOperand(1), DL),
> "Undefined behavior: Division by zero", &I);
> }
>
> void Lint::visitSRem(BinaryOperator &I) {
> - Assert1(!isZero(I.getOperand(1), TD),
> + Assert1(!isZero(I.getOperand(1), DL),
> "Undefined behavior: Division by zero", &I);
> }
>
> void Lint::visitURem(BinaryOperator &I) {
> - Assert1(!isZero(I.getOperand(1), TD),
> + Assert1(!isZero(I.getOperand(1), DL),
> "Undefined behavior: Division by zero", &I);
> }
>
> @@ -631,7 +631,7 @@ Value *Lint::findValueImpl(Value *V, boo
> // TODO: Look through eliminable cast pairs.
> // TODO: Look through calls with unique return values.
> // TODO: Look through vector insert/extract/shuffle.
> - V = OffsetOk ? GetUnderlyingObject(V, TD) : V->stripPointerCasts();
> + V = OffsetOk ? GetUnderlyingObject(V, DL) : V->stripPointerCasts();
> if (LoadInst *L = dyn_cast<LoadInst>(V)) {
> BasicBlock::iterator BBI = L;
> BasicBlock *BB = L->getParent();
> @@ -651,7 +651,7 @@ Value *Lint::findValueImpl(Value *V, boo
> if (W != V)
> return findValueImpl(W, OffsetOk, Visited);
> } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
> - if (CI->isNoopCast(TD ? TD->getIntPtrType(V->getContext()) :
> + if (CI->isNoopCast(DL ? DL->getIntPtrType(V->getContext()) :
> Type::getInt64Ty(V->getContext())))
> return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
> } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
> @@ -665,7 +665,7 @@ Value *Lint::findValueImpl(Value *V, boo
> if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
> CE->getOperand(0)->getType(),
> CE->getType(),
> - TD ? TD->getIntPtrType(V->getContext()) :
> + DL ? DL->getIntPtrType(V->getContext()) :
> Type::getInt64Ty(V->getContext())))
> return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
> } else if (CE->getOpcode() == Instruction::ExtractValue) {
> @@ -678,10 +678,10 @@ Value *Lint::findValueImpl(Value *V, boo
>
> // As a last resort, try SimplifyInstruction or constant folding.
> if (Instruction *Inst = dyn_cast<Instruction>(V)) {
> - if (Value *W = SimplifyInstruction(Inst, TD, TLI, DT))
> + if (Value *W = SimplifyInstruction(Inst, DL, TLI, DT))
> return findValueImpl(W, OffsetOk, Visited);
> } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
> - if (Value *W = ConstantFoldConstantExpression(CE, TD, TLI))
> + if (Value *W = ConstantFoldConstantExpression(CE, DL, TLI))
> if (W != V)
> return findValueImpl(W, OffsetOk, Visited);
> }
>
> Modified: llvm/trunk/lib/Transforms/IPO/ConstantMerge.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/ConstantMerge.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/IPO/ConstantMerge.cpp (original)
> +++ llvm/trunk/lib/Transforms/IPO/ConstantMerge.cpp Thu Feb 20 18:06:31 2014
> @@ -51,7 +51,7 @@ namespace {
> // alignment to a concrete value.
> unsigned getAlignment(GlobalVariable *GV) const;
>
> - const DataLayout *TD;
> + const DataLayout *DL;
> };
> }
>
> @@ -89,20 +89,20 @@ static bool IsBetterCanonical(const Glob
> }
>
> bool ConstantMerge::hasKnownAlignment(GlobalVariable *GV) const {
> - return TD || GV->getAlignment() != 0;
> + return DL || GV->getAlignment() != 0;
> }
>
> unsigned ConstantMerge::getAlignment(GlobalVariable *GV) const {
> unsigned Align = GV->getAlignment();
> if (Align)
> return Align;
> - if (TD)
> - return TD->getPreferredAlignment(GV);
> + if (DL)
> + return DL->getPreferredAlignment(GV);
> return 0;
> }
>
> bool ConstantMerge::runOnModule(Module &M) {
> - TD = getAnalysisIfAvailable<DataLayout>();
> + DL = getAnalysisIfAvailable<DataLayout>();
>
> // Find all the globals that are marked "used". These cannot be merged.
> SmallPtrSet<const GlobalValue*, 8> UsedGlobals;
>
> Modified: llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp (original)
> +++ llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp Thu Feb 20 18:06:31 2014
> @@ -84,7 +84,7 @@ namespace {
> const GlobalStatus &GS);
> bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
>
> - DataLayout *TD;
> + DataLayout *DL;
> TargetLibraryInfo *TLI;
> };
> }
> @@ -266,7 +266,7 @@ static bool CleanupPointerRootUsers(Glob
> /// quick scan over the use list to clean up the easy and obvious cruft. This
> /// returns true if it made a change.
> static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
> - DataLayout *TD, TargetLibraryInfo
> *TLI) {
> + DataLayout *DL, TargetLibraryInfo
> *TLI) {
> bool Changed = false;
> // Note that we need to use a weak value handle for the worklist items. When
> // we delete a constant array, we may also be holding pointer to one of its
> @@ -296,12 +296,12 @@ static bool CleanupConstantGlobalUsers(V
> Constant *SubInit = 0;
> if (Init)
> SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
> - Changed |= CleanupConstantGlobalUsers(CE, SubInit, TD, TLI);
> + Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI);
> } else if ((CE->getOpcode() == Instruction::BitCast &&
> CE->getType()->isPointerTy()) ||
> CE->getOpcode() == Instruction::AddrSpaceCast) {
> // Pointer cast, delete any stores and memsets to the global.
> - Changed |= CleanupConstantGlobalUsers(CE, 0, TD, TLI);
> + Changed |= CleanupConstantGlobalUsers(CE, 0, DL, TLI);
> }
>
> if (CE->use_empty()) {
> @@ -315,7 +315,7 @@ static bool CleanupConstantGlobalUsers(V
> Constant *SubInit = 0;
> if (!isa<ConstantExpr>(GEP->getOperand(0))) {
> ConstantExpr *CE =
> - dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, TD,
> TLI));
> + dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, DL,
> TLI));
> if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
> SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
>
> @@ -325,7 +325,7 @@ static bool CleanupConstantGlobalUsers(V
> if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
> SubInit =
> Constant::getNullValue(GEP->getType()->getElementType());
> }
> - Changed |= CleanupConstantGlobalUsers(GEP, SubInit, TD, TLI);
> + Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI);
>
> if (GEP->use_empty()) {
> GEP->eraseFromParent();
> @@ -342,7 +342,7 @@ static bool CleanupConstantGlobalUsers(V
> // us, and if they are all dead, nuke them without remorse.
> if (isSafeToDestroyConstant(C)) {
> C->destroyConstant();
> - CleanupConstantGlobalUsers(V, Init, TD, TLI);
> + CleanupConstantGlobalUsers(V, Init, DL, TLI);
> return true;
> }
> }
> @@ -467,7 +467,7 @@ static bool GlobalUsersSafeToSRA(GlobalV
> /// behavior of the program in a more fine-grained way. We have
> determined that
> /// this transformation is safe already. We return the first global
> variable we
> /// insert so that the caller can reprocess it.
> -static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout
> &TD) {
> +static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout
> &DL) {
> // Make sure this global only has simple uses that we can SRA.
> if (!GlobalUsersSafeToSRA(GV))
> return 0;
> @@ -482,11 +482,11 @@ static GlobalVariable *SRAGlobal(GlobalV
> // Get the alignment of the global, either explicit or target-specific.
> unsigned StartAlignment = GV->getAlignment();
> if (StartAlignment == 0)
> - StartAlignment = TD.getABITypeAlignment(GV->getType());
> + StartAlignment = DL.getABITypeAlignment(GV->getType());
>
> if (StructType *STy = dyn_cast<StructType>(Ty)) {
> NewGlobals.reserve(STy->getNumElements());
> - const StructLayout &Layout = *TD.getStructLayout(STy);
> + const StructLayout &Layout = *DL.getStructLayout(STy);
> for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
> Constant *In = Init->getAggregateElement(i);
> assert(In && "Couldn't get element of initializer?");
> @@ -503,7 +503,7 @@ static GlobalVariable *SRAGlobal(GlobalV
> // propagate info to each field.
> uint64_t FieldOffset = Layout.getElementOffset(i);
> unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
> - if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i)))
> + if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i)))
> NGV->setAlignment(NewAlign);
> }
> } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
> @@ -517,8 +517,8 @@ static GlobalVariable *SRAGlobal(GlobalV
> return 0; // It's not worth it.
> NewGlobals.reserve(NumElements);
>
> - uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());
> - unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
> + uint64_t EltSize = DL.getTypeAllocSize(STy->getElementType());
> + unsigned EltAlign = DL.getABITypeAlignment(STy->getElementType());
> for (unsigned i = 0, e = NumElements; i != e; ++i) {
> Constant *In = Init->getAggregateElement(i);
> assert(In && "Couldn't get element of initializer?");
> @@ -743,7 +743,7 @@ static bool OptimizeAwayTrappingUsesOfVa
> /// if the loaded value is dynamically null, then we know that they
> cannot be
> /// reachable with a null optimize away the load.
> static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant
> *LV,
> - DataLayout *TD,
> + DataLayout *DL,
> TargetLibraryInfo *TLI) {
> bool Changed = false;
>
> @@ -792,7 +792,7 @@ static bool OptimizeAwayTrappingUsesOfLo
> Changed |= CleanupPointerRootUsers(GV, TLI);
> } else {
> Changed = true;
> - CleanupConstantGlobalUsers(GV, 0, TD, TLI);
> + CleanupConstantGlobalUsers(GV, 0, DL, TLI);
> }
> if (GV->use_empty()) {
> DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n");
> @@ -807,10 +807,10 @@ static bool OptimizeAwayTrappingUsesOfLo
> /// ConstantPropUsersOf - Walk the use list of V, constant folding all of
> the
> /// instructions that are foldable.
> static void ConstantPropUsersOf(Value *V,
> - DataLayout *TD, TargetLibraryInfo *TLI) {
> + DataLayout *DL, TargetLibraryInfo *TLI) {
> for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI !=
> E; )
> if (Instruction *I = dyn_cast<Instruction>(*UI++))
> - if (Constant *NewC = ConstantFoldInstruction(I, TD, TLI)) {
> + if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
> I->replaceAllUsesWith(NewC);
>
> // Advance UI to the next non-I use to avoid invalidating it!
> @@ -830,7 +830,7 @@ static GlobalVariable *OptimizeGlobalAdd
> CallInst *CI,
> Type *AllocTy,
> ConstantInt
> *NElements,
> - DataLayout *TD,
> + DataLayout *DL,
> TargetLibraryInfo
> *TLI) {
> DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI <<
> '\n');
>
> @@ -949,9 +949,9 @@ static GlobalVariable *OptimizeGlobalAdd
> // To further other optimizations, loop over all users of NewGV and try
> to
> // constant prop them. This will promote GEP instructions with constant
> // indices into GEP constant-exprs, which will allow global-opt to hack
> on it.
> - ConstantPropUsersOf(NewGV, TD, TLI);
> + ConstantPropUsersOf(NewGV, DL, TLI);
> if (RepValue != NewGV)
> - ConstantPropUsersOf(RepValue, TD, TLI);
> + ConstantPropUsersOf(RepValue, DL, TLI);
>
> return NewGV;
> }
> @@ -1278,7 +1278,7 @@ static void RewriteUsesOfLoadForHeapSRoA
> /// PerformHeapAllocSRoA - CI is an allocation of an array of structures.
> Break
> /// it up into multiple allocations of arrays of the fields.
> static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst
> *CI,
> - Value *NElems, DataLayout *TD,
> + Value *NElems, DataLayout *DL,
> const TargetLibraryInfo *TLI)
> {
> DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI <<
> '\n');
> Type *MAT = getMallocAllocatedType(CI, TLI);
> @@ -1307,10 +1307,10 @@ static GlobalVariable *PerformHeapAllocS
> GV->getThreadLocalMode());
> FieldGlobals.push_back(NGV);
>
> - unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
> + unsigned TypeSize = DL->getTypeAllocSize(FieldTy);
> if (StructType *ST = dyn_cast<StructType>(FieldTy))
> - TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
> - Type *IntPtrTy = TD->getIntPtrType(CI->getType());
> + TypeSize = DL->getStructLayout(ST)->getSizeInBytes();
> + Type *IntPtrTy = DL->getIntPtrType(CI->getType());
> Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
> ConstantInt::get(IntPtrTy,
> TypeSize),
> NElems, 0,
> @@ -1470,9 +1470,9 @@ static bool TryToOptimizeStoreOfMallocTo
> Type *AllocTy,
> AtomicOrdering Ordering,
> Module::global_iterator
> &GVI,
> - DataLayout *TD,
> + DataLayout *DL,
> TargetLibraryInfo *TLI) {
> - if (!TD)
> + if (!DL)
> return false;
>
> // If this is a malloc of an abstract type, don't touch it.
> @@ -1502,7 +1502,7 @@ static bool TryToOptimizeStoreOfMallocTo
> // This eliminates dynamic allocation, avoids an indirection accessing
> the
> // data, and exposes the resultant global to further GlobalOpt.
> // We cannot optimize the malloc if we cannot determine malloc array
> size.
> - Value *NElems = getMallocArraySize(CI, TD, TLI, true);
> + Value *NElems = getMallocArraySize(CI, DL, TLI, true);
> if (!NElems)
> return false;
>
> @@ -1510,8 +1510,8 @@ static bool TryToOptimizeStoreOfMallocTo
> // Restrict this transformation to only working on small allocations
> // (2048 bytes currently), as we don't want to introduce a 16M global
> or
> // something.
> - if (NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048)
> {
> - GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD,
> TLI);
> + if (NElements->getZExtValue() * DL->getTypeAllocSize(AllocTy) < 2048)
> {
> + GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL,
> TLI);
> return true;
> }
>
> @@ -1540,8 +1540,8 @@ static bool TryToOptimizeStoreOfMallocTo
> // If this is a fixed size array, transform the Malloc to be an alloc
> of
> // structs. malloc [100 x struct],1 -> malloc struct, 100
> if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI,
> TLI))) {
> - Type *IntPtrTy = TD->getIntPtrType(CI->getType());
> - unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
> + Type *IntPtrTy = DL->getIntPtrType(CI->getType());
> + unsigned TypeSize = DL->getStructLayout(AllocSTy)->getSizeInBytes();
> Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
> Value *NumElements = ConstantInt::get(IntPtrTy,
> AT->getNumElements());
> Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
> @@ -1556,8 +1556,8 @@ static bool TryToOptimizeStoreOfMallocTo
> CI = cast<CallInst>(Malloc);
> }
>
> - GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, TLI,
> true),
> - TD, TLI);
> + GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI,
> true),
> + DL, TLI);
> return true;
> }
>
> @@ -1569,7 +1569,7 @@ static bool TryToOptimizeStoreOfMallocTo
> static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value
> *StoredOnceVal,
> AtomicOrdering Ordering,
> Module::global_iterator &GVI,
> - DataLayout *TD, TargetLibraryInfo
> *TLI) {
> + DataLayout *DL, TargetLibraryInfo
> *TLI) {
> // Ignore no-op GEPs and bitcasts.
> StoredOnceVal = StoredOnceVal->stripPointerCasts();
>
> @@ -1584,13 +1584,13 @@ static bool OptimizeOnceStoredGlobal(Glo
> SOVC = ConstantExpr::getBitCast(SOVC,
> GV->getInitializer()->getType());
>
> // Optimize away any trapping uses of the loaded value.
> - if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, TD, TLI))
> + if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI))
> return true;
> } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {
> Type *MallocType = getMallocAllocatedType(CI, TLI);
> if (MallocType &&
> TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
> Ordering, GVI,
> - TD, TLI))
> + DL, TLI))
> return true;
> }
> }
> @@ -1784,7 +1784,7 @@ bool GlobalOpt::ProcessInternalGlobal(Gl
> } else {
> // Delete any stores we can find to the global. We may not be able
> to
> // make it completely dead though.
> - Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD,
> TLI);
> + Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL,
> TLI);
> }
>
> // If the global is dead now, delete it.
> @@ -1800,7 +1800,7 @@ bool GlobalOpt::ProcessInternalGlobal(Gl
> GV->setConstant(true);
>
> // Clean up any obviously simplifiable users now.
> - CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
> + CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
>
> // If the global is dead now, just nuke it.
> if (GV->use_empty()) {
> @@ -1813,8 +1813,8 @@ bool GlobalOpt::ProcessInternalGlobal(Gl
> ++NumMarked;
> return true;
> } else if (!GV->getInitializer()->getType()->isSingleValueType()) {
> - if (DataLayout *TD = getAnalysisIfAvailable<DataLayout>())
> - if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) {
> + if (DataLayout *DL = getAnalysisIfAvailable<DataLayout>())
> + if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *DL)) {
> GVI = FirstNewGV; // Don't skip the newly produced globals!
> return true;
> }
> @@ -1829,7 +1829,7 @@ bool GlobalOpt::ProcessInternalGlobal(Gl
> GV->setInitializer(SOVConstant);
>
> // Clean up any obviously simplifiable users now.
> - CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
> + CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
>
> if (GV->use_empty()) {
> DEBUG(dbgs() << " *** Substituting initializer allowed us to "
> @@ -1846,7 +1846,7 @@ bool GlobalOpt::ProcessInternalGlobal(Gl
> // Try to optimize globals based on the knowledge that only one value
> // (besides its initializer) is ever stored to the global.
> if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI,
> - TD, TLI))
> + DL, TLI))
> return true;
>
> // Otherwise, if the global was not a boolean, we can shrink it to be
> a
> @@ -1947,7 +1947,7 @@ bool GlobalOpt::OptimizeGlobalVars(Modul
> // Simplify the initializer.
> if (GV->hasInitializer())
> if (ConstantExpr *CE =
> dyn_cast<ConstantExpr>(GV->getInitializer())) {
> - Constant *New = ConstantFoldConstantExpression(CE, TD, TLI);
> + Constant *New = ConstantFoldConstantExpression(CE, DL, TLI);
> if (New && New != CE)
> GV->setInitializer(New);
> }
> @@ -2070,7 +2070,7 @@ static GlobalVariable *InstallGlobalCtor
> static inline bool
> isSimpleEnoughValueToCommit(Constant *C,
> SmallPtrSet<Constant*, 8> &SimpleConstants,
> - const DataLayout *TD);
> + const DataLayout *DL);
>
>
> /// isSimpleEnoughValueToCommit - Return true if the specified constant
> can be
> @@ -2083,7 +2083,7 @@ isSimpleEnoughValueToCommit(Constant *C,
> /// time.
> static bool isSimpleEnoughValueToCommitHelper(Constant *C,
> SmallPtrSet<Constant*, 8>
> &SimpleConstants,
> - const DataLayout *TD) {
> + const DataLayout *DL) {
> // Simple integer, undef, constant aggregate zero, global addresses,
> etc are
> // all supported.
> if (C->getNumOperands() == 0 || isa<BlockAddress>(C) ||
> @@ -2095,7 +2095,7 @@ static bool isSimpleEnoughValueToCommitH
> isa<ConstantVector>(C)) {
> for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
> Constant *Op = cast<Constant>(C->getOperand(i));
> - if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, TD))
> + if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, DL))
> return false;
> }
> return true;
> @@ -2108,29 +2108,29 @@ static bool isSimpleEnoughValueToCommitH
> switch (CE->getOpcode()) {
> case Instruction::BitCast:
> // Bitcast is fine if the casted value is fine.
> - return isSimpleEnoughValueToCommit(CE->getOperand(0),
> SimpleConstants, TD);
> + return isSimpleEnoughValueToCommit(CE->getOperand(0),
> SimpleConstants, DL);
>
> case Instruction::IntToPtr:
> case Instruction::PtrToInt:
> // int <=> ptr is fine if the int type is the same size as the
> // pointer type.
> - if (!TD || TD->getTypeSizeInBits(CE->getType()) !=
> - TD->getTypeSizeInBits(CE->getOperand(0)->getType()))
> + if (!DL || DL->getTypeSizeInBits(CE->getType()) !=
> + DL->getTypeSizeInBits(CE->getOperand(0)->getType()))
> return false;
> - return isSimpleEnoughValueToCommit(CE->getOperand(0),
> SimpleConstants, TD);
> + return isSimpleEnoughValueToCommit(CE->getOperand(0),
> SimpleConstants, DL);
>
> // GEP is fine if it is simple + constant offset.
> case Instruction::GetElementPtr:
> for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
> if (!isa<ConstantInt>(CE->getOperand(i)))
> return false;
> - return isSimpleEnoughValueToCommit(CE->getOperand(0),
> SimpleConstants, TD);
> + return isSimpleEnoughValueToCommit(CE->getOperand(0),
> SimpleConstants, DL);
>
> case Instruction::Add:
> // We allow simple+cst.
> if (!isa<ConstantInt>(CE->getOperand(1)))
> return false;
> - return isSimpleEnoughValueToCommit(CE->getOperand(0),
> SimpleConstants, TD);
> + return isSimpleEnoughValueToCommit(CE->getOperand(0),
> SimpleConstants, DL);
> }
> return false;
> }
> @@ -2138,11 +2138,11 @@ static bool isSimpleEnoughValueToCommitH
> static inline bool
> isSimpleEnoughValueToCommit(Constant *C,
> SmallPtrSet<Constant*, 8> &SimpleConstants,
> - const DataLayout *TD) {
> + const DataLayout *DL) {
> // If we already checked this constant, we win.
> if (!SimpleConstants.insert(C)) return true;
> // Check the constant.
> - return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, TD);
> + return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);
> }
>
>
> @@ -2269,8 +2269,8 @@ namespace {
> /// Once an evaluation call fails, the evaluation object should not be
> reused.
> class Evaluator {
> public:
> - Evaluator(const DataLayout *TD, const TargetLibraryInfo *TLI)
> - : TD(TD), TLI(TLI) {
> + Evaluator(const DataLayout *DL, const TargetLibraryInfo *TLI)
> + : DL(DL), TLI(TLI) {
> ValueStack.push_back(new DenseMap<Value*, Constant*>);
> }
>
> @@ -2350,7 +2350,7 @@ private:
> /// simple enough to live in a static initializer of a global.
> SmallPtrSet<Constant*, 8> SimpleConstants;
>
> - const DataLayout *TD;
> + const DataLayout *DL;
> const TargetLibraryInfo *TLI;
> };
>
> @@ -2403,7 +2403,7 @@ bool Evaluator::EvaluateBlock(BasicBlock
> Constant *Ptr = getVal(SI->getOperand(1));
> if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
> DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
> - Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
> + Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
> DEBUG(dbgs() << "; To: " << *Ptr << "\n");
> }
> if (!isSimpleEnoughPointerToCommit(Ptr)) {
> @@ -2416,7 +2416,7 @@ bool Evaluator::EvaluateBlock(BasicBlock
>
> // If this might be too difficult for the backend to handle (e.g.
> the addr
> // of one global variable divided by another) then we can't commit
> it.
> - if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, TD)) {
> + if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) {
> DEBUG(dbgs() << "Store value is too complex to evaluate store. "
> << *Val
> << "\n");
> return false;
> @@ -2448,7 +2448,7 @@ bool Evaluator::EvaluateBlock(BasicBlock
>
> Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList);
> if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
> - Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
> + Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
>
> // If we can't improve the situation by introspecting NewTy,
> // we have to give up.
> @@ -2512,7 +2512,7 @@ bool Evaluator::EvaluateBlock(BasicBlock
>
> Constant *Ptr = getVal(LI->getOperand(0));
> if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
> - Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
> + Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
> DEBUG(dbgs() << "Found a constant pointer expression, constant "
> "folding: " << *Ptr << "\n");
> }
> @@ -2589,9 +2589,9 @@ bool Evaluator::EvaluateBlock(BasicBlock
> Value *Ptr = PtrArg->stripPointerCasts();
> if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
> Type *ElemTy =
> cast<PointerType>(GV->getType())->getElementType();
> - if (TD && !Size->isAllOnesValue() &&
> + if (DL && !Size->isAllOnesValue() &&
> Size->getValue().getLimitedValue() >=
> - TD->getTypeStoreSize(ElemTy)) {
> + DL->getTypeStoreSize(ElemTy)) {
> Invariants.insert(GV);
> DEBUG(dbgs() << "Found a global var that is an invariant: "
> << *GV
> << "\n");
> @@ -2697,7 +2697,7 @@ bool Evaluator::EvaluateBlock(BasicBlock
>
> if (!CurInst->use_empty()) {
> if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
> - InstResult = ConstantFoldConstantExpression(CE, TD, TLI);
> + InstResult = ConstantFoldConstantExpression(CE, DL, TLI);
>
> setVal(CurInst, InstResult);
> }
> @@ -2780,10 +2780,10 @@ bool Evaluator::EvaluateFunction(Functio
>
> /// EvaluateStaticConstructor - Evaluate static constructors in the
> function, if
> /// we can. Return true if we can, false otherwise.
> -static bool EvaluateStaticConstructor(Function *F, const DataLayout *TD,
> +static bool EvaluateStaticConstructor(Function *F, const DataLayout *DL,
> const TargetLibraryInfo *TLI) {
> // Call the function.
> - Evaluator Eval(TD, TLI);
> + Evaluator Eval(DL, TLI);
> Constant *RetValDummy;
> bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
> SmallVector<Constant*, 0>());
> @@ -2831,7 +2831,7 @@ bool GlobalOpt::OptimizeGlobalCtorsList(
> if (F->empty()) continue;
>
> // If we can evaluate the ctor at compile time, do.
> - if (EvaluateStaticConstructor(F, TD, TLI)) {
> + if (EvaluateStaticConstructor(F, DL, TLI)) {
> Ctors.erase(Ctors.begin()+i);
> MadeChange = true;
> --i;
> @@ -3159,7 +3159,7 @@ bool GlobalOpt::OptimizeEmptyGlobalCXXDt
> bool GlobalOpt::runOnModule(Module &M) {
> bool Changed = false;
>
> - TD = getAnalysisIfAvailable<DataLayout>();
> + DL = getAnalysisIfAvailable<DataLayout>();
> TLI = &getAnalysis<TargetLibraryInfo>();
>
> // Try to find the llvm.globalctors list.
>
> Modified: llvm/trunk/lib/Transforms/IPO/MergeFunctions.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/MergeFunctions.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/IPO/MergeFunctions.cpp (original)
> +++ llvm/trunk/lib/Transforms/IPO/MergeFunctions.cpp Thu Feb 20 18:06:31
> 2014
> @@ -108,12 +108,12 @@ public:
> static const ComparableFunction TombstoneKey;
> static DataLayout * const LookupOnly;
>
> - ComparableFunction(Function *Func, DataLayout *TD)
> - : Func(Func), Hash(profileFunction(Func)), TD(TD) {}
> + ComparableFunction(Function *Func, DataLayout *DL)
> + : Func(Func), Hash(profileFunction(Func)), DL(DL) {}
>
> Function *getFunc() const { return Func; }
> unsigned getHash() const { return Hash; }
> - DataLayout *getTD() const { return TD; }
> + DataLayout *getDataLayout() const { return DL; }
>
> // Drops AssertingVH reference to the function. Outside of debug mode,
> this
> // does nothing.
> @@ -125,11 +125,11 @@ public:
>
> private:
> explicit ComparableFunction(unsigned Hash)
> - : Func(NULL), Hash(Hash), TD(NULL) {}
> + : Func(NULL), Hash(Hash), DL(NULL) {}
>
> AssertingVH<Function> Func;
> unsigned Hash;
> - DataLayout *TD;
> + DataLayout *DL;
> };
>
> const ComparableFunction ComparableFunction::EmptyKey =
> ComparableFunction(0);
> @@ -164,9 +164,9 @@ namespace {
> /// side of claiming that two functions are different).
> class FunctionComparator {
> public:
> - FunctionComparator(const DataLayout *TD, const Function *F1,
> + FunctionComparator(const DataLayout *DL, const Function *F1,
> const Function *F2)
> - : F1(F1), F2(F2), TD(TD) {}
> + : F1(F1), F2(F2), DL(DL) {}
>
> /// Test whether the two functions have equivalent behaviour.
> bool compare();
> @@ -199,7 +199,7 @@ private:
> // The two functions undergoing comparison.
> const Function *F1, *F2;
>
> - const DataLayout *TD;
> + const DataLayout *DL;
>
> DenseMap<const Value *, const Value *> id_map;
> DenseSet<const Value *> seen_values;
> @@ -214,9 +214,9 @@ bool FunctionComparator::isEquivalentTyp
> PointerType *PTy1 = dyn_cast<PointerType>(Ty1);
> PointerType *PTy2 = dyn_cast<PointerType>(Ty2);
>
> - if (TD) {
> - if (PTy1 && PTy1->getAddressSpace() == 0) Ty1 =
> TD->getIntPtrType(Ty1);
> - if (PTy2 && PTy2->getAddressSpace() == 0) Ty2 =
> TD->getIntPtrType(Ty2);
> + if (DL) {
> + if (PTy1 && PTy1->getAddressSpace() == 0) Ty1 =
> DL->getIntPtrType(Ty1);
> + if (PTy2 && PTy2->getAddressSpace() == 0) Ty2 =
> DL->getIntPtrType(Ty2);
> }
>
> if (Ty1 == Ty2)
> @@ -359,13 +359,13 @@ bool FunctionComparator::isEquivalentGEP
> if (AS != GEP2->getPointerAddressSpace())
> return false;
>
> - if (TD) {
> + if (DL) {
> // When we have target data, we can reduce the GEP down to the value
> in bytes
> // added to the address.
> - unsigned BitWidth = TD ? TD->getPointerSizeInBits(AS) : 1;
> + unsigned BitWidth = DL ? DL->getPointerSizeInBits(AS) : 1;
> APInt Offset1(BitWidth, 0), Offset2(BitWidth, 0);
> - if (GEP1->accumulateConstantOffset(*TD, Offset1) &&
> - GEP2->accumulateConstantOffset(*TD, Offset2)) {
> + if (GEP1->accumulateConstantOffset(*DL, Offset1) &&
> + GEP2->accumulateConstantOffset(*DL, Offset2)) {
> return Offset1 == Offset2;
> }
> }
> @@ -606,7 +606,7 @@ private:
> FnSetType FnSet;
>
> /// DataLayout for more accurate GEP comparisons. May be NULL.
> - DataLayout *TD;
> + DataLayout *DL;
>
> /// Whether or not the target supports global aliases.
> bool HasGlobalAliases;
> @@ -623,7 +623,7 @@ ModulePass *llvm::createMergeFunctionsPa
>
> bool MergeFunctions::runOnModule(Module &M) {
> bool Changed = false;
> - TD = getAnalysisIfAvailable<DataLayout>();
> + DL = getAnalysisIfAvailable<DataLayout>();
>
> for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
> if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
> @@ -646,7 +646,7 @@ bool MergeFunctions::runOnModule(Module
> Function *F = cast<Function>(*I);
> if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
> !F->mayBeOverridden()) {
> - ComparableFunction CF = ComparableFunction(F, TD);
> + ComparableFunction CF = ComparableFunction(F, DL);
> Changed |= insert(CF);
> }
> }
> @@ -661,7 +661,7 @@ bool MergeFunctions::runOnModule(Module
> Function *F = cast<Function>(*I);
> if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
> F->mayBeOverridden()) {
> - ComparableFunction CF = ComparableFunction(F, TD);
> + ComparableFunction CF = ComparableFunction(F, DL);
> Changed |= insert(CF);
> }
> }
> @@ -682,14 +682,14 @@ bool DenseMapInfo<ComparableFunction>::i
> return false;
>
> // One of these is a special "underlying pointer comparison only"
> object.
> - if (LHS.getTD() == ComparableFunction::LookupOnly ||
> - RHS.getTD() == ComparableFunction::LookupOnly)
> + if (LHS.getDataLayout() == ComparableFunction::LookupOnly ||
> + RHS.getDataLayout() == ComparableFunction::LookupOnly)
> return false;
>
> - assert(LHS.getTD() == RHS.getTD() &&
> + assert(LHS.getDataLayout() == RHS.getDataLayout() &&
> "Comparing functions for different targets");
>
> - return FunctionComparator(LHS.getTD(), LHS.getFunc(),
> + return FunctionComparator(LHS.getDataLayout(), LHS.getFunc(),
> RHS.getFunc()).compare();
> }
>
>
> Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombine.h
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombine.h?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/InstCombine/InstCombine.h (original)
> +++ llvm/trunk/lib/Transforms/InstCombine/InstCombine.h Thu Feb 20
> 18:06:31 2014
> @@ -81,7 +81,7 @@ public:
> class LLVM_LIBRARY_VISIBILITY InstCombiner
> : public FunctionPass,
> public InstVisitor<InstCombiner,
> Instruction*> {
> - DataLayout *TD;
> + DataLayout *DL;
> TargetLibraryInfo *TLI;
> bool MadeIRChange;
> LibCallSimplifier *Simplifier;
> @@ -96,7 +96,7 @@ public:
> BuilderTy *Builder;
>
> static char ID; // Pass identification, replacement for typeid
> - InstCombiner() : FunctionPass(ID), TD(0), Builder(0) {
> + InstCombiner() : FunctionPass(ID), DL(0), Builder(0) {
> MinimizeSize = false;
> initializeInstCombinerPass(*PassRegistry::getPassRegistry());
> }
> @@ -108,7 +108,7 @@ public:
>
> virtual void getAnalysisUsage(AnalysisUsage &AU) const;
>
> - DataLayout *getDataLayout() const { return TD; }
> + DataLayout *getDataLayout() const { return DL; }
>
> TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }
>
> @@ -234,7 +234,7 @@ private:
> Type *Ty);
>
> Instruction *visitCallSite(CallSite CS);
> - Instruction *tryOptimizeCall(CallInst *CI, const DataLayout *TD);
> + Instruction *tryOptimizeCall(CallInst *CI, const DataLayout *DL);
> bool transformConstExprCastCall(CallSite CS);
> Instruction *transformCallThroughTrampoline(CallSite CS,
> IntrinsicInst *Tramp);
> @@ -311,15 +311,15 @@ public:
>
> void ComputeMaskedBits(Value *V, APInt &KnownZero,
> APInt &KnownOne, unsigned Depth = 0) const {
> - return llvm::ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
> + return llvm::ComputeMaskedBits(V, KnownZero, KnownOne, DL, Depth);
> }
>
> bool MaskedValueIsZero(Value *V, const APInt &Mask,
> unsigned Depth = 0) const {
> - return llvm::MaskedValueIsZero(V, Mask, TD, Depth);
> + return llvm::MaskedValueIsZero(V, Mask, DL, Depth);
> }
> unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0) const {
> - return llvm::ComputeNumSignBits(Op, TD, Depth);
> + return llvm::ComputeNumSignBits(Op, DL, Depth);
> }
>
> private:
>
> Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp (original)
> +++ llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp Thu Feb 20
> 18:06:31 2014
> @@ -919,7 +919,7 @@ Instruction *InstCombiner::visitAdd(Bina
> Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
>
> if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
> - I.hasNoUnsignedWrap(), TD))
> + I.hasNoUnsignedWrap(), DL))
> return ReplaceInstUsesWith(I, V);
>
> // (A*B)+(A*C) -> A*(B+C) etc
> @@ -1193,7 +1193,7 @@ Instruction *InstCombiner::visitFAdd(Bin
> bool Changed = SimplifyAssociativeOrCommutative(I);
> Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
>
> - if (Value *V = SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), TD))
> + if (Value *V = SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), DL))
> return ReplaceInstUsesWith(I, V);
>
> if (isa<Constant>(RHS)) {
> @@ -1300,7 +1300,7 @@ Instruction *InstCombiner::visitFAdd(Bin
> ///
> Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
> Type *Ty) {
> - assert(TD && "Must have target data info for this");
> + assert(DL && "Must have target data info for this");
>
> // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can
> optimize
> // this.
> @@ -1369,7 +1369,7 @@ Instruction *InstCombiner::visitSub(Bina
> Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
>
> if (Value *V = SimplifySubInst(Op0, Op1, I.hasNoSignedWrap(),
> - I.hasNoUnsignedWrap(), TD))
> + I.hasNoUnsignedWrap(), DL))
> return ReplaceInstUsesWith(I, V);
>
> // (A*B)-(A*C) -> A*(B-C) etc
> @@ -1518,7 +1518,7 @@ Instruction *InstCombiner::visitSub(Bina
>
> // Optimize pointer differences into the same array into a size.
> Consider:
> // &A[10] - &A[0]: we should compile this to "10".
> - if (TD) {
> + if (DL) {
> Value *LHSOp, *RHSOp;
> if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
> match(Op1, m_PtrToInt(m_Value(RHSOp))))
> @@ -1538,7 +1538,7 @@ Instruction *InstCombiner::visitSub(Bina
> Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
> Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
>
> - if (Value *V = SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), TD))
> + if (Value *V = SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), DL))
> return ReplaceInstUsesWith(I, V);
>
> if (isa<Constant>(Op0))
>
> Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
> (original)
> +++ llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp Thu Feb
> 20 18:06:31 2014
> @@ -1104,7 +1104,7 @@ Instruction *InstCombiner::visitAnd(Bina
> bool Changed = SimplifyAssociativeOrCommutative(I);
> Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
>
> - if (Value *V = SimplifyAndInst(Op0, Op1, TD))
> + if (Value *V = SimplifyAndInst(Op0, Op1, DL))
> return ReplaceInstUsesWith(I, V);
>
> // (A|B)&(A|C) -> A|(B&C) etc
> @@ -1905,7 +1905,7 @@ Instruction *InstCombiner::visitOr(Binar
> bool Changed = SimplifyAssociativeOrCommutative(I);
> Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
>
> - if (Value *V = SimplifyOrInst(Op0, Op1, TD))
> + if (Value *V = SimplifyOrInst(Op0, Op1, DL))
> return ReplaceInstUsesWith(I, V);
>
> // (A&B)|(A&C) -> A&(B|C) etc
> @@ -2237,7 +2237,7 @@ Instruction *InstCombiner::visitXor(Bina
> bool Changed = SimplifyAssociativeOrCommutative(I);
> Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
>
> - if (Value *V = SimplifyXorInst(Op0, Op1, TD))
> + if (Value *V = SimplifyXorInst(Op0, Op1, DL))
> return ReplaceInstUsesWith(I, V);
>
> // (A&B)^(A&C) -> A&(B^C) etc
>
> Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp (original)
> +++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp Thu Feb 20
> 18:06:31 2014
> @@ -56,8 +56,8 @@ static Type *reduceToSingleValueType(Typ
> }
>
> Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
> - unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);
> - unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), TD);
> + unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL);
> + unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL);
> unsigned MinAlign = std::min(DstAlign, SrcAlign);
> unsigned CopyAlign = MI->getAlignment();
>
> @@ -103,7 +103,7 @@ Instruction *InstCombiner::SimplifyMemTr
> if (StrippedDest != MI->getArgOperand(0)) {
> Type *SrcETy = cast<PointerType>(StrippedDest->getType())
> ->getElementType();
> - if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
> + if (DL && SrcETy->isSized() && DL->getTypeStoreSize(SrcETy) == Size) {
> // The SrcETy might be something like {{{double}}} or [1 x double].
> Rip
> // down through these levels if so.
> SrcETy = reduceToSingleValueType(SrcETy);
> @@ -152,7 +152,7 @@ Instruction *InstCombiner::SimplifyMemTr
> }
>
> Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
> - unsigned Alignment = getKnownAlignment(MI->getDest(), TD);
> + unsigned Alignment = getKnownAlignment(MI->getDest(), DL);
> if (MI->getAlignment() < Alignment) {
> MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
> Alignment, false));
> @@ -274,7 +274,7 @@ Instruction *InstCombiner::visitCallInst
> default: break;
> case Intrinsic::objectsize: {
> uint64_t Size;
> - if (getObjectSize(II->getArgOperand(0), Size, TD, TLI))
> + if (getObjectSize(II->getArgOperand(0), Size, DL, TLI))
> return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(),
> Size));
> return 0;
> }
> @@ -504,7 +504,7 @@ Instruction *InstCombiner::visitCallInst
> case Intrinsic::ppc_altivec_lvx:
> case Intrinsic::ppc_altivec_lvxl:
> // Turn PPC lvx -> load if the pointer is known aligned.
> - if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
> + if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL) >= 16) {
> Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
>
> PointerType::getUnqual(II->getType()));
> return new LoadInst(Ptr);
> @@ -513,7 +513,7 @@ Instruction *InstCombiner::visitCallInst
> case Intrinsic::ppc_altivec_stvx:
> case Intrinsic::ppc_altivec_stvxl:
> // Turn stvx -> store if the pointer is known aligned.
> - if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
> + if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL) >= 16) {
> Type *OpPtrTy =
> PointerType::getUnqual(II->getArgOperand(0)->getType());
> Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
> @@ -524,7 +524,7 @@ Instruction *InstCombiner::visitCallInst
> case Intrinsic::x86_sse2_storeu_pd:
> case Intrinsic::x86_sse2_storeu_dq:
> // Turn X86 storeu -> store if the pointer is known aligned.
> - if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
> + if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL) >= 16) {
> Type *OpPtrTy =
> PointerType::getUnqual(II->getArgOperand(1)->getType());
> Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
> @@ -641,7 +641,7 @@ Instruction *InstCombiner::visitCallInst
> case Intrinsic::arm_neon_vst2lane:
> case Intrinsic::arm_neon_vst3lane:
> case Intrinsic::arm_neon_vst4lane: {
> - unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
> + unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL);
> unsigned AlignArg = II->getNumArgOperands() - 1;
> ConstantInt *IntrAlign =
> dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
> if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
> @@ -747,7 +747,7 @@ Instruction *InstCombiner::visitInvokeIn
> /// passed through the varargs area, we can eliminate the use of the cast.
> static bool isSafeToEliminateVarargsCast(const CallSite CS,
> const CastInst * const CI,
> - const DataLayout * const TD,
> + const DataLayout * const DL,
> const int ix) {
> if (!CI->isLosslessCast())
> return false;
> @@ -763,7 +763,7 @@ static bool isSafeToEliminateVarargsCast
> Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
> if (!SrcTy->isSized() || !DstTy->isSized())
> return false;
> - if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
> + if (!DL || DL->getTypeAllocSize(SrcTy) != DL->getTypeAllocSize(DstTy))
> return false;
> return true;
> }
> @@ -772,7 +772,7 @@ static bool isSafeToEliminateVarargsCast
> // Currently we're only working with the checking functions, memcpy_chk,
> // mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk,
> strncpy_chk,
> // strcat_chk and strncat_chk.
> -Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout
> *TD) {
> +Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout
> *DL) {
> if (CI->getCalledFunction() == 0) return 0;
>
> if (Value *With = Simplifier->optimizeCall(CI)) {
> @@ -934,7 +934,7 @@ Instruction *InstCombiner::visitCallSite
> for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
> E = CS.arg_end(); I != E; ++I, ++ix) {
> CastInst *CI = dyn_cast<CastInst>(*I);
> - if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
> + if (CI && isSafeToEliminateVarargsCast(CS, CI, DL, ix)) {
> *I = CI->getOperand(0);
> Changed = true;
> }
> @@ -951,7 +951,7 @@ Instruction *InstCombiner::visitCallSite
> // this. None of these calls are seen as possibly dead so go ahead and
> // delete the instruction now.
> if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
> - Instruction *I = tryOptimizeCall(CI, TD);
> + Instruction *I = tryOptimizeCall(CI, DL);
> // If we changed something return the result, etc. Otherwise let
> // the fallthrough check.
> if (I) return EraseInstFromFunction(*I);
> @@ -1043,12 +1043,12 @@ bool InstCombiner::transformConstExprCas
> CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
>
> Attribute::ByVal)) {
> PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
> - if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD
> == 0)
> + if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || DL
> == 0)
> return false;
>
> Type *CurElTy = ActTy->getPointerElementType();
> - if (TD->getTypeAllocSize(CurElTy) !=
> - TD->getTypeAllocSize(ParamPTy->getElementType()))
> + if (DL->getTypeAllocSize(CurElTy) !=
> + DL->getTypeAllocSize(ParamPTy->getElementType()))
> return false;
> }
> }
>
> Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp (original)
> +++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp Thu Feb 20
> 18:06:31 2014
> @@ -79,7 +79,7 @@ static Value *DecomposeSimpleLinearExpr(
> Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
> AllocaInst &AI) {
> // This requires DataLayout to get the alloca alignment and size
> information.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> PointerType *PTy = cast<PointerType>(CI.getType());
>
> @@ -91,8 +91,8 @@ Instruction *InstCombiner::PromoteCastOf
> Type *CastElTy = PTy->getElementType();
> if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;
>
> - unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
> - unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);
> + unsigned AllocElTyAlign = DL->getABITypeAlignment(AllocElTy);
> + unsigned CastElTyAlign = DL->getABITypeAlignment(CastElTy);
> if (CastElTyAlign < AllocElTyAlign) return 0;
>
> // If the allocation has multiple uses, only promote it if we are
> strictly
> @@ -100,14 +100,14 @@ Instruction *InstCombiner::PromoteCastOf
> // same, we open the door to infinite loops of various kinds.
> if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return 0;
>
> - uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy);
> - uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy);
> + uint64_t AllocElTySize = DL->getTypeAllocSize(AllocElTy);
> + uint64_t CastElTySize = DL->getTypeAllocSize(CastElTy);
> if (CastElTySize == 0 || AllocElTySize == 0) return 0;
>
> // If the allocation has multiple uses, only promote it if we're not
> // shrinking the amount of memory being allocated.
> - uint64_t AllocElTyStoreSize = TD->getTypeStoreSize(AllocElTy);
> - uint64_t CastElTyStoreSize = TD->getTypeStoreSize(CastElTy);
> + uint64_t AllocElTyStoreSize = DL->getTypeStoreSize(AllocElTy);
> + uint64_t CastElTyStoreSize = DL->getTypeStoreSize(CastElTy);
> if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return 0;
>
> // See if we can satisfy the modulus by pulling a scale out of the array
> @@ -161,9 +161,9 @@ Value *InstCombiner::EvaluateInDifferent
> bool isSigned) {
> if (Constant *C = dyn_cast<Constant>(V)) {
> C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
> - // If we got a constantexpr back, try to simplify it with TD info.
> + // If we got a constantexpr back, try to simplify it with DL info.
> if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
> - C = ConstantFoldConstantExpression(CE, TD, TLI);
> + C = ConstantFoldConstantExpression(CE, DL, TLI);
> return C;
> }
>
> @@ -235,7 +235,7 @@ isEliminableCastPair(
> const CastInst *CI, ///< The first cast instruction
> unsigned opcode, ///< The opcode of the second cast instruction
> Type *DstTy, ///< The target type for the second cast instruction
> - DataLayout *TD ///< The target data for pointer size
> + DataLayout *DL ///< The target data for pointer size
> ) {
>
> Type *SrcTy = CI->getOperand(0)->getType(); // A from above
> @@ -244,12 +244,12 @@ isEliminableCastPair(
> // Get the opcodes of the two Cast instructions
> Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
> Instruction::CastOps secondOp = Instruction::CastOps(opcode);
> - Type *SrcIntPtrTy = TD && SrcTy->isPtrOrPtrVectorTy() ?
> - TD->getIntPtrType(SrcTy) : 0;
> - Type *MidIntPtrTy = TD && MidTy->isPtrOrPtrVectorTy() ?
> - TD->getIntPtrType(MidTy) : 0;
> - Type *DstIntPtrTy = TD && DstTy->isPtrOrPtrVectorTy() ?
> - TD->getIntPtrType(DstTy) : 0;
> + Type *SrcIntPtrTy = DL && SrcTy->isPtrOrPtrVectorTy() ?
> + DL->getIntPtrType(SrcTy) : 0;
> + Type *MidIntPtrTy = DL && MidTy->isPtrOrPtrVectorTy() ?
> + DL->getIntPtrType(MidTy) : 0;
> + Type *DstIntPtrTy = DL && DstTy->isPtrOrPtrVectorTy() ?
> + DL->getIntPtrType(DstTy) : 0;
> unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy,
> MidTy,
> DstTy, SrcIntPtrTy,
> MidIntPtrTy,
> DstIntPtrTy);
> @@ -275,7 +275,7 @@ bool InstCombiner::ShouldOptimizeCast(In
> // If this is another cast that can be eliminated, we prefer to have it
> // eliminated.
> if (const CastInst *CI = dyn_cast<CastInst>(V))
> - if (isEliminableCastPair(CI, opc, Ty, TD))
> + if (isEliminableCastPair(CI, opc, Ty, DL))
> return false;
>
> // If this is a vector sext from a compare, then we don't want to break
> the
> @@ -295,7 +295,7 @@ Instruction *InstCombiner::commonCastTra
> // eliminate it now.
> if (CastInst *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
> if (Instruction::CastOps opc =
> - isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {
> + isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), DL)) {
> // The first cast (CSrc) is eliminable so we need to fix up or
> replace
> // the second cast (CI). CSrc will then have a good chance of being
> dead.
> return CastInst::Create(opc, CSrc->getOperand(0), CI.getType());
> @@ -1405,11 +1405,11 @@ Instruction *InstCombiner::visitIntToPtr
> // trunc or zext to the intptr_t type, then inttoptr of it. This
> allows the
> // cast to be exposed to other transforms.
>
> - if (TD) {
> + if (DL) {
> unsigned AS = CI.getAddressSpace();
> if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
> - TD->getPointerSizeInBits(AS)) {
> - Type *Ty = TD->getIntPtrType(CI.getContext(), AS);
> + DL->getPointerSizeInBits(AS)) {
> + Type *Ty = DL->getIntPtrType(CI.getContext(), AS);
> if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
> Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());
>
> @@ -1440,7 +1440,7 @@ Instruction *InstCombiner::commonPointer
> return &CI;
> }
>
> - if (!TD)
> + if (!DL)
> return commonCastTransforms(CI);
>
> // If the GEP has a single use, and the base pointer is a bitcast,
> and the
> @@ -1448,12 +1448,12 @@ Instruction *InstCombiner::commonPointer
> // instructions into fewer. This typically happens with unions and
> other
> // non-type-safe code.
> unsigned AS = GEP->getPointerAddressSpace();
> - unsigned OffsetBits = TD->getPointerSizeInBits(AS);
> + unsigned OffsetBits = DL->getPointerSizeInBits(AS);
> APInt Offset(OffsetBits, 0);
> BitCastInst *BCI = dyn_cast<BitCastInst>(GEP->getOperand(0));
> if (GEP->hasOneUse() &&
> BCI &&
> - GEP->accumulateConstantOffset(*TD, Offset)) {
> + GEP->accumulateConstantOffset(*DL, Offset)) {
> // Get the base pointer input of the bitcast, and the type it
> points to.
> Value *OrigBase = BCI->getOperand(0);
> SmallVector<Value*, 8> NewIndices;
> @@ -1484,16 +1484,16 @@ Instruction *InstCombiner::visitPtrToInt
> // do a ptrtoint to intptr_t then do a trunc or zext. This allows the
> cast
> // to be exposed to other transforms.
>
> - if (!TD)
> + if (!DL)
> return commonPointerCastTransforms(CI);
>
> Type *Ty = CI.getType();
> unsigned AS = CI.getPointerAddressSpace();
>
> - if (Ty->getScalarSizeInBits() == TD->getPointerSizeInBits(AS))
> + if (Ty->getScalarSizeInBits() == DL->getPointerSizeInBits(AS))
> return commonPointerCastTransforms(CI);
>
> - Type *PtrTy = TD->getIntPtrType(CI.getContext(), AS);
> + Type *PtrTy = DL->getIntPtrType(CI.getContext(), AS);
> if (Ty->isVectorTy()) // Handle vectors of pointers.
> PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());
>
>
> Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
> (original)
> +++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp Thu Feb
> 20 18:06:31 2014
> @@ -218,7 +218,7 @@ Instruction *InstCombiner::
> FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
> CmpInst &ICI, ConstantInt *AndCst) {
> // We need TD information to know the pointer size unless this is
> inbounds.
> - if (!GEP->isInBounds() && TD == 0)
> + if (!GEP->isInBounds() && DL == 0)
> return 0;
>
> Constant *Init = GV->getInitializer();
> @@ -307,7 +307,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementP
>
> // Find out if the comparison would be true or false for the i'th
> element.
> Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
> - CompareRHS, TD, TLI);
> + CompareRHS, DL, TLI);
> // If the result is undef for this element, ignore it.
> if (isa<UndefValue>(C)) {
> // Extend range state machines to cover this element in case there
> is an
> @@ -386,7 +386,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementP
> // index down like the GEP would do implicitly. We don't have to do
> this for
> // an inbounds GEP because the index can't be out of range.
> if (!GEP->isInBounds()) {
> - Type *IntPtrTy = TD->getIntPtrType(GEP->getType());
> + Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
> unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
> if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
> Idx = Builder->CreateTrunc(Idx, IntPtrTy);
> @@ -475,8 +475,8 @@ FoldCmpLoadFromIndexedGlobal(GetElementP
> // - Default to i32
> if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
> Ty = Idx->getType();
> - else if (TD)
> - Ty = TD->getSmallestLegalIntType(Init->getContext(),
> ArrayElementCount);
> + else if (DL)
> + Ty = DL->getSmallestLegalIntType(Init->getContext(),
> ArrayElementCount);
> else if (ArrayElementCount <= 32)
> Ty = Type::getInt32Ty(Init->getContext());
>
> @@ -503,7 +503,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementP
> /// If we can't emit an optimized form for this expression, this returns
> null.
> ///
> static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
> - DataLayout &TD = *IC.getDataLayout();
> + DataLayout &DL = *IC.getDataLayout();
> gep_type_iterator GTI = gep_type_begin(GEP);
>
> // Check to see if this gep only has a single variable index. If so,
> and if
> @@ -520,9 +520,9 @@ static Value *EvaluateGEPOffsetExpressio
>
> // Handle a struct index, which adds its field offset to the
> pointer.
> if (StructType *STy = dyn_cast<StructType>(*GTI)) {
> - Offset +=
> TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
> + Offset +=
> DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
> } else {
> - uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
> + uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
> Offset += Size*CI->getSExtValue();
> }
> } else {
> @@ -538,7 +538,7 @@ static Value *EvaluateGEPOffsetExpressio
> Value *VariableIdx = GEP->getOperand(i);
> // Determine the scale factor of the variable element. For example,
> this is
> // 4 if the variable index is into an array of i32.
> - uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());
> + uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());
>
> // Verify that there are no other variable indices. If so, emit the
> hard way.
> for (++i, ++GTI; i != e; ++i, ++GTI) {
> @@ -550,9 +550,9 @@ static Value *EvaluateGEPOffsetExpressio
>
> // Handle a struct index, which adds its field offset to the pointer.
> if (StructType *STy = dyn_cast<StructType>(*GTI)) {
> - Offset +=
> TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
> + Offset +=
> DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
> } else {
> - uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
> + uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
> Offset += Size*CI->getSExtValue();
> }
> }
> @@ -562,7 +562,7 @@ static Value *EvaluateGEPOffsetExpressio
> // Okay, we know we have a single variable index, which must be a
> // pointer/array/vector index. If there is no offset, life is simple,
> return
> // the index.
> - Type *IntPtrTy = TD.getIntPtrType(GEP->getOperand(0)->getType());
> + Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
> unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
> if (Offset == 0) {
> // Cast to intptrty in case a truncation occurs. If an extension is
> needed,
> @@ -615,7 +615,7 @@ Instruction *InstCombiner::FoldGEPICmp(G
> RHS = BCI->getOperand(0);
>
> Value *PtrBase = GEPLHS->getOperand(0);
> - if (TD && PtrBase == RHS && GEPLHS->isInBounds()) {
> + if (DL && PtrBase == RHS && GEPLHS->isInBounds()) {
> // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
> // This transformation (ignoring the base and scales) is valid
> because we
> // know pointers can't overflow since the gep is inbounds. See if we
> can
> @@ -648,7 +648,7 @@ Instruction *InstCombiner::FoldGEPICmp(G
> // If we're comparing GEPs with two base pointers that only differ
> in type
> // and both GEPs have only constant indices or just one use, then
> fold
> // the compare with the adjusted indices.
> - if (TD && GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
> + if (DL && GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
> (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
> (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
> PtrBase->stripPointerCasts() ==
> @@ -719,7 +719,7 @@ Instruction *InstCombiner::FoldGEPICmp(G
>
> // Only lower this if the icmp is the only user of the GEP or if we
> expect
> // the result to fold to a constant!
> - if (TD &&
> + if (DL &&
> GEPsInBounds &&
> (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
> (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
> @@ -1792,8 +1792,8 @@ Instruction *InstCombiner::visitICmpInst
>
> // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if
> the
> // integer type is the same size as the pointer type.
> - if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
> - TD->getPointerTypeSizeInBits(SrcTy) ==
> DestTy->getIntegerBitWidth()) {
> + if (DL && LHSCI->getOpcode() == Instruction::PtrToInt &&
> + DL->getPointerTypeSizeInBits(SrcTy) ==
> DestTy->getIntegerBitWidth()) {
> Value *RHSOp = 0;
> if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
> RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
> @@ -2104,7 +2104,7 @@ Instruction *InstCombiner::visitICmpInst
> Changed = true;
> }
>
> - if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, TD))
> + if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL))
> return ReplaceInstUsesWith(I, V);
>
> // comparing -val or val with non-zero is the same as just comparing val
> @@ -2172,8 +2172,8 @@ Instruction *InstCombiner::visitICmpInst
> unsigned BitWidth = 0;
> if (Ty->isIntOrIntVectorTy())
> BitWidth = Ty->getScalarSizeInBits();
> - else if (TD) // Pointers require TD info to get their size.
> - BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
> + else if (DL) // Pointers require DL info to get their size.
> + BitWidth = DL->getTypeSizeInBits(Ty->getScalarType());
>
> bool isSignBit = false;
>
> @@ -2532,8 +2532,8 @@ Instruction *InstCombiner::visitICmpInst
> }
> case Instruction::IntToPtr:
> // icmp pred inttoptr(X), null -> icmp pred X, 0
> - if (RHSC->isNullValue() && TD &&
> - TD->getIntPtrType(RHSC->getType()) ==
> + if (RHSC->isNullValue() && DL &&
> + DL->getIntPtrType(RHSC->getType()) ==
> LHSI->getOperand(0)->getType())
> return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
>
> Constant::getNullValue(LHSI->getOperand(0)->getType()));
> @@ -3229,7 +3229,7 @@ Instruction *InstCombiner::visitFCmpInst
>
> Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
>
> - if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, TD))
> + if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, DL))
> return ReplaceInstUsesWith(I, V);
>
> // Simplify 'fcmp pred X, X'
>
> Modified:
> llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
> (original)
> +++ llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
> Thu Feb 20 18:06:31 2014
> @@ -157,8 +157,8 @@ isOnlyCopiedFromConstantGlobal(AllocaIns
> Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
> // Ensure that the alloca array size argument has type intptr_t, so that
> // any casting is exposed early.
> - if (TD) {
> - Type *IntPtrTy = TD->getIntPtrType(AI.getType());
> + if (DL) {
> + Type *IntPtrTy = DL->getIntPtrType(AI.getType());
> if (AI.getArraySize()->getType() != IntPtrTy) {
> Value *V = Builder->CreateIntCast(AI.getArraySize(),
> IntPtrTy, false);
> @@ -184,8 +184,8 @@ Instruction *InstCombiner::visitAllocaIn
> // Now that I is pointing to the first non-allocation-inst in the
> block,
> // insert our getelementptr instruction...
> //
> - Type *IdxTy = TD
> - ? TD->getIntPtrType(AI.getType())
> + Type *IdxTy = DL
> + ? DL->getIntPtrType(AI.getType())
> : Type::getInt64Ty(AI.getContext());
> Value *NullIdx = Constant::getNullValue(IdxTy);
> Value *Idx[2] = { NullIdx, NullIdx };
> @@ -201,15 +201,15 @@ Instruction *InstCombiner::visitAllocaIn
> }
> }
>
> - if (TD && AI.getAllocatedType()->isSized()) {
> + if (DL && AI.getAllocatedType()->isSized()) {
> // If the alignment is 0 (unspecified), assign it the preferred
> alignment.
> if (AI.getAlignment() == 0)
> - AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
> + AI.setAlignment(DL->getPrefTypeAlignment(AI.getAllocatedType()));
>
> // Move all alloca's of zero byte objects to the entry block and
> merge them
> // together. Note that we only do this for alloca's, because malloc
> should
> // allocate and return a unique pointer, even for a zero byte
> allocation.
> - if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0) {
> + if (DL->getTypeAllocSize(AI.getAllocatedType()) == 0) {
> // For a zero sized alloca there is no point in doing an array
> allocation.
> // This is helpful if the array size is a complicated expression
> not used
> // elsewhere.
> @@ -227,7 +227,7 @@ Instruction *InstCombiner::visitAllocaIn
> // dominance as the array size was forced to a constant earlier
> already.
> AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
> if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
> - TD->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
> + DL->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
> AI.moveBefore(FirstInst);
> return &AI;
> }
> @@ -236,7 +236,7 @@ Instruction *InstCombiner::visitAllocaIn
> // assign it the preferred alignment.
> if (EntryAI->getAlignment() == 0)
> EntryAI->setAlignment(
> - TD->getPrefTypeAlignment(EntryAI->getAllocatedType()));
> + DL->getPrefTypeAlignment(EntryAI->getAllocatedType()));
> // Replace this zero-sized alloca with the one at the start of
> the entry
> // block after ensuring that the address will be aligned enough
> for both
> // types.
> @@ -260,7 +260,7 @@ Instruction *InstCombiner::visitAllocaIn
> SmallVector<Instruction *, 4> ToDelete;
> if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI,
> ToDelete)) {
> unsigned SourceAlign = getOrEnforceKnownAlignment(Copy->getSource(),
> -                                                      AI.getAlignment(), TD);
> +                                                      AI.getAlignment(), DL);
> if (AI.getAlignment() <= SourceAlign) {
> DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
> DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
> @@ -285,7 +285,7 @@ Instruction *InstCombiner::visitAllocaIn
>
> /// InstCombineLoadCast - Fold 'load (cast P)' -> cast (load P)' when
> possible.
> static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
> - const DataLayout *TD) {
> + const DataLayout *DL) {
> User *CI = cast<User>(LI.getOperand(0));
> Value *CastOp = CI->getOperand(0);
>
> @@ -307,8 +307,8 @@ static Instruction *InstCombineLoadCast(
> if (ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
> if (Constant *CSrc = dyn_cast<Constant>(CastOp))
> if (ASrcTy->getNumElements() != 0) {
> - Type *IdxTy = TD
> - ? TD->getIntPtrType(SrcTy)
> + Type *IdxTy = DL
> + ? DL->getIntPtrType(SrcTy)
> : Type::getInt64Ty(SrcTy->getContext());
> Value *Idx = Constant::getNullValue(IdxTy);
> Value *Idxs[2] = { Idx, Idx };
> @@ -346,12 +346,12 @@ Instruction *InstCombiner::visitLoadInst
> Value *Op = LI.getOperand(0);
>
> // Attempt to improve the alignment.
> - if (TD) {
> + if (DL) {
> unsigned KnownAlign =
> -      getOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()),TD);
> +      getOrEnforceKnownAlignment(Op, DL->getPrefTypeAlignment(LI.getType()),DL);
> unsigned LoadAlign = LI.getAlignment();
> unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
> - TD->getABITypeAlignment(LI.getType());
> + DL->getABITypeAlignment(LI.getType());
>
> if (KnownAlign > EffectiveLoadAlign)
> LI.setAlignment(KnownAlign);
> @@ -361,7 +361,7 @@ Instruction *InstCombiner::visitLoadInst
>
> // load (cast X) --> cast (load X) iff safe.
> if (isa<CastInst>(Op))
> - if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
> + if (Instruction *Res = InstCombineLoadCast(*this, LI, DL))
> return Res;
>
> // None of the following transforms are legal for volatile/atomic loads.
> @@ -405,7 +405,7 @@ Instruction *InstCombiner::visitLoadInst
> // Instcombine load (constantexpr_cast global) -> cast (load global)
> if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
> if (CE->isCast())
> - if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
> + if (Instruction *Res = InstCombineLoadCast(*this, LI, DL))
> return Res;
>
> if (Op->hasOneUse()) {
> @@ -422,8 +422,8 @@ Instruction *InstCombiner::visitLoadInst
> if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
> // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load
> &V2).
> unsigned Align = LI.getAlignment();
> - if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&
> - isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {
> + if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, DL) &&
> + isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, DL)) {
> LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
>                                          SI->getOperand(1)->getName()+".val");
> LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
> @@ -572,13 +572,13 @@ Instruction *InstCombiner::visitStoreIns
> Value *Ptr = SI.getOperand(1);
>
> // Attempt to improve the alignment.
> - if (TD) {
> + if (DL) {
> unsigned KnownAlign =
> -      getOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()),
> -                                 TD);
> +      getOrEnforceKnownAlignment(Ptr, DL->getPrefTypeAlignment(Val->getType()),
> +                                 DL);
> unsigned StoreAlign = SI.getAlignment();
> unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
> - TD->getABITypeAlignment(Val->getType());
> + DL->getABITypeAlignment(Val->getType());
>
> if (KnownAlign > EffectiveStoreAlign)
> SI.setAlignment(KnownAlign);
>
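
Side note for anyone skimming the load/store hunks above: they show the idiom
this rename touches everywhere in instcombine. DataLayout is optional, so every
size/alignment query is guarded by a null check and falls back to what the IR
itself says when it is absent. Roughly, the shape is something like the sketch
below (not the committed code; the helper name improveLoadAlignment and the
simplified logic are mine for illustration -- the real code also routes through
getOrEnforceKnownAlignment to look at the pointer operand):

  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Raise a load's alignment when target info lets us prove a better value.
  static void improveLoadAlignment(LoadInst &LI, const DataLayout *DL) {
    if (!DL)
      return;                         // no DataLayout: leave the IR untouched
    unsigned Pref = DL->getPrefTypeAlignment(LI.getType());
    unsigned Cur  = LI.getAlignment();
    // Alignment 0 means "unspecified"; its effective value is the ABI one.
    unsigned Effective = Cur ? Cur : DL->getABITypeAlignment(LI.getType());
    if (Pref > Effective)
      LI.setAlignment(Pref);
  }
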
> Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
> (original)
> +++ llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp Thu Feb
> 20 18:06:31 2014
> @@ -118,7 +118,7 @@ Instruction *InstCombiner::visitMul(Bina
> bool Changed = SimplifyAssociativeOrCommutative(I);
> Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
>
> - if (Value *V = SimplifyMulInst(Op0, Op1, TD))
> + if (Value *V = SimplifyMulInst(Op0, Op1, DL))
> return ReplaceInstUsesWith(I, V);
>
> if (Value *V = SimplifyUsingDistributiveLaws(I))
> @@ -429,7 +429,7 @@ Instruction *InstCombiner::visitFMul(Bin
> if (isa<Constant>(Op0))
> std::swap(Op0, Op1);
>
> - if (Value *V = SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(), TD))
> + if (Value *V = SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(), DL))
> return ReplaceInstUsesWith(I, V);
>
> bool AllowReassociate = I.hasUnsafeAlgebra();
> @@ -875,7 +875,7 @@ static size_t visitUDivOperand(Value *Op
> Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
> Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
>
> - if (Value *V = SimplifyUDivInst(Op0, Op1, TD))
> + if (Value *V = SimplifyUDivInst(Op0, Op1, DL))
> return ReplaceInstUsesWith(I, V);
>
> // Handle the integer div common cases
> @@ -934,7 +934,7 @@ Instruction *InstCombiner::visitUDiv(Bin
> Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
> Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
>
> - if (Value *V = SimplifySDivInst(Op0, Op1, TD))
> + if (Value *V = SimplifySDivInst(Op0, Op1, DL))
> return ReplaceInstUsesWith(I, V);
>
> // Handle the integer div common cases
> @@ -1020,7 +1020,7 @@ static Instruction *CvtFDivConstToRecipr
> Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
> Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
>
> - if (Value *V = SimplifyFDivInst(Op0, Op1, TD))
> + if (Value *V = SimplifyFDivInst(Op0, Op1, DL))
> return ReplaceInstUsesWith(I, V);
>
> if (isa<Constant>(Op0))
> @@ -1182,7 +1182,7 @@ Instruction *InstCombiner::commonIRemTra
> Instruction *InstCombiner::visitURem(BinaryOperator &I) {
> Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
>
> - if (Value *V = SimplifyURemInst(Op0, Op1, TD))
> + if (Value *V = SimplifyURemInst(Op0, Op1, DL))
> return ReplaceInstUsesWith(I, V);
>
> if (Instruction *common = commonIRemTransforms(I))
> @@ -1214,7 +1214,7 @@ Instruction *InstCombiner::visitURem(Bin
> Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
> Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
>
> - if (Value *V = SimplifySRemInst(Op0, Op1, TD))
> + if (Value *V = SimplifySRemInst(Op0, Op1, DL))
> return ReplaceInstUsesWith(I, V);
>
> // Handle the integer rem common cases
> @@ -1285,7 +1285,7 @@ Instruction *InstCombiner::visitSRem(Bin
> Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
> Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
>
> - if (Value *V = SimplifyFRemInst(Op0, Op1, TD))
> + if (Value *V = SimplifyFRemInst(Op0, Op1, DL))
> return ReplaceInstUsesWith(I, V);
>
> // Handle cases involving: rem X, (select Cond, Y, Z)
>
> Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp (original)
> +++ llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp Thu Feb 20
> 18:06:31 2014
> @@ -790,7 +790,7 @@ Instruction *InstCombiner::SliceUpIllega
> // PHINode simplification
> //
> Instruction *InstCombiner::visitPHINode(PHINode &PN) {
> - if (Value *V = SimplifyInstruction(&PN, TD, TLI))
> + if (Value *V = SimplifyInstruction(&PN, DL, TLI))
> return ReplaceInstUsesWith(PN, V);
>
> // If all PHI operands are the same operation, pull them through the
> PHI,
> @@ -893,8 +893,8 @@ Instruction *InstCombiner::visitPHINode(
> // it is only used by trunc or trunc(lshr) operations. If so, we split
> the
> // PHI into the various pieces being extracted. This sort of thing is
> // introduced when SROA promotes an aggregate to a single large integer
> type.
> - if (PN.getType()->isIntegerTy() && TD &&
> - !TD->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
> + if (PN.getType()->isIntegerTy() && DL &&
> + !DL->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
> if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
> return Res;
>
>
> Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp (original)
> +++ llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp Thu Feb 20
> 18:06:31 2014
> @@ -554,18 +554,18 @@ Instruction *InstCombiner::visitSelectIn
> // arms of the select. See if substituting this value into the arm and
> // simplifying the result yields the same value as the other arm.
> if (Pred == ICmpInst::ICMP_EQ) {
> -    if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, TD, TLI) == TrueVal ||
> -        SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, TD, TLI) == TrueVal)
> +    if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, DL, TLI) == TrueVal ||
> +        SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, DL, TLI) == TrueVal)
>        return ReplaceInstUsesWith(SI, FalseVal);
> -    if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, TD, TLI) == FalseVal ||
> -        SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, TD, TLI) == FalseVal)
> +    if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, DL, TLI) == FalseVal ||
> +        SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, DL, TLI) == FalseVal)
>        return ReplaceInstUsesWith(SI, FalseVal);
>    } else if (Pred == ICmpInst::ICMP_NE) {
> -    if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, TD, TLI) == FalseVal ||
> -        SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, TD, TLI) == FalseVal)
> +    if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, DL, TLI) == FalseVal ||
> +        SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, DL, TLI) == FalseVal)
>        return ReplaceInstUsesWith(SI, TrueVal);
> -    if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, TD, TLI) == TrueVal ||
> -        SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, TD, TLI) == TrueVal)
> +    if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, DL, TLI) == TrueVal ||
> +        SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, DL, TLI) == TrueVal)
> return ReplaceInstUsesWith(SI, TrueVal);
> }
>
> @@ -734,7 +734,7 @@ Instruction *InstCombiner::visitSelectIn
> Value *TrueVal = SI.getTrueValue();
> Value *FalseVal = SI.getFalseValue();
>
> - if (Value *V = SimplifySelectInst(CondVal, TrueVal, FalseVal, TD))
> + if (Value *V = SimplifySelectInst(CondVal, TrueVal, FalseVal, DL))
> return ReplaceInstUsesWith(SI, V);
>
> if (SI.getType()->isIntegerTy(1)) {
>
> Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp (original)
> +++ llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp Thu Feb 20
> 18:06:31 2014
> @@ -677,7 +677,7 @@ Instruction *InstCombiner::FoldShiftByCo
> Instruction *InstCombiner::visitShl(BinaryOperator &I) {
> if (Value *V = SimplifyShlInst(I.getOperand(0), I.getOperand(1),
> I.hasNoSignedWrap(),
> I.hasNoUnsignedWrap(),
> - TD))
> + DL))
> return ReplaceInstUsesWith(I, V);
>
> if (Instruction *V = commonShiftTransforms(I))
> @@ -714,7 +714,7 @@ Instruction *InstCombiner::visitShl(Bina
>
> Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
> if (Value *V = SimplifyLShrInst(I.getOperand(0), I.getOperand(1),
> - I.isExact(), TD))
> + I.isExact(), DL))
> return ReplaceInstUsesWith(I, V);
>
> if (Instruction *R = commonShiftTransforms(I))
> @@ -754,7 +754,7 @@ Instruction *InstCombiner::visitLShr(Bin
>
> Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
> if (Value *V = SimplifyAShrInst(I.getOperand(0), I.getOperand(1),
> - I.isExact(), TD))
> + I.isExact(), DL))
> return ReplaceInstUsesWith(I, V);
>
> if (Instruction *R = commonShiftTransforms(I))
>
> Modified:
> llvm/trunk/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
> (original)
> +++ llvm/trunk/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
> Thu Feb 20 18:06:31 2014
> @@ -105,9 +105,9 @@ Value *InstCombiner::SimplifyDemandedUse
> assert(Depth <= 6 && "Limit Search Depth");
> uint32_t BitWidth = DemandedMask.getBitWidth();
> Type *VTy = V->getType();
> - assert((TD || !VTy->isPointerTy()) &&
> + assert((DL || !VTy->isPointerTy()) &&
> "SimplifyDemandedBits needs to know bit widths!");
> -  assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
> +  assert((!DL || DL->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
> (!VTy->isIntOrIntVectorTy() ||
> VTy->getScalarSizeInBits() == BitWidth) &&
> KnownZero.getBitWidth() == BitWidth &&
>
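
That assert is also a nice reminder of why the pointer gets threaded around at
all: for integers the bit width is encoded in the type, but for pointer-typed
values only DataLayout knows it. A minimal sketch of that distinction (the
helper scalarBitWidth is just an illustration, not anything in the tree):

  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Type.h"
  using namespace llvm;

  // Return the bit width of Ty's scalar type, or 0 if it cannot be known
  // without target information.
  static unsigned scalarBitWidth(Type *Ty, const DataLayout *DL) {
    if (Ty->isIntOrIntVectorTy())
      return Ty->getScalarSizeInBits();             // width lives in the type
    if (Ty->getScalarType()->isPointerTy() && DL)
      return DL->getTypeSizeInBits(Ty->getScalarType()); // needs DataLayout
    return 0;                                       // unknown without DL
  }
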
> Modified: llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
> (original)
> +++ llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp Thu Feb
> 20 18:06:31 2014
> @@ -103,13 +103,13 @@ Value *InstCombiner::EmitGEPOffset(User
> bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
> assert(From->isIntegerTy() && To->isIntegerTy());
>
> - // If we don't have TD, we don't know if the source/dest are legal.
> - if (!TD) return false;
> + // If we don't have DL, we don't know if the source/dest are legal.
> + if (!DL) return false;
>
> unsigned FromWidth = From->getPrimitiveSizeInBits();
> unsigned ToWidth = To->getPrimitiveSizeInBits();
> - bool FromLegal = TD->isLegalInteger(FromWidth);
> - bool ToLegal = TD->isLegalInteger(ToWidth);
> + bool FromLegal = DL->isLegalInteger(FromWidth);
> + bool ToLegal = DL->isLegalInteger(ToWidth);
>
> // If this is a legal integer from type, and the result would be an
> illegal
> // type, don't do the transformation.
> @@ -221,7 +221,7 @@ bool InstCombiner::SimplifyAssociativeOr
> Value *C = I.getOperand(1);
>
> // Does "B op C" simplify?
> - if (Value *V = SimplifyBinOp(Opcode, B, C, TD)) {
> + if (Value *V = SimplifyBinOp(Opcode, B, C, DL)) {
> // It simplifies to V. Form "A op V".
> I.setOperand(0, A);
> I.setOperand(1, V);
> @@ -250,7 +250,7 @@ bool InstCombiner::SimplifyAssociativeOr
> Value *C = Op1->getOperand(1);
>
> // Does "A op B" simplify?
> - if (Value *V = SimplifyBinOp(Opcode, A, B, TD)) {
> + if (Value *V = SimplifyBinOp(Opcode, A, B, DL)) {
> // It simplifies to V. Form "V op C".
> I.setOperand(0, V);
> I.setOperand(1, C);
> @@ -272,7 +272,7 @@ bool InstCombiner::SimplifyAssociativeOr
> Value *C = I.getOperand(1);
>
> // Does "C op A" simplify?
> - if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
> + if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
> // It simplifies to V. Form "V op B".
> I.setOperand(0, V);
> I.setOperand(1, B);
> @@ -292,7 +292,7 @@ bool InstCombiner::SimplifyAssociativeOr
> Value *C = Op1->getOperand(1);
>
> // Does "C op A" simplify?
> - if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
> + if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
> // It simplifies to V. Form "B op V".
> I.setOperand(0, B);
> I.setOperand(1, V);
> @@ -425,7 +425,7 @@ Value *InstCombiner::SimplifyUsingDistri
> std::swap(C, D);
> // Consider forming "A op' (B op D)".
> // If "B op D" simplifies then it can be formed with no cost.
> - Value *V = SimplifyBinOp(TopLevelOpcode, B, D, TD);
> + Value *V = SimplifyBinOp(TopLevelOpcode, B, D, DL);
> // If "B op D" doesn't simplify then only go on if both of the
> existing
> // operations "A op' B" and "C op' D" will be zapped as no longer
> used.
> if (!V && Op0->hasOneUse() && Op1->hasOneUse())
> @@ -447,7 +447,7 @@ Value *InstCombiner::SimplifyUsingDistri
> std::swap(C, D);
> // Consider forming "(A op C) op' B".
> // If "A op C" simplifies then it can be formed with no cost.
> - Value *V = SimplifyBinOp(TopLevelOpcode, A, C, TD);
> + Value *V = SimplifyBinOp(TopLevelOpcode, A, C, DL);
> // If "A op C" doesn't simplify then only go on if both of the
> existing
> // operations "A op' B" and "C op' D" will be zapped as no longer
> used.
> if (!V && Op0->hasOneUse() && Op1->hasOneUse())
> @@ -469,8 +469,8 @@ Value *InstCombiner::SimplifyUsingDistri
> Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
>
> // Do "A op C" and "B op C" both simplify?
> - if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, TD))
> - if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, TD)) {
> + if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, DL))
> + if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, DL)) {
> // They do! Return "L op' R".
> ++NumExpand;
> // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
> @@ -478,7 +478,7 @@ Value *InstCombiner::SimplifyUsingDistri
> (Instruction::isCommutative(InnerOpcode) && L == B && R == A))
> return Op0;
> // Otherwise return "L op' R" if it simplifies.
> - if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
> + if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
> return V;
> // Otherwise, create a new instruction.
> C = Builder->CreateBinOp(InnerOpcode, L, R);
> @@ -494,8 +494,8 @@ Value *InstCombiner::SimplifyUsingDistri
> Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
>
> // Do "A op B" and "A op C" both simplify?
> - if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, TD))
> - if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, TD)) {
> + if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, DL))
> + if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, DL)) {
> // They do! Return "L op' R".
> ++NumExpand;
> // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
> @@ -503,7 +503,7 @@ Value *InstCombiner::SimplifyUsingDistri
> (Instruction::isCommutative(InnerOpcode) && L == C && R == B))
> return Op1;
> // Otherwise return "L op' R" if it simplifies.
> - if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
> + if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
> return V;
> // Otherwise, create a new instruction.
> A = Builder->CreateBinOp(InnerOpcode, L, R);
> @@ -777,7 +777,7 @@ Type *InstCombiner::FindElementAtOffset(
> SmallVectorImpl<Value*>
> &NewIndices) {
> assert(PtrTy->isPtrOrPtrVectorTy());
>
> - if (!TD)
> + if (!DL)
> return 0;
>
> Type *Ty = PtrTy->getPointerElementType();
> @@ -787,9 +787,9 @@ Type *InstCombiner::FindElementAtOffset(
> // Start with the index over the outer type. Note that the type size
> // might be zero (even if the offset isn't zero) if the indexed type
> // is something like [0 x {int, int}]
> - Type *IntPtrTy = TD->getIntPtrType(PtrTy);
> + Type *IntPtrTy = DL->getIntPtrType(PtrTy);
> int64_t FirstIdx = 0;
> - if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
> + if (int64_t TySize = DL->getTypeAllocSize(Ty)) {
> FirstIdx = Offset/TySize;
> Offset -= FirstIdx*TySize;
>
> @@ -807,11 +807,11 @@ Type *InstCombiner::FindElementAtOffset(
> // Index into the types. If we fail, set OrigBase to null.
> while (Offset) {
> // Indexing into tail padding between struct/array elements.
> - if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
> + if (uint64_t(Offset*8) >= DL->getTypeSizeInBits(Ty))
> return 0;
>
> if (StructType *STy = dyn_cast<StructType>(Ty)) {
> - const StructLayout *SL = TD->getStructLayout(STy);
> + const StructLayout *SL = DL->getStructLayout(STy);
> assert(Offset < (int64_t)SL->getSizeInBytes() &&
> "Offset must stay within the indexed type");
>
> @@ -822,7 +822,7 @@ Type *InstCombiner::FindElementAtOffset(
> Offset -= SL->getElementOffset(Elt);
> Ty = STy->getElementType(Elt);
> } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
> - uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
> + uint64_t EltSize = DL->getTypeAllocSize(AT->getElementType());
> assert(EltSize && "Cannot index into a zero-sized array");
> NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
> Offset %= EltSize;
> @@ -1087,16 +1087,16 @@ Value *InstCombiner::Descale(Value *Val,
> Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP)
> {
> SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
>
> - if (Value *V = SimplifyGEPInst(Ops, TD))
> + if (Value *V = SimplifyGEPInst(Ops, DL))
> return ReplaceInstUsesWith(GEP, V);
>
> Value *PtrOp = GEP.getOperand(0);
>
> // Eliminate unneeded casts for indices, and replace indices which
> displace
> // by multiples of a zero size type with zero.
> - if (TD) {
> + if (DL) {
> bool MadeChange = false;
> - Type *IntPtrTy = TD->getIntPtrType(GEP.getPointerOperandType());
> + Type *IntPtrTy = DL->getIntPtrType(GEP.getPointerOperandType());
>
> gep_type_iterator GTI = gep_type_begin(GEP);
> for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
> @@ -1108,7 +1108,7 @@ Instruction *InstCombiner::visitGetEleme
> // If the element type has zero size then any index over it is
> equivalent
> // to an index of zero, so replace it with zero if it is not zero
> already.
> if (SeqTy->getElementType()->isSized() &&
> - TD->getTypeAllocSize(SeqTy->getElementType()) == 0)
> + DL->getTypeAllocSize(SeqTy->getElementType()) == 0)
> if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
> *I = Constant::getNullValue(IntPtrTy);
> MadeChange = true;
> @@ -1199,12 +1199,12 @@ Instruction *InstCombiner::visitGetEleme
> // Canonicalize (gep i8* X, -(ptrtoint Y)) to (sub (ptrtoint X),
> (ptrtoint Y))
> // The GEP pattern is emitted by the SCEV expander for certain kinds of
> // pointer arithmetic.
> - if (TD && GEP.getNumIndices() == 1 &&
> + if (DL && GEP.getNumIndices() == 1 &&
> match(GEP.getOperand(1), m_Neg(m_PtrToInt(m_Value())))) {
> unsigned AS = GEP.getPointerAddressSpace();
> if (GEP.getType() == Builder->getInt8PtrTy(AS) &&
> GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
> - TD->getPointerSizeInBits(AS)) {
> + DL->getPointerSizeInBits(AS)) {
> Operator *Index = cast<Operator>(GEP.getOperand(1));
> Value *PtrToInt = Builder->CreatePtrToInt(PtrOp, Index->getType());
> Value *NewSub = Builder->CreateSub(PtrToInt, Index->getOperand(1));
> @@ -1266,10 +1266,10 @@ Instruction *InstCombiner::visitGetEleme
> // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V;
> bitcast
> Type *SrcElTy = StrippedPtrTy->getElementType();
> Type *ResElTy = PtrOp->getType()->getPointerElementType();
> - if (TD && SrcElTy->isArrayTy() &&
> - TD->getTypeAllocSize(SrcElTy->getArrayElementType()) ==
> - TD->getTypeAllocSize(ResElTy)) {
> - Type *IdxType = TD->getIntPtrType(GEP.getType());
> + if (DL && SrcElTy->isArrayTy() &&
> + DL->getTypeAllocSize(SrcElTy->getArrayElementType()) ==
> + DL->getTypeAllocSize(ResElTy)) {
> + Type *IdxType = DL->getIntPtrType(GEP.getType());
> Value *Idx[2] = { Constant::getNullValue(IdxType),
> GEP.getOperand(1) };
> Value *NewGEP = GEP.isInBounds() ?
> Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
> @@ -1285,11 +1285,11 @@ Instruction *InstCombiner::visitGetEleme
> // %V = mul i64 %N, 4
> // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V
> // into: %t1 = getelementptr i32* %arr, i32 %N; bitcast
> - if (TD && ResElTy->isSized() && SrcElTy->isSized()) {
> + if (DL && ResElTy->isSized() && SrcElTy->isSized()) {
> // Check that changing the type amounts to dividing the index by
> a scale
> // factor.
> - uint64_t ResSize = TD->getTypeAllocSize(ResElTy);
> - uint64_t SrcSize = TD->getTypeAllocSize(SrcElTy);
> + uint64_t ResSize = DL->getTypeAllocSize(ResElTy);
> + uint64_t SrcSize = DL->getTypeAllocSize(SrcElTy);
> if (ResSize && SrcSize % ResSize == 0) {
> Value *Idx = GEP.getOperand(1);
> unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
> @@ -1297,7 +1297,7 @@ Instruction *InstCombiner::visitGetEleme
>
> // Earlier transforms ensure that the index has type
> IntPtrType, which
> // considerably simplifies the logic by eliminating implicit
> casts.
> - assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
> + assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&
> "Index not cast to pointer width?");
>
> bool NSW;
> @@ -1321,13 +1321,13 @@ Instruction *InstCombiner::visitGetEleme
> // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
> // (where tmp = 8*tmp2) into:
> // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
> - if (TD && ResElTy->isSized() && SrcElTy->isSized() &&
> + if (DL && ResElTy->isSized() && SrcElTy->isSized() &&
> SrcElTy->isArrayTy()) {
> // Check that changing to the array element type amounts to
> dividing the
> // index by a scale factor.
> - uint64_t ResSize = TD->getTypeAllocSize(ResElTy);
> + uint64_t ResSize = DL->getTypeAllocSize(ResElTy);
> uint64_t ArrayEltSize
> - = TD->getTypeAllocSize(SrcElTy->getArrayElementType());
> + = DL->getTypeAllocSize(SrcElTy->getArrayElementType());
> if (ResSize && ArrayEltSize % ResSize == 0) {
> Value *Idx = GEP.getOperand(1);
> unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
> @@ -1335,7 +1335,7 @@ Instruction *InstCombiner::visitGetEleme
>
> // Earlier transforms ensure that the index has type
> IntPtrType, which
> // considerably simplifies the logic by eliminating implicit
> casts.
> - assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
> + assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&
> "Index not cast to pointer width?");
>
> bool NSW;
> @@ -1344,7 +1344,7 @@ Instruction *InstCombiner::visitGetEleme
> // If the multiplication NewIdx * Scale may overflow then the
> new
> // GEP may not be "inbounds".
> Value *Off[2] = {
> - Constant::getNullValue(TD->getIntPtrType(GEP.getType())),
> + Constant::getNullValue(DL->getIntPtrType(GEP.getType())),
> NewIdx
> };
>
> @@ -1361,7 +1361,7 @@ Instruction *InstCombiner::visitGetEleme
> }
> }
>
> - if (!TD)
> + if (!DL)
> return 0;
>
> /// See if we can simplify:
> @@ -1372,10 +1372,10 @@ Instruction *InstCombiner::visitGetEleme
> if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
> Value *Operand = BCI->getOperand(0);
> PointerType *OpType = cast<PointerType>(Operand->getType());
> - unsigned OffsetBits = TD->getPointerTypeSizeInBits(OpType);
> + unsigned OffsetBits = DL->getPointerTypeSizeInBits(OpType);
> APInt Offset(OffsetBits, 0);
> if (!isa<BitCastInst>(Operand) &&
> - GEP.accumulateConstantOffset(*TD, Offset) &&
> + GEP.accumulateConstantOffset(*DL, Offset) &&
> StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace())
> {
>
> // If this GEP instruction doesn't move the pointer, just replace
> the GEP
> @@ -2231,7 +2231,7 @@ static bool TryToSinkInstruction(Instruc
> static bool AddReachableCodeToWorklist(BasicBlock *BB,
> SmallPtrSet<BasicBlock*, 64>
> &Visited,
> InstCombiner &IC,
> - const DataLayout *TD,
> + const DataLayout *DL,
> const TargetLibraryInfo *TLI) {
> bool MadeIRChange = false;
> SmallVector<BasicBlock*, 256> Worklist;
> @@ -2259,7 +2259,7 @@ static bool AddReachableCodeToWorklist(B
>
> // ConstantProp instruction if trivially constant.
> if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
> - if (Constant *C = ConstantFoldInstruction(Inst, TD, TLI)) {
> + if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) {
> DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: "
> << *Inst << '\n');
> Inst->replaceAllUsesWith(C);
> @@ -2268,7 +2268,7 @@ static bool AddReachableCodeToWorklist(B
> continue;
> }
>
> - if (TD) {
> + if (DL) {
> // See if we can constant fold its operands.
> for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
> i != e; ++i) {
> @@ -2277,7 +2277,7 @@ static bool AddReachableCodeToWorklist(B
>
> Constant*& FoldRes = FoldedConstants[CE];
> if (!FoldRes)
> - FoldRes = ConstantFoldConstantExpression(CE, TD, TLI);
> + FoldRes = ConstantFoldConstantExpression(CE, DL, TLI);
> if (!FoldRes)
> FoldRes = CE;
>
> @@ -2344,7 +2344,7 @@ bool InstCombiner::DoOneIteration(Functi
> // the reachable instructions. Ignore blocks that are not reachable.
> Keep
> // track of which blocks we visit.
> SmallPtrSet<BasicBlock*, 64> Visited;
> -  MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD,
> +  MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, DL,
>                                               TLI);
>
> // Do a quick scan over the function. If we find any blocks that are
> @@ -2390,7 +2390,7 @@ bool InstCombiner::DoOneIteration(Functi
>
> // Instruction isn't dead, see if we can constant propagate it.
> if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
> - if (Constant *C = ConstantFoldInstruction(I, TD, TLI)) {
> + if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
> DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I <<
> '\n');
>
> // Add operands to the worklist.
> @@ -2499,10 +2499,10 @@ namespace {
> class InstCombinerLibCallSimplifier : public LibCallSimplifier {
> InstCombiner *IC;
> public:
> - InstCombinerLibCallSimplifier(const DataLayout *TD,
> + InstCombinerLibCallSimplifier(const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> InstCombiner *IC)
> - : LibCallSimplifier(TD, TLI, UnsafeFPShrink) {
> + : LibCallSimplifier(DL, TLI, UnsafeFPShrink) {
> this->IC = IC;
> }
>
> @@ -2518,7 +2518,7 @@ bool InstCombiner::runOnFunction(Functio
> if (skipOptnoneFunction(F))
> return false;
>
> - TD = getAnalysisIfAvailable<DataLayout>();
> + DL = getAnalysisIfAvailable<DataLayout>();
> TLI = &getAnalysis<TargetLibraryInfo>();
> // Minimizing size?
> MinimizeSize =
> F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
> @@ -2527,11 +2527,11 @@ bool InstCombiner::runOnFunction(Functio
> /// Builder - This is an IRBuilder that automatically inserts new
> /// instructions into the worklist when they are created.
> IRBuilder<true, TargetFolder, InstCombineIRInserter>
> - TheBuilder(F.getContext(), TargetFolder(TD),
> + TheBuilder(F.getContext(), TargetFolder(DL),
> InstCombineIRInserter(Worklist));
> Builder = &TheBuilder;
>
> - InstCombinerLibCallSimplifier TheSimplifier(TD, TLI, this);
> + InstCombinerLibCallSimplifier TheSimplifier(DL, TLI, this);
> Simplifier = &TheSimplifier;
>
> bool EverMadeChange = false;
>
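
The runOnFunction hunk above is also where the renamed member actually comes
from: at this point DataLayout is an optional immutable pass, so passes fetch
it with getAnalysisIfAvailable<> and have to tolerate a null result for the
whole run. Stripped down to the boilerplate, it looks roughly like this
(ExamplePass is hypothetical, not code from the commit):

  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Function.h"
  #include "llvm/Pass.h"
  using namespace llvm;

  namespace {
  struct ExamplePass : public FunctionPass {
    static char ID;
    const DataLayout *DL;                 // may stay null for the whole run
    ExamplePass() : FunctionPass(ID), DL(0) {}

    virtual bool runOnFunction(Function &F) {
      // DataLayout is not required; transforms that need sizes simply bail
      // out (or give conservative answers) when DL is null.
      DL = getAnalysisIfAvailable<DataLayout>();
      return false;
    }
  };
  }
  char ExamplePass::ID = 0;
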
> Modified: llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp
> (original)
> +++ llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp Thu Feb
> 20 18:06:31 2014
> @@ -336,7 +336,7 @@ struct AddressSanitizer : public Functio
> SmallString<64> BlacklistFile;
>
> LLVMContext *C;
> - DataLayout *TD;
> + DataLayout *DL;
> int LongSize;
> Type *IntptrTy;
> ShadowMapping Mapping;
> @@ -385,7 +385,7 @@ class AddressSanitizerModule : public Mo
> SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals;
> Type *IntptrTy;
> LLVMContext *C;
> - DataLayout *TD;
> + DataLayout *DL;
> ShadowMapping Mapping;
> Function *AsanPoisonGlobals;
> Function *AsanUnpoisonGlobals;
> @@ -516,7 +516,7 @@ struct FunctionStackPoisoner : public In
>
> uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
> Type *Ty = AI->getAllocatedType();
> - uint64_t SizeInBytes = ASan.TD->getTypeAllocSize(Ty);
> + uint64_t SizeInBytes = ASan.DL->getTypeAllocSize(Ty);
> return SizeInBytes;
> }
> /// Finds alloca where the value comes from.
> @@ -691,7 +691,7 @@ void AddressSanitizer::instrumentMop(Ins
> Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
>
> assert(OrigTy->isSized());
> - uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);
> + uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
>
> assert((TypeSize % 8) == 0);
>
> @@ -912,13 +912,13 @@ void AddressSanitizerModule::initializeC
> // redzones and inserts this function into llvm.global_ctors.
> bool AddressSanitizerModule::runOnModule(Module &M) {
> if (!ClGlobals) return false;
> - TD = getAnalysisIfAvailable<DataLayout>();
> - if (!TD)
> + DL = getAnalysisIfAvailable<DataLayout>();
> + if (!DL)
> return false;
> BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
> if (BL->isIn(M)) return false;
> C = &(M.getContext());
> - int LongSize = TD->getPointerSizeInBits();
> + int LongSize = DL->getPointerSizeInBits();
> IntptrTy = Type::getIntNTy(*C, LongSize);
> Mapping = getShadowMapping(M, LongSize);
> initializeCallbacks(M);
> @@ -964,7 +964,7 @@ bool AddressSanitizerModule::runOnModule
> GlobalVariable *G = GlobalsToChange[i];
> PointerType *PtrTy = cast<PointerType>(G->getType());
> Type *Ty = PtrTy->getElementType();
> - uint64_t SizeInBytes = TD->getTypeAllocSize(Ty);
> + uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
> uint64_t MinRZ = MinRedzoneSizeForGlobal();
> // MinRZ <= RZ <= kMaxGlobalRedzone
> // and trying to make RZ to be ~ 1/4 of SizeInBytes.
> @@ -1105,15 +1105,15 @@ void AddressSanitizer::emitShadowMapping
> // virtual
> bool AddressSanitizer::doInitialization(Module &M) {
> // Initialize the private fields. No one has accessed them before.
> - TD = getAnalysisIfAvailable<DataLayout>();
> + DL = getAnalysisIfAvailable<DataLayout>();
>
> - if (!TD)
> + if (!DL)
> return false;
> BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
> DynamicallyInitializedGlobals.Init(M);
>
> C = &(M.getContext());
> - LongSize = TD->getPointerSizeInBits();
> + LongSize = DL->getPointerSizeInBits();
> IntptrTy = Type::getIntNTy(*C, LongSize);
>
> AsanCtorFunction = Function::Create(
> @@ -1378,7 +1378,7 @@ FunctionStackPoisoner::poisonRedZones(co
> for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes)
> {
> uint64_t Val = 0;
> for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {
> - if (ASan.TD->isLittleEndian())
> + if (ASan.DL->isLittleEndian())
> Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
> else
> Val = (Val << 8) | ShadowBytes[i + j];
>
> Modified: llvm/trunk/lib/Transforms/Instrumentation/BoundsChecking.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/BoundsChecking.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Instrumentation/BoundsChecking.cpp (original)
> +++ llvm/trunk/lib/Transforms/Instrumentation/BoundsChecking.cpp Thu Feb
> 20 18:06:31 2014
> @@ -53,7 +53,7 @@ namespace {
> }
>
> private:
> - const DataLayout *TD;
> + const DataLayout *DL;
> const TargetLibraryInfo *TLI;
> ObjectSizeOffsetEvaluator *ObjSizeEval;
> BuilderTy *Builder;
> @@ -127,7 +127,7 @@ void BoundsChecking::emitBranchToTrap(Va
> /// size of memory block that is touched.
> /// Returns true if any change was made to the IR, false otherwise.
> bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
> - uint64_t NeededSize = TD->getTypeStoreSize(InstVal->getType());
> + uint64_t NeededSize = DL->getTypeStoreSize(InstVal->getType());
> DEBUG(dbgs() << "Instrument " << *Ptr << " for " << Twine(NeededSize)
> << " bytes\n");
>
> @@ -142,7 +142,7 @@ bool BoundsChecking::instrument(Value *P
> Value *Offset = SizeOffset.second;
> ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size);
>
> - Type *IntTy = TD->getIntPtrType(Ptr->getType());
> + Type *IntTy = DL->getIntPtrType(Ptr->getType());
> Value *NeededSizeVal = ConstantInt::get(IntTy, NeededSize);
>
> // three checks are required to ensure safety:
> @@ -166,13 +166,13 @@ bool BoundsChecking::instrument(Value *P
> }
>
> bool BoundsChecking::runOnFunction(Function &F) {
> - TD = &getAnalysis<DataLayout>();
> + DL = &getAnalysis<DataLayout>();
> TLI = &getAnalysis<TargetLibraryInfo>();
>
> TrapBB = 0;
> - BuilderTy TheBuilder(F.getContext(), TargetFolder(TD));
> + BuilderTy TheBuilder(F.getContext(), TargetFolder(DL));
> Builder = &TheBuilder;
> - ObjectSizeOffsetEvaluator TheObjSizeEval(TD, TLI, F.getContext(),
> + ObjectSizeOffsetEvaluator TheObjSizeEval(DL, TLI, F.getContext(),
> /*RoundToAlign=*/true);
> ObjSizeEval = &TheObjSizeEval;
>
>
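
For the BoundsChecking hunks just above, the two DataLayout queries are doing
distinct jobs: getTypeStoreSize gives the number of bytes the access touches,
and getIntPtrType gives an integer type wide enough for sizes and offsets in
the pointer's address space. A small illustrative sketch (accessFacts is an
invented name, and unlike the pass this version assumes DL is present):

  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Gather the target-dependent facts a bounds check needs for an access of
  // value type Ty through pointer Ptr.
  static void accessFacts(Type *Ty, Value *Ptr, const DataLayout &DL,
                          uint64_t &NeededSize, Type *&IntTy) {
    NeededSize = DL.getTypeStoreSize(Ty);       // bytes actually accessed
    IntTy = DL.getIntPtrType(Ptr->getType());   // intptr_t for this addrspace
  }
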
> Modified: llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp
> (original)
> +++ llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp Thu Feb
> 20 18:06:31 2014
> @@ -207,7 +207,7 @@ class MemorySanitizer : public FunctionP
> StringRef BlacklistFile = StringRef())
> : FunctionPass(ID),
> TrackOrigins(TrackOrigins || ClTrackOrigins),
> - TD(0),
> + DL(0),
> WarningFn(0),
> BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile :
> BlacklistFile),
> WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
> @@ -222,7 +222,7 @@ class MemorySanitizer : public FunctionP
> /// \brief Track origins (allocation points) of uninitialized values.
> bool TrackOrigins;
>
> - DataLayout *TD;
> + DataLayout *DL;
> LLVMContext *C;
> Type *IntptrTy;
> Type *OriginTy;
> @@ -399,12 +399,12 @@ void MemorySanitizer::initializeCallback
> ///
> /// inserts a call to __msan_init to the module's constructor list.
> bool MemorySanitizer::doInitialization(Module &M) {
> - TD = getAnalysisIfAvailable<DataLayout>();
> - if (!TD)
> + DL = getAnalysisIfAvailable<DataLayout>();
> + if (!DL)
> return false;
> BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
> C = &(M.getContext());
> - unsigned PtrSize = TD->getPointerSizeInBits(/* AddressSpace */0);
> + unsigned PtrSize = DL->getPointerSizeInBits(/* AddressSpace */0);
> switch (PtrSize) {
> case 64:
> ShadowMask = kShadowMask64;
> @@ -420,7 +420,7 @@ bool MemorySanitizer::doInitialization(M
> }
>
> IRBuilder<> IRB(*C);
> - IntptrTy = IRB.getIntPtrTy(TD);
> + IntptrTy = IRB.getIntPtrTy(DL);
> OriginTy = IRB.getInt32Ty();
>
> ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
> @@ -650,7 +650,7 @@ struct MemorySanitizerVisitor : public I
> /// \brief Add MemorySanitizer instrumentation to a function.
> bool runOnFunction() {
> MS.initializeCallbacks(*F.getParent());
> - if (!MS.TD) return false;
> + if (!MS.DL) return false;
>
> // In the presence of unreachable blocks, we may see Phi nodes with
> // incoming nodes from such blocks. Since InstVisitor skips
> unreachable
> @@ -710,7 +710,7 @@ struct MemorySanitizerVisitor : public I
> if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
> return IT;
> if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
> - uint32_t EltSize = MS.TD->getTypeSizeInBits(VT->getElementType());
> + uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());
> return VectorType::get(IntegerType::get(*MS.C, EltSize),
> VT->getNumElements());
> }
> @@ -722,7 +722,7 @@ struct MemorySanitizerVisitor : public I
> DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
> return Res;
> }
> - uint32_t TypeSize = MS.TD->getTypeSizeInBits(OrigTy);
> + uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);
> return IntegerType::get(*MS.C, TypeSize);
> }
>
> @@ -889,8 +889,8 @@ struct MemorySanitizerVisitor : public I
> continue;
> }
> unsigned Size = AI->hasByValAttr()
> -        ? MS.TD->getTypeAllocSize(AI->getType()->getPointerElementType())
> -        : MS.TD->getTypeAllocSize(AI->getType());
> +        ? MS.DL->getTypeAllocSize(AI->getType()->getPointerElementType())
> +        : MS.DL->getTypeAllocSize(AI->getType());
> if (A == AI) {
> Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
> if (AI->hasByValAttr()) {
> @@ -900,7 +900,7 @@ struct MemorySanitizerVisitor : public I
> unsigned ArgAlign = AI->getParamAlignment();
> if (ArgAlign == 0) {
> Type *EltType = A->getType()->getPointerElementType();
> - ArgAlign = MS.TD->getABITypeAlignment(EltType);
> + ArgAlign = MS.DL->getABITypeAlignment(EltType);
> }
> unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
> Value *Cpy = EntryIRB.CreateMemCpy(
> @@ -1935,13 +1935,13 @@ struct MemorySanitizerVisitor : public I
> if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
> assert(A->getType()->isPointerTy() &&
> "ByVal argument is not a pointer!");
> -        Size = MS.TD->getTypeAllocSize(A->getType()->getPointerElementType());
> +        Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType());
> unsigned Alignment = CS.getParamAlignment(i + 1);
> Store = IRB.CreateMemCpy(ArgShadowBase,
> getShadowPtr(A, Type::getInt8Ty(*MS.C),
> IRB),
> Size, Alignment);
> } else {
> - Size = MS.TD->getTypeAllocSize(A->getType());
> + Size = MS.DL->getTypeAllocSize(A->getType());
> Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
> kShadowTLSAlignment);
> }
> @@ -2024,7 +2024,7 @@ struct MemorySanitizerVisitor : public I
> void visitAllocaInst(AllocaInst &I) {
> setShadow(&I, getCleanShadow(&I));
> IRBuilder<> IRB(I.getNextNode());
> - uint64_t Size = MS.TD->getTypeAllocSize(I.getAllocatedType());
> + uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType());
> if (PoisonStack && ClPoisonStackWithCall) {
> IRB.CreateCall2(MS.MsanPoisonStackFn,
> IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
> @@ -2223,7 +2223,7 @@ struct VarArgAMD64Helper : public VarArg
> FpOffset += 16;
> break;
> case AK_Memory:
> - uint64_t ArgSize = MS.TD->getTypeAllocSize(A->getType());
> + uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
> Base = getShadowPtrForVAArgument(A, IRB, OverflowOffset);
> OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
> }
>
> Modified: llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
> (original)
> +++ llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp Thu Feb
> 20 18:06:31 2014
> @@ -78,7 +78,7 @@ namespace {
> struct ThreadSanitizer : public FunctionPass {
> ThreadSanitizer(StringRef BlacklistFile = StringRef())
> : FunctionPass(ID),
> - TD(0),
> + DL(0),
> BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
> : BlacklistFile) { }
> const char *getPassName() const;
> @@ -96,7 +96,7 @@ struct ThreadSanitizer : public Function
> bool addrPointsToConstantData(Value *Addr);
> int getMemoryAccessFuncIndex(Value *Addr);
>
> - DataLayout *TD;
> + DataLayout *DL;
> Type *IntptrTy;
> SmallString<64> BlacklistFile;
> OwningPtr<SpecialCaseList> BL;
> @@ -224,14 +224,14 @@ void ThreadSanitizer::initializeCallback
> }
>
> bool ThreadSanitizer::doInitialization(Module &M) {
> - TD = getAnalysisIfAvailable<DataLayout>();
> - if (!TD)
> + DL = getAnalysisIfAvailable<DataLayout>();
> + if (!DL)
> return false;
> BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
>
> // Always insert a call to __tsan_init into the module's CTORs.
> IRBuilder<> IRB(M.getContext());
> - IntptrTy = IRB.getIntPtrTy(TD);
> + IntptrTy = IRB.getIntPtrTy(DL);
> Value *TsanInit = M.getOrInsertFunction("__tsan_init",
> IRB.getVoidTy(), NULL);
> appendToGlobalCtors(M, cast<Function>(TsanInit), 0);
> @@ -320,7 +320,7 @@ static bool isAtomic(Instruction *I) {
> }
>
> bool ThreadSanitizer::runOnFunction(Function &F) {
> - if (!TD) return false;
> + if (!DL) return false;
> if (BL->isIn(F)) return false;
> initializeCallbacks(*F.getParent());
> SmallVector<Instruction*, 8> RetVec;
> @@ -573,7 +573,7 @@ int ThreadSanitizer::getMemoryAccessFunc
> Type *OrigPtrTy = Addr->getType();
> Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
> assert(OrigTy->isSized());
> - uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);
> + uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
> if (TypeSize != 8 && TypeSize != 16 &&
> TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
> NumAccessesWithBadSize++;
>
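
The ASan/MSan/TSan hunks all share the same doInitialization pattern: the
sanitizers hard-require target info, so they return early when no DataLayout
is registered and otherwise cache the pointer-sized integer type for the rest
of the instrumentation. Roughly (initSanitizerTypes is an illustrative helper,
not the committed code):

  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  // Returns false (pass stays disabled for this module) when DL is missing;
  // otherwise computes the iN type matching the target's pointer width.
  static bool initSanitizerTypes(Module &M, const DataLayout *DL,
                                 Type *&IntptrTy) {
    if (!DL)
      return false;
    IRBuilder<> IRB(M.getContext());
    IntptrTy = IRB.getIntPtrTy(DL);      // e.g. i64 on a 64-bit target
    return true;
  }
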
> Modified: llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp (original)
> +++ llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp Thu Feb 20 18:06:31 2014
> @@ -262,7 +262,7 @@ namespace {
> /// cases.
> class EarlyCSE : public FunctionPass {
> public:
> - const DataLayout *TD;
> + const DataLayout *DL;
> const TargetLibraryInfo *TLI;
> DominatorTree *DT;
> typedef RecyclingAllocator<BumpPtrAllocator,
> @@ -432,7 +432,7 @@ bool EarlyCSE::processNode(DomTreeNode *
>
> // If the instruction can be simplified (e.g. X+0 = X) then replace
> it with
> // its simpler value.
> - if (Value *V = SimplifyInstruction(Inst, TD, TLI, DT)) {
> + if (Value *V = SimplifyInstruction(Inst, DL, TLI, DT)) {
> DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V <<
> '\n');
> Inst->replaceAllUsesWith(V);
> Inst->eraseFromParent();
> @@ -557,7 +557,7 @@ bool EarlyCSE::runOnFunction(Function &F
>
> std::vector<StackNode *> nodesToProcess;
>
> - TD = getAnalysisIfAvailable<DataLayout>();
> + DL = getAnalysisIfAvailable<DataLayout>();
> TLI = &getAnalysis<TargetLibraryInfo>();
> DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
>
>
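
EarlyCSE shows the consumer side of the same deal: SimplifyInstruction takes
the possibly-null DataLayout alongside TLI and the dominator tree, and a null
DL just means fewer folds fire. The replace-and-erase idiom around it looks
roughly like this (foldAway is an invented wrapper for illustration):

  #include "llvm/Analysis/InstructionSimplify.h"
  #include "llvm/IR/Instruction.h"
  using namespace llvm;

  // Try to fold Inst to an existing simpler value; DL may be null, in which
  // case SimplifyInstruction simply has less information to work with.
  static bool foldAway(Instruction *Inst, const DataLayout *DL,
                       const TargetLibraryInfo *TLI, DominatorTree *DT) {
    if (Value *V = SimplifyInstruction(Inst, DL, TLI, DT)) {
      Inst->replaceAllUsesWith(V);   // rewire all users to the simpler value
      Inst->eraseFromParent();       // then drop the now-dead instruction
      return true;
    }
    return false;
  }
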
> Modified: llvm/trunk/lib/Transforms/Scalar/GVN.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/GVN.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Scalar/GVN.cpp (original)
> +++ llvm/trunk/lib/Transforms/Scalar/GVN.cpp Thu Feb 20 18:06:31 2014
> @@ -586,7 +586,7 @@ namespace {
> bool NoLoads;
> MemoryDependenceAnalysis *MD;
> DominatorTree *DT;
> - const DataLayout *TD;
> + const DataLayout *DL;
> const TargetLibraryInfo *TLI;
> SetVector<BasicBlock *> DeadBlocks;
>
> @@ -624,7 +624,7 @@ namespace {
> InstrsToErase.push_back(I);
> }
>
> - const DataLayout *getDataLayout() const { return TD; }
> + const DataLayout *getDataLayout() const { return DL; }
> DominatorTree &getDominatorTree() const { return *DT; }
> AliasAnalysis *getAliasAnalysis() const { return
> VN.getAliasAnalysis(); }
> MemoryDependenceAnalysis &getMemDep() const { return *MD; }
> @@ -828,7 +828,7 @@ SpeculationFailure:
> /// CoerceAvailableValueToLoadType will succeed.
> static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
> Type *LoadTy,
> - const DataLayout &TD) {
> + const DataLayout &DL) {
> // If the loaded or stored value is an first class array or struct,
> don't try
> // to transform them. We need to be able to bitcast to integer.
> if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
> @@ -837,8 +837,8 @@ static bool CanCoerceMustAliasedValueToL
> return false;
>
> // The store has to be at least as big as the load.
> - if (TD.getTypeSizeInBits(StoredVal->getType()) <
> - TD.getTypeSizeInBits(LoadTy))
> + if (DL.getTypeSizeInBits(StoredVal->getType()) <
> + DL.getTypeSizeInBits(LoadTy))
> return false;
>
> return true;
> @@ -853,15 +853,15 @@ static bool CanCoerceMustAliasedValueToL
> static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
> Type *LoadedTy,
> Instruction *InsertPt,
> - const DataLayout &TD) {
> - if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
> + const DataLayout &DL) {
> + if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, DL))
> return 0;
>
> // If this is already the right type, just return it.
> Type *StoredValTy = StoredVal->getType();
>
> - uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
> - uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);
> + uint64_t StoreSize = DL.getTypeSizeInBits(StoredValTy);
> + uint64_t LoadSize = DL.getTypeSizeInBits(LoadedTy);
>
> // If the store and reload are the same size, we can always reuse it.
> if (StoreSize == LoadSize) {
> @@ -872,13 +872,13 @@ static Value *CoerceAvailableValueToLoad
>
> // Convert source pointers to integers, which can be bitcast.
> if (StoredValTy->getScalarType()->isPointerTy()) {
> - StoredValTy = TD.getIntPtrType(StoredValTy);
> + StoredValTy = DL.getIntPtrType(StoredValTy);
> StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
> }
>
> Type *TypeToCastTo = LoadedTy;
> if (TypeToCastTo->getScalarType()->isPointerTy())
> - TypeToCastTo = TD.getIntPtrType(TypeToCastTo);
> + TypeToCastTo = DL.getIntPtrType(TypeToCastTo);
>
> if (StoredValTy != TypeToCastTo)
> StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);
> @@ -897,7 +897,7 @@ static Value *CoerceAvailableValueToLoad
>
> // Convert source pointers to integers, which can be manipulated.
> if (StoredValTy->getScalarType()->isPointerTy()) {
> - StoredValTy = TD.getIntPtrType(StoredValTy);
> + StoredValTy = DL.getIntPtrType(StoredValTy);
> StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
> }
>
> @@ -909,7 +909,7 @@ static Value *CoerceAvailableValueToLoad
>
> // If this is a big-endian system, we need to shift the value down to
> the low
> // bits so that a truncate will work.
> - if (TD.isBigEndian()) {
> + if (DL.isBigEndian()) {
> Constant *Val = ConstantInt::get(StoredVal->getType(),
> StoreSize-LoadSize);
> StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp",
> InsertPt);
> }
> @@ -940,15 +940,15 @@ static Value *CoerceAvailableValueToLoad
> static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
> Value *WritePtr,
> uint64_t WriteSizeInBits,
> - const DataLayout &TD) {
> + const DataLayout &DL) {
> // If the loaded or stored value is a first class array or struct,
> don't try
> // to transform them. We need to be able to bitcast to integer.
> if (LoadTy->isStructTy() || LoadTy->isArrayTy())
> return -1;
>
> int64_t StoreOffset = 0, LoadOffset = 0;
> -  Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr,StoreOffset,&TD);
> -  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &TD);
> +  Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr,StoreOffset,&DL);
> +  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &DL);
> if (StoreBase != LoadBase)
> return -1;
>
> @@ -970,7 +970,7 @@ static int AnalyzeLoadFromClobberingWrit
> // If the load and store don't overlap at all, the store doesn't provide
> // anything to the load. In this case, they really don't alias at all,
> AA
> // must have gotten confused.
> - uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);
> + uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy);
>
> if ((WriteSizeInBits & 7) | (LoadSize & 7))
> return -1;
> @@ -1013,51 +1013,51 @@ static int AnalyzeLoadFromClobberingWrit
> /// memdep query of a load that ends up being a clobbering store.
> static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
> StoreInst *DepSI,
> - const DataLayout &TD) {
> + const DataLayout &DL) {
> // Cannot handle reading from store of first-class aggregate yet.
> if (DepSI->getValueOperand()->getType()->isStructTy() ||
> DepSI->getValueOperand()->getType()->isArrayTy())
> return -1;
>
> Value *StorePtr = DepSI->getPointerOperand();
> -  uint64_t StoreSize =TD.getTypeSizeInBits(DepSI->getValueOperand()->getType());
> +  uint64_t StoreSize =DL.getTypeSizeInBits(DepSI->getValueOperand()->getType());
> return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
> - StorePtr, StoreSize, TD);
> + StorePtr, StoreSize, DL);
> }
>
> /// AnalyzeLoadFromClobberingLoad - This function is called when we have a
> /// memdep query of a load that ends up being clobbered by another load.
> See if
> /// the other load can feed into the second load.
> static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
> -                                        LoadInst *DepLI, const DataLayout &TD){
> +                                        LoadInst *DepLI, const DataLayout &DL){
> // Cannot handle reading from store of first-class aggregate yet.
> if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
> return -1;
>
> Value *DepPtr = DepLI->getPointerOperand();
> - uint64_t DepSize = TD.getTypeSizeInBits(DepLI->getType());
> -  int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, TD);
> +  int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, DL);
> DepSize, DL);
> if (R != -1) return R;
>
> // If we have a load/load clobber an DepLI can be widened to cover this
> load,
> // then we should widen it!
> int64_t LoadOffs = 0;
> const Value *LoadBase =
> - GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &TD);
> - unsigned LoadSize = TD.getTypeStoreSize(LoadTy);
> + GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &DL);
> + unsigned LoadSize = DL.getTypeStoreSize(LoadTy);
>
> unsigned Size = MemoryDependenceAnalysis::
> -    getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, TD);
> +    getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, DL);
> if (Size == 0) return -1;
>
> -  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, TD);
> +  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, DL);
> }
>
>
>
> static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
> MemIntrinsic *MI,
> - const DataLayout &TD) {
> + const DataLayout &DL) {
> // If the mem operation is a non-constant size, we can't handle it.
> ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
> if (SizeCst == 0) return -1;
> @@ -1067,7 +1067,7 @@ static int AnalyzeLoadFromClobberingMemI
> // of the memset..
> if (MI->getIntrinsicID() == Intrinsic::memset)
> return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
> - MemSizeInBits, TD);
> + MemSizeInBits, DL);
>
> // If we have a memcpy/memmove, the only case we can handle is if this
> is a
> // copy from constant memory. In that case, we can read directly from
> the
> @@ -1077,12 +1077,12 @@ static int AnalyzeLoadFromClobberingMemI
> Constant *Src = dyn_cast<Constant>(MTI->getSource());
> if (Src == 0) return -1;
>
> -  GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &TD));
> +  GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &DL));
> if (GV == 0 || !GV->isConstant()) return -1;
>
> // See if the access is within the bounds of the transfer.
> int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
> -                                              MI->getDest(), MemSizeInBits, TD);
> +                                              MI->getDest(), MemSizeInBits, DL);
> if (Offset == -1)
> return Offset;
>
> @@ -1095,7 +1095,7 @@ static int AnalyzeLoadFromClobberingMemI
> ConstantInt::get(Type::getInt64Ty(Src->getContext()),
> (unsigned)Offset);
> Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
> Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
> - if (ConstantFoldLoadFromConstPtr(Src, &TD))
> + if (ConstantFoldLoadFromConstPtr(Src, &DL))
> return Offset;
> return -1;
> }
> @@ -1108,11 +1108,11 @@ static int AnalyzeLoadFromClobberingMemI
> /// before we give up.
> static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
> Type *LoadTy,
> -                                   Instruction *InsertPt, const DataLayout &TD){
> +                                   Instruction *InsertPt, const DataLayout &DL){
> LLVMContext &Ctx = SrcVal->getType()->getContext();
>
> - uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
> - uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;
> + uint64_t StoreSize = (DL.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
> + uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy) + 7) / 8;
>
> IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
>
> @@ -1120,13 +1120,13 @@ static Value *GetStoreValueForLoad(Value
> // to an integer type to start with.
> if (SrcVal->getType()->getScalarType()->isPointerTy())
> SrcVal = Builder.CreatePtrToInt(SrcVal,
> - TD.getIntPtrType(SrcVal->getType()));
> + DL.getIntPtrType(SrcVal->getType()));
> if (!SrcVal->getType()->isIntegerTy())
> SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx,
> StoreSize*8));
>
> // Shift the bits to the least significant depending on endianness.
> unsigned ShiftAmt;
> - if (TD.isLittleEndian())
> + if (DL.isLittleEndian())
> ShiftAmt = Offset*8;
> else
> ShiftAmt = (StoreSize-LoadSize-Offset)*8;
> @@ -1137,7 +1137,7 @@ static Value *GetStoreValueForLoad(Value
> if (LoadSize != StoreSize)
> SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx,
> LoadSize*8));
>
> - return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
> + return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, DL);
> }
>
> /// GetLoadValueForLoad - This function is called when we have a
> @@ -1148,11 +1148,11 @@ static Value *GetStoreValueForLoad(Value
> static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
> Type *LoadTy, Instruction *InsertPt,
> GVN &gvn) {
> - const DataLayout &TD = *gvn.getDataLayout();
> + const DataLayout &DL = *gvn.getDataLayout();
> // If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting
> to
> // widen SrcVal out to a larger load.
> - unsigned SrcValSize = TD.getTypeStoreSize(SrcVal->getType());
> - unsigned LoadSize = TD.getTypeStoreSize(LoadTy);
> + unsigned SrcValSize = DL.getTypeStoreSize(SrcVal->getType());
> + unsigned LoadSize = DL.getTypeStoreSize(LoadTy);
> if (Offset+LoadSize > SrcValSize) {
> assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!");
> assert(SrcVal->getType()->isIntegerTy() && "Can't widen non-integer
> load");
> @@ -1184,7 +1184,7 @@ static Value *GetLoadValueForLoad(LoadIn
> // Replace uses of the original load with the wider load. On a big
> endian
> // system, we need to shift down to get the relevant bits.
> Value *RV = NewLoad;
> - if (TD.isBigEndian())
> + if (DL.isBigEndian())
> RV = Builder.CreateLShr(RV,
>
> NewLoadSize*8-SrcVal->getType()->getPrimitiveSizeInBits());
> RV = Builder.CreateTrunc(RV, SrcVal->getType());
> @@ -1199,7 +1199,7 @@ static Value *GetLoadValueForLoad(LoadIn
> SrcVal = NewLoad;
> }
>
> - return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, TD);
> + return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, DL);
> }
>
>
> @@ -1207,9 +1207,9 @@ static Value *GetLoadValueForLoad(LoadIn
> /// memdep query of a load that ends up being a clobbering mem intrinsic.
> static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned
> Offset,
> Type *LoadTy, Instruction *InsertPt,
> - const DataLayout &TD){
> + const DataLayout &DL){
> LLVMContext &Ctx = LoadTy->getContext();
> - uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
> + uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy)/8;
>
> IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
>
> @@ -1240,7 +1240,7 @@ static Value *GetMemInstValueForLoad(Mem
> ++NumBytesSet;
> }
>
> - return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
> + return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, DL);
> }
>
> // Otherwise, this is a memcpy/memmove from a constant global.
> @@ -1256,7 +1256,7 @@ static Value *GetMemInstValueForLoad(Mem
> ConstantInt::get(Type::getInt64Ty(Src->getContext()),
> (unsigned)Offset);
> Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
> Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
> - return ConstantFoldLoadFromConstPtr(Src, &TD);
> + return ConstantFoldLoadFromConstPtr(Src, &DL);
> }
>
>
> @@ -1322,10 +1322,10 @@ Value *AvailableValueInBlock::Materializ
> if (isSimpleValue()) {
> Res = getSimpleValue();
> if (Res->getType() != LoadTy) {
> - const DataLayout *TD = gvn.getDataLayout();
> - assert(TD && "Need target data to handle type mismatch case");
> + const DataLayout *DL = gvn.getDataLayout();
> + assert(DL && "Need target data to handle type mismatch case");
> Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
> - *TD);
> + *DL);
>
> DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset <<
> " "
> << *getSimpleValue() << '\n'
> @@ -1344,10 +1344,10 @@ Value *AvailableValueInBlock::Materializ
> << *Res << '\n' << "\n\n\n");
> }
> } else if (isMemIntrinValue()) {
> - const DataLayout *TD = gvn.getDataLayout();
> - assert(TD && "Need target data to handle type mismatch case");
> + const DataLayout *DL = gvn.getDataLayout();
> + assert(DL && "Need target data to handle type mismatch case");
> Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
> - LoadTy, BB->getTerminator(), *TD);
> + LoadTy, BB->getTerminator(), *DL);
> DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
> << " " << *getMemIntrinValue() << '\n'
> << *Res << '\n' << "\n\n\n");
> @@ -1400,9 +1400,9 @@ void GVN::AnalyzeLoadAvailability(LoadIn
> // read by the load, we can extract the bits we need for the load
> from the
> // stored value.
> if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
> - if (TD && Address) {
> + if (DL && Address) {
> int Offset = AnalyzeLoadFromClobberingStore(LI->getType(),
> Address,
> - DepSI, *TD);
> + DepSI, *DL);
> if (Offset != -1) {
> ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
>
> DepSI->getValueOperand(),
> @@ -1419,10 +1419,10 @@ void GVN::AnalyzeLoadAvailability(LoadIn
> if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
> // If this is a clobber and L is the first instruction in its
> block, then
> // we have the first instruction in the entry block.
> - if (DepLI != LI && Address && TD) {
> + if (DepLI != LI && Address && DL) {
> int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(),
>
> LI->getPointerOperand(),
> - DepLI, *TD);
> + DepLI, *DL);
>
> if (Offset != -1) {
>
> ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB,DepLI,
> @@ -1435,9 +1435,9 @@ void GVN::AnalyzeLoadAvailability(LoadIn
> // If the clobbering value is a memset/memcpy/memmove, see if we can
> // forward a value on from it.
> if (MemIntrinsic *DepMI =
> dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
> - if (TD && Address) {
> + if (DL && Address) {
> int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(),
> Address,
> - DepMI, *TD);
> + DepMI, *DL);
> if (Offset != -1) {
> ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB,
> DepMI,
>
> Offset));
> @@ -1469,8 +1469,8 @@ void GVN::AnalyzeLoadAvailability(LoadIn
> if (S->getValueOperand()->getType() != LI->getType()) {
> // If the stored value is larger or equal to the loaded value, we
> can
> // reuse it.
> - if (TD == 0 ||
> !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
> - LI->getType(),
> *TD)) {
> + if (DL == 0 ||
> !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
> + LI->getType(),
> *DL)) {
> UnavailableBlocks.push_back(DepBB);
> continue;
> }
> @@ -1486,7 +1486,7 @@ void GVN::AnalyzeLoadAvailability(LoadIn
> if (LD->getType() != LI->getType()) {
> // If the stored value is larger or equal to the loaded value, we
> can
> // reuse it.
> - if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD,
> LI->getType(),*TD)){
> + if (DL == 0 || !CanCoerceMustAliasedValueToLoad(LD,
> LI->getType(),*DL)){
> UnavailableBlocks.push_back(DepBB);
> continue;
> }
> @@ -1609,7 +1609,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, A
> // If all preds have a single successor, then we know it is safe to
> insert
> // the load on the pred (?!?), so we can insert code to materialize
> the
> // pointer if it is not available.
> - PHITransAddr Address(LI->getPointerOperand(), TD);
> + PHITransAddr Address(LI->getPointerOperand(), DL);
> Value *LoadPtr = 0;
> LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
> *DT, NewInsts);
> @@ -1821,7 +1821,7 @@ bool GVN::processLoad(LoadInst *L) {
>
> // If we have a clobber and target data is around, see if this is a
> clobber
> // that we can fix up through code synthesis.
> - if (Dep.isClobber() && TD) {
> + if (Dep.isClobber() && DL) {
> // Check to see if we have something like this:
> // store i32 123, i32* %P
> // %A = bitcast i32* %P to i8*
> @@ -1836,10 +1836,10 @@ bool GVN::processLoad(LoadInst *L) {
> if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) {
> int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
> L->getPointerOperand(),
> - DepSI, *TD);
> + DepSI, *DL);
> if (Offset != -1)
> AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
> - L->getType(), L, *TD);
> + L->getType(), L, *DL);
> }
>
> // Check to see if we have something like this:
> @@ -1854,7 +1854,7 @@ bool GVN::processLoad(LoadInst *L) {
>
> int Offset = AnalyzeLoadFromClobberingLoad(L->getType(),
> L->getPointerOperand(),
> - DepLI, *TD);
> + DepLI, *DL);
> if (Offset != -1)
> AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L,
> *this);
> }
> @@ -1864,9 +1864,9 @@ bool GVN::processLoad(LoadInst *L) {
> if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
> int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
>
> L->getPointerOperand(),
> - DepMI, *TD);
> + DepMI, *DL);
> if (Offset != -1)
> - AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L,
> *TD);
> + AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L,
> *DL);
> }
>
> if (AvailVal) {
> @@ -1917,9 +1917,9 @@ bool GVN::processLoad(LoadInst *L) {
> // actually have the same type. See if we know how to reuse the
> stored
> // value (depending on its type).
> if (StoredVal->getType() != L->getType()) {
> - if (TD) {
> + if (DL) {
> StoredVal = CoerceAvailableValueToLoadType(StoredVal,
> L->getType(),
> - L, *TD);
> + L, *DL);
> if (StoredVal == 0)
> return false;
>
> @@ -1946,9 +1946,9 @@ bool GVN::processLoad(LoadInst *L) {
> // the same type. See if we know how to reuse the previously loaded
> value
> // (depending on its type).
> if (DepLI->getType() != L->getType()) {
> - if (TD) {
> + if (DL) {
> AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
> - L, *TD);
> + L, *DL);
> if (AvailableVal == 0)
> return false;
>
> @@ -2200,7 +2200,7 @@ bool GVN::processInstruction(Instruction
> // to value numbering it. Value numbering often exposes redundancies,
> for
> // example if it determines that %y is equal to %x then the instruction
> // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now
> simplify.
> - if (Value *V = SimplifyInstruction(I, TD, TLI, DT)) {
> + if (Value *V = SimplifyInstruction(I, DL, TLI, DT)) {
> I->replaceAllUsesWith(V);
> if (MD && V->getType()->getScalarType()->isPointerTy())
> MD->invalidateCachedPointerInfo(V);
> @@ -2318,7 +2318,7 @@ bool GVN::runOnFunction(Function& F) {
> if (!NoLoads)
> MD = &getAnalysis<MemoryDependenceAnalysis>();
> DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
> - TD = getAnalysisIfAvailable<DataLayout>();
> + DL = getAnalysisIfAvailable<DataLayout>();
> TLI = &getAnalysis<TargetLibraryInfo>();
> VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
> VN.setMemDep(MD);
>
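A note for anyone reading the GVN hunks above: the pass member and every helper parameter simply switch from TD to DL, and all the null checks stay, because DataLayout is still an optional analysis at this point. Reduced to a minimal sketch that is not the real GVN code (the pass name and body here are invented for illustration):

    // Sketch of the optional-DataLayout idiom with the post-r201827 naming.
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
    struct ExamplePass : public FunctionPass {
      static char ID;
      const DataLayout *DL;                     // was "TD" before this rename
      ExamplePass() : FunctionPass(ID), DL(0) {}

      virtual bool runOnFunction(Function &F) {
        // DataLayout may not be registered, so the pointer stays nullable and
        // every size-based transform is guarded on it.
        DL = getAnalysisIfAvailable<DataLayout>();
        if (!DL)
          return false;
        // e.g. uint64_t Bits = DL->getTypeSizeInBits(SomeTy);
        return false;
      }
    };
    }
    char ExamplePass::ID = 0;

Whether that nullability should stay is a separate question; the patch deliberately changes names only.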
> Modified: llvm/trunk/lib/Transforms/Scalar/GlobalMerge.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/GlobalMerge.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Scalar/GlobalMerge.cpp (original)
> +++ llvm/trunk/lib/Transforms/Scalar/GlobalMerge.cpp Thu Feb 20 18:06:31
> 2014
> @@ -126,15 +126,15 @@ namespace {
> }
>
> struct GlobalCmp {
> - const DataLayout *TD;
> + const DataLayout *DL;
>
> - GlobalCmp(const DataLayout *td) : TD(td) { }
> + GlobalCmp(const DataLayout *DL) : DL(DL) { }
>
> bool operator()(const GlobalVariable *GV1, const GlobalVariable
> *GV2) {
> Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
> Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
>
> - return (TD->getTypeAllocSize(Ty1) < TD->getTypeAllocSize(Ty2));
> + return (DL->getTypeAllocSize(Ty1) < DL->getTypeAllocSize(Ty2));
> }
> };
> };
> @@ -148,7 +148,7 @@ INITIALIZE_PASS(GlobalMerge, "global-mer
> bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
> Module &M, bool isConst, unsigned AddrSpace)
> const {
> const TargetLowering *TLI = TM->getTargetLowering();
> - const DataLayout *TD = TLI->getDataLayout();
> + const DataLayout *DL = TLI->getDataLayout();
>
> // FIXME: Infer the maximum possible offset depending on the actual
> users
> // (these max offsets are different for the users inside Thumb or ARM
> @@ -156,7 +156,7 @@ bool GlobalMerge::doMerge(SmallVectorImp
> unsigned MaxOffset = TLI->getMaximalGlobalOffset();
>
> // FIXME: Find better heuristics
> - std::stable_sort(Globals.begin(), Globals.end(), GlobalCmp(TD));
> + std::stable_sort(Globals.begin(), Globals.end(), GlobalCmp(DL));
>
> Type *Int32Ty = Type::getInt32Ty(M.getContext());
>
> @@ -167,7 +167,7 @@ bool GlobalMerge::doMerge(SmallVectorImp
> std::vector<Constant*> Inits;
> for (j = i; j != e; ++j) {
> Type *Ty = Globals[j]->getType()->getElementType();
> - MergedSize += TD->getTypeAllocSize(Ty);
> + MergedSize += DL->getTypeAllocSize(Ty);
> if (MergedSize > MaxOffset) {
> break;
> }
> @@ -242,7 +242,7 @@ bool GlobalMerge::doInitialization(Modul
> DenseMap<unsigned, SmallVector<GlobalVariable*, 16> > Globals,
> ConstGlobals,
> BSSGlobals;
> const TargetLowering *TLI = TM->getTargetLowering();
> - const DataLayout *TD = TLI->getDataLayout();
> + const DataLayout *DL = TLI->getDataLayout();
> unsigned MaxOffset = TLI->getMaximalGlobalOffset();
> bool Changed = false;
> setMustKeepGlobalVariables(M);
> @@ -260,9 +260,9 @@ bool GlobalMerge::doInitialization(Modul
> unsigned AddressSpace = PT->getAddressSpace();
>
> // Ignore fancy-aligned globals for now.
> - unsigned Alignment = TD->getPreferredAlignment(I);
> + unsigned Alignment = DL->getPreferredAlignment(I);
> Type *Ty = I->getType()->getElementType();
> - if (Alignment > TD->getABITypeAlignment(Ty))
> + if (Alignment > DL->getABITypeAlignment(Ty))
> continue;
>
> // Ignore all 'special' globals.
> @@ -274,7 +274,7 @@ bool GlobalMerge::doInitialization(Modul
> if (isMustKeepGlobalVariable(I))
> continue;
>
> - if (TD->getTypeAllocSize(Ty) < MaxOffset) {
> + if (DL->getTypeAllocSize(Ty) < MaxOffset) {
> if (TargetLoweringObjectFile::getKindForGlobal(I,
> TLI->getTargetMachine())
> .isBSSLocal())
> BSSGlobals[AddressSpace].push_back(I);
>
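The GlobalMerge change also renames the DataLayout held by the sorting functor. The idiom is just an ordinary comparator that asks DataLayout for allocation sizes, roughly like this standalone sketch (SizeCmp and sortBySize are made-up names, not the actual GlobalCmp):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/GlobalVariable.h"
    #include <algorithm>
    #include <vector>
    using namespace llvm;

    // Order globals by allocation size so small ones are packed first.
    struct SizeCmp {
      const DataLayout *DL;
      SizeCmp(const DataLayout *DL) : DL(DL) {}
      bool operator()(const GlobalVariable *A, const GlobalVariable *B) const {
        Type *TyA = cast<PointerType>(A->getType())->getElementType();
        Type *TyB = cast<PointerType>(B->getType())->getElementType();
        return DL->getTypeAllocSize(TyA) < DL->getTypeAllocSize(TyB);
      }
    };

    void sortBySize(std::vector<GlobalVariable *> &Globals, const DataLayout *DL) {
      // stable_sort keeps the original order of equally sized globals.
      std::stable_sort(Globals.begin(), Globals.end(), SizeCmp(DL));
    }

The stable sort matters so that globals of equal size keep a deterministic order in the merged output.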
> Modified: llvm/trunk/lib/Transforms/Scalar/IndVarSimplify.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/IndVarSimplify.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Scalar/IndVarSimplify.cpp (original)
> +++ llvm/trunk/lib/Transforms/Scalar/IndVarSimplify.cpp Thu Feb 20
> 18:06:31 2014
> @@ -71,7 +71,7 @@ namespace {
> LoopInfo *LI;
> ScalarEvolution *SE;
> DominatorTree *DT;
> - DataLayout *TD;
> + DataLayout *DL;
> TargetLibraryInfo *TLI;
>
> SmallVector<WeakVH, 16> DeadInsts;
> @@ -79,7 +79,7 @@ namespace {
> public:
>
> static char ID; // Pass identification, replacement for typeid
> - IndVarSimplify() : LoopPass(ID), LI(0), SE(0), DT(0), TD(0),
> + IndVarSimplify() : LoopPass(ID), LI(0), SE(0), DT(0), DL(0),
> Changed(false) {
> initializeIndVarSimplifyPass(*PassRegistry::getPassRegistry());
> }
> @@ -659,14 +659,14 @@ namespace {
> /// extended by this sign or zero extend operation. This is used to
> determine
> /// the final width of the IV before actually widening it.
> static void visitIVCast(CastInst *Cast, WideIVInfo &WI, ScalarEvolution
> *SE,
> - const DataLayout *TD) {
> + const DataLayout *DL) {
> bool IsSigned = Cast->getOpcode() == Instruction::SExt;
> if (!IsSigned && Cast->getOpcode() != Instruction::ZExt)
> return;
>
> Type *Ty = Cast->getType();
> uint64_t Width = SE->getTypeSizeInBits(Ty);
> - if (TD && !TD->isLegalInteger(Width))
> + if (DL && !DL->isLegalInteger(Width))
> return;
>
> if (!WI.WidestNativeType) {
> @@ -1122,15 +1122,15 @@ PHINode *WidenIV::CreateWideIV(SCEVExpan
> namespace {
> class IndVarSimplifyVisitor : public IVVisitor {
> ScalarEvolution *SE;
> - const DataLayout *TD;
> + const DataLayout *DL;
> PHINode *IVPhi;
>
> public:
> WideIVInfo WI;
>
> IndVarSimplifyVisitor(PHINode *IV, ScalarEvolution *SCEV,
> - const DataLayout *TData, const DominatorTree
> *DTree):
> - SE(SCEV), TD(TData), IVPhi(IV) {
> + const DataLayout *DL, const DominatorTree
> *DTree):
> + SE(SCEV), DL(DL), IVPhi(IV) {
> DT = DTree;
> WI.NarrowIV = IVPhi;
> if (ReduceLiveIVs)
> @@ -1138,7 +1138,7 @@ namespace {
> }
>
> // Implement the interface used by simplifyUsersOfIV.
> - virtual void visitCast(CastInst *Cast) { visitIVCast(Cast, WI, SE,
> TD); }
> + virtual void visitCast(CastInst *Cast) { visitIVCast(Cast, WI, SE,
> DL); }
> };
> }
>
> @@ -1172,7 +1172,7 @@ void IndVarSimplify::SimplifyAndExtend(L
> PHINode *CurrIV = LoopPhis.pop_back_val();
>
> // Information about sign/zero extensions of CurrIV.
> - IndVarSimplifyVisitor Visitor(CurrIV, SE, TD, DT);
> + IndVarSimplifyVisitor Visitor(CurrIV, SE, DL, DT);
>
> Changed |= simplifyUsersOfIV(CurrIV, SE, &LPM, DeadInsts, &Visitor);
>
> @@ -1444,7 +1444,7 @@ static bool AlmostDeadIV(PHINode *Phi, B
> /// could at least handle constant BECounts.
> static PHINode *
> FindLoopCounter(Loop *L, const SCEV *BECount,
> - ScalarEvolution *SE, DominatorTree *DT, const DataLayout
> *TD) {
> + ScalarEvolution *SE, DominatorTree *DT, const DataLayout
> *DL) {
> uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());
>
> Value *Cond =
> @@ -1473,7 +1473,7 @@ FindLoopCounter(Loop *L, const SCEV *BEC
> // AR may be wider than BECount. With eq/ne tests overflow is
> immaterial.
> // AR may not be a narrower type, or we may never exit.
> uint64_t PhiWidth = SE->getTypeSizeInBits(AR->getType());
> - if (PhiWidth < BCWidth || (TD && !TD->isLegalInteger(PhiWidth)))
> + if (PhiWidth < BCWidth || (DL && !DL->isLegalInteger(PhiWidth)))
> continue;
>
> const SCEV *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
> @@ -1818,7 +1818,7 @@ bool IndVarSimplify::runOnLoop(Loop *L,
> LI = &getAnalysis<LoopInfo>();
> SE = &getAnalysis<ScalarEvolution>();
> DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
> - TD = getAnalysisIfAvailable<DataLayout>();
> + DL = getAnalysisIfAvailable<DataLayout>();
> TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
>
> DeadInsts.clear();
> @@ -1860,7 +1860,7 @@ bool IndVarSimplify::runOnLoop(Loop *L,
> // If we have a trip count expression, rewrite the loop's exit condition
> // using it. We can currently only handle loops with a single exit.
> if (canExpandBackedgeTakenCount(L, SE) && needsLFTR(L, DT)) {
> - PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, TD);
> + PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, DL);
> if (IndVar) {
> // Check preconditions for proper SCEVExpander operation. SCEV does
> not
> // express SCEVExpander's dependencies, such as LoopSimplify.
> Instead any
>
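IndVarSimplify is another mechanical rename, but it shows the one behavioural wrinkle of a nullable DataLayout: the widening code only rejects non-native integer widths when DL is actually present. Isolated into a tiny hedged sketch (widthIsUsable is an invented helper, not in the patch):

    #include "llvm/IR/DataLayout.h"
    using namespace llvm;

    // Mirror of the "if (DL && !DL->isLegalInteger(Width)) return;" guard:
    // without DataLayout we optimistically accept any width; with it we only
    // widen to integer types the target supports natively.
    static bool widthIsUsable(const DataLayout *DL, unsigned Width) {
      if (DL && !DL->isLegalInteger(Width))
        return false;
      return true;
    }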
> Modified: llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp (original)
> +++ llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp Thu Feb 20 18:06:31
> 2014
> @@ -76,7 +76,7 @@ namespace {
> /// revectored to the false side of the second if.
> ///
> class JumpThreading : public FunctionPass {
> - DataLayout *TD;
> + DataLayout *DL;
> TargetLibraryInfo *TLI;
> LazyValueInfo *LVI;
> #ifdef NDEBUG
> @@ -152,7 +152,7 @@ bool JumpThreading::runOnFunction(Functi
> return false;
>
> DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
> - TD = getAnalysisIfAvailable<DataLayout>();
> + DL = getAnalysisIfAvailable<DataLayout>();
> TLI = &getAnalysis<TargetLibraryInfo>();
> LVI = &getAnalysis<LazyValueInfo>();
>
> @@ -493,7 +493,7 @@ ComputeValueKnownInPredecessors(Value *V
> Value *LHS = PN->getIncomingValue(i);
> Value *RHS = Cmp->getOperand(1)->DoPHITranslation(BB, PredBB);
>
> - Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, TD);
> + Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, DL);
> if (Res == 0) {
> if (!isa<Constant>(RHS))
> continue;
> @@ -695,7 +695,7 @@ bool JumpThreading::ProcessBlock(BasicBl
> // Run constant folding to see if we can reduce the condition to a
> simple
> // constant.
> if (Instruction *I = dyn_cast<Instruction>(Condition)) {
> - Value *SimpleVal = ConstantFoldInstruction(I, TD, TLI);
> + Value *SimpleVal = ConstantFoldInstruction(I, DL, TLI);
> if (SimpleVal) {
> I->replaceAllUsesWith(SimpleVal);
> I->eraseFromParent();
> @@ -1478,7 +1478,7 @@ bool JumpThreading::ThreadEdge(BasicBloc
> // At this point, the IR is fully up to date and consistent. Do a
> quick scan
> // over the new instructions and zap any that are constants or dead.
> This
> // frequently happens because of phi translation.
> - SimplifyInstructionsInBlock(NewBB, TD, TLI);
> + SimplifyInstructionsInBlock(NewBB, DL, TLI);
>
> // Threaded an edge!
> ++NumThreads;
> @@ -1560,7 +1560,7 @@ bool JumpThreading::DuplicateCondBranchO
> // If this instruction can be simplified after the operands are
> updated,
> // just use the simplified value instead. This frequently happens
> due to
> // phi translation.
> - if (Value *IV = SimplifyInstruction(New, TD)) {
> + if (Value *IV = SimplifyInstruction(New, DL)) {
> delete New;
> ValueMapping[BI] = IV;
> } else {
>
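JumpThreading (and LICM right below) mostly just forward DL into the folding utilities, which already accept a null DataLayout and simply fold less without it. Usage, roughly, with a wrapper function invented for illustration:

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/Target/TargetLibraryInfo.h"
    using namespace llvm;

    // Try to fold I away entirely; DL and TLI may both be null, the folder
    // just has fewer facts to work with in that case.
    static bool tryFoldAway(Instruction *I, const DataLayout *DL,
                            const TargetLibraryInfo *TLI) {
      if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
        I->replaceAllUsesWith(C);
        I->eraseFromParent();
        return true;
      }
      return false;
    }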
> Modified: llvm/trunk/lib/Transforms/Scalar/LICM.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LICM.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Scalar/LICM.cpp (original)
> +++ llvm/trunk/lib/Transforms/Scalar/LICM.cpp Thu Feb 20 18:06:31 2014
> @@ -108,7 +108,7 @@ namespace {
> LoopInfo *LI; // Current LoopInfo
> DominatorTree *DT; // Dominator Tree for the current Loop.
>
> - DataLayout *TD; // DataLayout for constant folding.
> + DataLayout *DL; // DataLayout for constant folding.
> TargetLibraryInfo *TLI; // TargetLibraryInfo for constant folding.
>
> // State that is updated as we process loops.
> @@ -221,7 +221,7 @@ bool LICM::runOnLoop(Loop *L, LPPassMana
> AA = &getAnalysis<AliasAnalysis>();
> DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
>
> - TD = getAnalysisIfAvailable<DataLayout>();
> + DL = getAnalysisIfAvailable<DataLayout>();
> TLI = &getAnalysis<TargetLibraryInfo>();
>
> assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
> @@ -394,7 +394,7 @@ void LICM::HoistRegion(DomTreeNode *N) {
> // Try constant folding this instruction. If all the operands are
> // constants, it is technically hoistable, but it would be better
> to just
> // fold it.
> - if (Constant *C = ConstantFoldInstruction(&I, TD, TLI)) {
> + if (Constant *C = ConstantFoldInstruction(&I, DL, TLI)) {
> DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C <<
> '\n');
> CurAST->copyValue(&I, C);
> CurAST->deleteValue(&I);
>
> Modified: llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp (original)
> +++ llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp Thu Feb 20
> 18:06:31 2014
> @@ -132,7 +132,7 @@ namespace {
>
> class LoopIdiomRecognize : public LoopPass {
> Loop *CurLoop;
> - const DataLayout *TD;
> + const DataLayout *DL;
> DominatorTree *DT;
> ScalarEvolution *SE;
> TargetLibraryInfo *TLI;
> @@ -141,7 +141,7 @@ namespace {
> static char ID;
> explicit LoopIdiomRecognize() : LoopPass(ID) {
> initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
> - TD = 0; DT = 0; SE = 0; TLI = 0; TTI = 0;
> + DL = 0; DT = 0; SE = 0; TLI = 0; TTI = 0;
> }
>
> bool runOnLoop(Loop *L, LPPassManager &LPM);
> @@ -182,7 +182,7 @@ namespace {
> }
>
> const DataLayout *getDataLayout() {
> - return TD ? TD : TD=getAnalysisIfAvailable<DataLayout>();
> + return DL ? DL : DL=getAnalysisIfAvailable<DataLayout>();
> }
>
> DominatorTree *getDominatorTree() {
> @@ -782,7 +782,7 @@ bool LoopIdiomRecognize::processLoopStor
> Value *StorePtr = SI->getPointerOperand();
>
> // Reject stores that are so large that they overflow an unsigned.
> - uint64_t SizeInBits = TD->getTypeSizeInBits(StoredVal->getType());
> + uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
> if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
> return false;
>
> @@ -910,7 +910,7 @@ static bool mayLoopAccessLocation(Value
> ///
> /// Note that we don't ever attempt to use memset_pattern8 or 4, because
> these
> /// just replicate their input array and then pass on to memset_pattern16.
> -static Constant *getMemSetPatternValue(Value *V, const DataLayout &TD) {
> +static Constant *getMemSetPatternValue(Value *V, const DataLayout &DL) {
> // If the value isn't a constant, we can't promote it to being in a
> constant
> // array. We could theoretically do a store to an alloca or something,
> but
> // that doesn't seem worthwhile.
> @@ -918,12 +918,12 @@ static Constant *getMemSetPatternValue(V
> if (C == 0) return 0;
>
> // Only handle simple values that are a power of two bytes in size.
> - uint64_t Size = TD.getTypeSizeInBits(V->getType());
> + uint64_t Size = DL.getTypeSizeInBits(V->getType());
> if (Size == 0 || (Size & 7) || (Size & (Size-1)))
> return 0;
>
> // Don't care enough about darwin/ppc to implement this.
> - if (TD.isBigEndian())
> + if (DL.isBigEndian())
> return 0;
>
> // Convert to size in bytes.
> @@ -970,7 +970,7 @@ processLoopStridedStore(Value *DestPtr,
> PatternValue = 0;
> } else if (DestAS == 0 &&
> TLI->has(LibFunc::memset_pattern16) &&
> - (PatternValue = getMemSetPatternValue(StoredVal, *TD))) {
> + (PatternValue = getMemSetPatternValue(StoredVal, *DL))) {
> // Don't create memset_pattern16s with address spaces.
> // It looks like we can use PatternValue!
> SplatValue = 0;
> @@ -1011,7 +1011,7 @@ processLoopStridedStore(Value *DestPtr,
>
> // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
> // pointer size if it isn't already.
> - Type *IntPtr = Builder.getIntPtrTy(TD, DestAS);
> + Type *IntPtr = Builder.getIntPtrTy(DL, DestAS);
> BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
>
> const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr,
> 1),
> @@ -1125,7 +1125,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI
>
> // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
> // pointer size if it isn't already.
> - Type *IntPtrTy = Builder.getIntPtrTy(TD, SI->getPointerAddressSpace());
> + Type *IntPtrTy = Builder.getIntPtrTy(DL, SI->getPointerAddressSpace());
> BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);
>
> const SCEV *NumBytesS = SE->getAddExpr(BECount,
> SE->getConstant(IntPtrTy, 1),
>
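LoopIdiomRecognize is slightly different in that it caches the analysis lazily through a getter instead of fetching it in runOnLoop, and the rename keeps that one-liner intact. The idiom on its own (pass name invented; the getter body matches the patch):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
    struct LazyDLExample : public FunctionPass {
      static char ID;
      const DataLayout *DL;
      LazyDLExample() : FunctionPass(ID), DL(0) {}

      // Look DataLayout up on first use and cache it; later calls return the
      // cached pointer, which may legitimately still be null.
      const DataLayout *getDataLayout() {
        return DL ? DL : DL = getAnalysisIfAvailable<DataLayout>();
      }

      virtual bool runOnFunction(Function &) { return false; }
    };
    }
    char LazyDLExample::ID = 0;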
> Modified: llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp (original)
> +++ llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp Thu Feb 20
> 18:06:31 2014
> @@ -195,9 +195,9 @@ class MemsetRanges {
> /// because each element is relatively large and expensive to copy.
> std::list<MemsetRange> Ranges;
> typedef std::list<MemsetRange>::iterator range_iterator;
> - const DataLayout &TD;
> + const DataLayout &DL;
> public:
> - MemsetRanges(const DataLayout &td) : TD(td) {}
> + MemsetRanges(const DataLayout &DL) : DL(DL) {}
>
> typedef std::list<MemsetRange>::const_iterator const_iterator;
> const_iterator begin() const { return Ranges.begin(); }
> @@ -212,7 +212,7 @@ public:
> }
>
> void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
> - int64_t StoreSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());
> + int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());
>
> addRange(OffsetFromFirst, StoreSize,
> SI->getPointerOperand(), SI->getAlignment(), SI);
> @@ -305,14 +305,14 @@ namespace {
> class MemCpyOpt : public FunctionPass {
> MemoryDependenceAnalysis *MD;
> TargetLibraryInfo *TLI;
> - const DataLayout *TD;
> + const DataLayout *DL;
> public:
> static char ID; // Pass identification, replacement for typeid
> MemCpyOpt() : FunctionPass(ID) {
> initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
> MD = 0;
> TLI = 0;
> - TD = 0;
> + DL = 0;
> }
>
> bool runOnFunction(Function &F);
> @@ -366,13 +366,13 @@ INITIALIZE_PASS_END(MemCpyOpt, "memcpyop
> /// attempts to merge them together into a memcpy/memset.
> Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
> Value *StartPtr, Value
> *ByteVal) {
> - if (TD == 0) return 0;
> + if (DL == 0) return 0;
>
> // Okay, so we now have a single store that can be splatable. Scan to
> find
> // all subsequent stores of the same value to offset from the same
> pointer.
> // Join these together into ranges, so we can decide whether contiguous
> blocks
> // are stored.
> - MemsetRanges Ranges(*TD);
> + MemsetRanges Ranges(*DL);
>
> BasicBlock::iterator BI = StartInst;
> for (++BI; !isa<TerminatorInst>(BI); ++BI) {
> @@ -396,7 +396,7 @@ Instruction *MemCpyOpt::tryMergingIntoMe
> // Check to see if this store is to a constant offset from the
> start ptr.
> int64_t Offset;
> if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
> - Offset, *TD))
> + Offset, *DL))
> break;
>
> Ranges.addStore(Offset, NextStore);
> @@ -409,7 +409,7 @@ Instruction *MemCpyOpt::tryMergingIntoMe
>
> // Check to see if this store is to a constant offset from the
> start ptr.
> int64_t Offset;
> - if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
> + if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *DL))
> break;
>
> Ranges.addMemSet(Offset, MSI);
> @@ -441,7 +441,7 @@ Instruction *MemCpyOpt::tryMergingIntoMe
> if (Range.TheStores.size() == 1) continue;
>
> // If it is profitable to lower this range to memset, do so now.
> - if (!Range.isProfitableToUseMemset(*TD))
> + if (!Range.isProfitableToUseMemset(*DL))
> continue;
>
> // Otherwise, we do want to transform this! Create a new memset.
> @@ -453,7 +453,7 @@ Instruction *MemCpyOpt::tryMergingIntoMe
> if (Alignment == 0) {
> Type *EltType =
> cast<PointerType>(StartPtr->getType())->getElementType();
> - Alignment = TD->getABITypeAlignment(EltType);
> + Alignment = DL->getABITypeAlignment(EltType);
> }
>
> AMemSet =
> @@ -484,7 +484,7 @@ Instruction *MemCpyOpt::tryMergingIntoMe
> bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
> if (!SI->isSimple()) return false;
>
> - if (TD == 0) return false;
> + if (DL == 0) return false;
>
> // Detect cases where we're performing call slot forwarding, but
> // happen to be using a load-store pair to implement it, rather than
> @@ -514,15 +514,15 @@ bool MemCpyOpt::processStore(StoreInst *
> if (C) {
> unsigned storeAlign = SI->getAlignment();
> if (!storeAlign)
> - storeAlign =
> TD->getABITypeAlignment(SI->getOperand(0)->getType());
> + storeAlign =
> DL->getABITypeAlignment(SI->getOperand(0)->getType());
> unsigned loadAlign = LI->getAlignment();
> if (!loadAlign)
> - loadAlign = TD->getABITypeAlignment(LI->getType());
> + loadAlign = DL->getABITypeAlignment(LI->getType());
>
> bool changed = performCallSlotOptzn(LI,
> SI->getPointerOperand()->stripPointerCasts(),
> LI->getPointerOperand()->stripPointerCasts(),
> -
> TD->getTypeStoreSize(SI->getOperand(0)->getType()),
> +
> DL->getTypeStoreSize(SI->getOperand(0)->getType()),
> std::min(storeAlign, loadAlign), C);
> if (changed) {
> MD->removeInstruction(SI);
> @@ -596,13 +596,13 @@ bool MemCpyOpt::performCallSlotOptzn(Ins
> return false;
>
> // Check that all of src is copied to dest.
> - if (TD == 0) return false;
> + if (DL == 0) return false;
>
> ConstantInt *srcArraySize =
> dyn_cast<ConstantInt>(srcAlloca->getArraySize());
> if (!srcArraySize)
> return false;
>
> - uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
> + uint64_t srcSize = DL->getTypeAllocSize(srcAlloca->getAllocatedType()) *
> srcArraySize->getZExtValue();
>
> if (cpyLen < srcSize)
> @@ -617,7 +617,7 @@ bool MemCpyOpt::performCallSlotOptzn(Ins
> if (!destArraySize)
> return false;
>
> - uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
> + uint64_t destSize = DL->getTypeAllocSize(A->getAllocatedType()) *
> destArraySize->getZExtValue();
>
> if (destSize < srcSize)
> @@ -636,7 +636,7 @@ bool MemCpyOpt::performCallSlotOptzn(Ins
> return false;
> }
>
> - uint64_t destSize = TD->getTypeAllocSize(StructTy);
> + uint64_t destSize = DL->getTypeAllocSize(StructTy);
> if (destSize < srcSize)
> return false;
> } else {
> @@ -646,7 +646,7 @@ bool MemCpyOpt::performCallSlotOptzn(Ins
> // Check that dest points to memory that is at least as aligned as src.
> unsigned srcAlign = srcAlloca->getAlignment();
> if (!srcAlign)
> - srcAlign = TD->getABITypeAlignment(srcAlloca->getAllocatedType());
> + srcAlign = DL->getABITypeAlignment(srcAlloca->getAllocatedType());
> bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
> // If dest is not aligned enough and we can't increase its alignment
> then
> // bail out.
> @@ -912,12 +912,12 @@ bool MemCpyOpt::processMemMove(MemMoveIn
>
> /// processByValArgument - This is called on every byval argument in call
> sites.
> bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
> - if (TD == 0) return false;
> + if (DL == 0) return false;
>
> // Find out what feeds this byval argument.
> Value *ByValArg = CS.getArgument(ArgNo);
> Type *ByValTy =
> cast<PointerType>(ByValArg->getType())->getElementType();
> - uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
> + uint64_t ByValSize = DL->getTypeAllocSize(ByValTy);
> MemDepResult DepInfo =
> MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg,
> ByValSize),
> true, CS.getInstruction(),
> @@ -946,7 +946,7 @@ bool MemCpyOpt::processByValArgument(Cal
> // If it is greater than the memcpy, then we check to see if we can
> force the
> // source of the memcpy to the alignment we need. If we fail, we bail
> out.
> if (MDep->getAlignment() < ByValAlign &&
> - getOrEnforceKnownAlignment(MDep->getSource(),ByValAlign, TD) <
> ByValAlign)
> + getOrEnforceKnownAlignment(MDep->getSource(),ByValAlign, DL) <
> ByValAlign)
> return false;
>
> // Verify that the copied-from memory doesn't change in between the
> memcpy and
> @@ -1025,7 +1025,7 @@ bool MemCpyOpt::runOnFunction(Function &
>
> bool MadeChange = false;
> MD = &getAnalysis<MemoryDependenceAnalysis>();
> - TD = getAnalysisIfAvailable<DataLayout>();
> + DL = getAnalysisIfAvailable<DataLayout>();
> TLI = &getAnalysis<TargetLibraryInfo>();
>
> // If we don't have at least memset and memcpy, there is little point
> of doing
>
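One stylistic wrinkle the patch introduces in a few constructors (MemsetRanges above, GlobalCmp earlier, SCCPSolver below) is a parameter named exactly like the member it initialises, e.g. MemsetRanges(const DataLayout &DL) : DL(DL) {}. That is well-defined C++: in a mem-initializer the name before the parentheses is looked up as the member, the name inside as the parameter. A tiny standalone illustration (the class is made up):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Type.h"
    using namespace llvm;

    class SizeHelper {
      const DataLayout &DL;                 // member
    public:
      // "DL(DL)": outer DL is the member, inner DL is the parameter.
      SizeHelper(const DataLayout &DL) : DL(DL) {}

      uint64_t storeSizeInBytes(Type *Ty) const {
        return DL.getTypeStoreSize(Ty);
      }
    };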
> Modified: llvm/trunk/lib/Transforms/Scalar/SCCP.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/SCCP.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Scalar/SCCP.cpp (original)
> +++ llvm/trunk/lib/Transforms/Scalar/SCCP.cpp Thu Feb 20 18:06:31 2014
> @@ -153,7 +153,7 @@ namespace {
> /// Constant Propagation.
> ///
> class SCCPSolver : public InstVisitor<SCCPSolver> {
> - const DataLayout *TD;
> + const DataLayout *DL;
> const TargetLibraryInfo *TLI;
> SmallPtrSet<BasicBlock*, 8> BBExecutable; // The BBs that are
> executable.
> DenseMap<Value*, LatticeVal> ValueState; // The state each value is in.
> @@ -205,8 +205,8 @@ class SCCPSolver : public InstVisitor<SC
> typedef std::pair<BasicBlock*, BasicBlock*> Edge;
> DenseSet<Edge> KnownFeasibleEdges;
> public:
> - SCCPSolver(const DataLayout *td, const TargetLibraryInfo *tli)
> - : TD(td), TLI(tli) {}
> + SCCPSolver(const DataLayout *DL, const TargetLibraryInfo *tli)
> + : DL(DL), TLI(tli) {}
>
> /// MarkBlockExecutable - This method can be used by clients to mark
> all of
> /// the blocks that are known to be intrinsically live in the processed
> unit.
> @@ -1067,7 +1067,7 @@ void SCCPSolver::visitLoadInst(LoadInst
> }
>
> // Transform load from a constant into a constant if possible.
> - if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, TD))
> + if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, DL))
> return markConstant(IV, &I, C);
>
> // Otherwise we cannot say for certain what value this load will
> produce.
> @@ -1557,9 +1557,9 @@ bool SCCP::runOnFunction(Function &F) {
> return false;
>
> DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
> - const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
> + const DataLayout *DL = getAnalysisIfAvailable<DataLayout>();
> const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
> - SCCPSolver Solver(TD, TLI);
> + SCCPSolver Solver(DL, TLI);
>
> // Mark the first block of the function as being executable.
> Solver.MarkBlockExecutable(F.begin());
> @@ -1686,9 +1686,9 @@ static bool AddressIsTaken(const GlobalV
> }
>
> bool IPSCCP::runOnModule(Module &M) {
> - const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
> + const DataLayout *DL = getAnalysisIfAvailable<DataLayout>();
> const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
> - SCCPSolver Solver(TD, TLI);
> + SCCPSolver Solver(DL, TLI);
>
> // AddressTakenFunctions - This set keeps track of the address-taken
> functions
> // that are in the input. As IPSCCP runs through and simplifies code,
>
> Modified: llvm/trunk/lib/Transforms/Scalar/ScalarReplAggregates.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/ScalarReplAggregates.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Scalar/ScalarReplAggregates.cpp (original)
> +++ llvm/trunk/lib/Transforms/Scalar/ScalarReplAggregates.cpp Thu Feb 20
> 18:06:31 2014
> @@ -87,7 +87,7 @@ namespace {
>
> private:
> bool HasDomTree;
> - DataLayout *TD;
> + DataLayout *DL;
>
> /// DeadInsts - Keep track of instructions we have made dead, so that
> /// we can remove them after we are done working.
> @@ -258,7 +258,7 @@ namespace {
> class ConvertToScalarInfo {
> /// AllocaSize - The size of the alloca being considered in bytes.
> unsigned AllocaSize;
> - const DataLayout &TD;
> + const DataLayout &DL;
> unsigned ScalarLoadThreshold;
>
> /// IsNotTrivial - This is set to true if there is some access to the
> object
> @@ -301,9 +301,9 @@ class ConvertToScalarInfo {
> bool HadDynamicAccess;
>
> public:
> - explicit ConvertToScalarInfo(unsigned Size, const DataLayout &td,
> + explicit ConvertToScalarInfo(unsigned Size, const DataLayout &DL,
> unsigned SLT)
> - : AllocaSize(Size), TD(td), ScalarLoadThreshold(SLT),
> IsNotTrivial(false),
> + : AllocaSize(Size), DL(DL), ScalarLoadThreshold(SLT),
> IsNotTrivial(false),
> ScalarKind(Unknown), VectorTy(0), HadNonMemTransferAccess(false),
> HadDynamicAccess(false) { }
>
> @@ -364,7 +364,7 @@ AllocaInst *ConvertToScalarInfo::TryConv
> return 0;
>
> if ((ScalarKind == ImplicitVector || ScalarKind == Integer) &&
> - !HadNonMemTransferAccess && !TD.fitsInLegalInteger(BitWidth))
> + !HadNonMemTransferAccess && !DL.fitsInLegalInteger(BitWidth))
> return 0;
> // Dynamic accesses on integers aren't yet supported. They need us
> to shift
> // by a dynamic amount which could be difficult to work out as we
> might not
> @@ -520,7 +520,7 @@ bool ConvertToScalarInfo::CanConvertToSc
> HadDynamicAccess = true;
> } else
> GEPNonConstantIdx = NonConstantIdx;
> - uint64_t GEPOffset = TD.getIndexedOffset(PtrTy,
> + uint64_t GEPOffset = DL.getIndexedOffset(PtrTy,
> Indices);
> // See if all uses can be converted.
> if (!CanConvertToScalar(GEP, Offset+GEPOffset, GEPNonConstantIdx))
> @@ -615,7 +615,7 @@ void ConvertToScalarInfo::ConvertUsesToS
> GEPNonConstantIdx = Indices.pop_back_val();
> } else
> GEPNonConstantIdx = NonConstantIdx;
> - uint64_t GEPOffset =
> TD.getIndexedOffset(GEP->getPointerOperandType(),
> + uint64_t GEPOffset =
> DL.getIndexedOffset(GEP->getPointerOperandType(),
> Indices);
> ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8,
> GEPNonConstantIdx);
> GEP->eraseFromParent();
> @@ -692,9 +692,9 @@ void ConvertToScalarInfo::ConvertUsesToS
> // If the source and destination are both to the same alloca, then
> this is
> // a noop copy-to-self, just delete it. Otherwise, emit a load and
> store
> // as appropriate.
> - AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, &TD,
> 0));
> + AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, &DL,
> 0));
>
> - if (GetUnderlyingObject(MTI->getSource(), &TD, 0) != OrigAI) {
> + if (GetUnderlyingObject(MTI->getSource(), &DL, 0) != OrigAI) {
> // Dest must be OrigAI, change this to be a load from the original
> // pointer (bitcasted), then a store to our new alloca.
> assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
> @@ -710,7 +710,7 @@ void ConvertToScalarInfo::ConvertUsesToS
> LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
> SrcVal->setAlignment(MTI->getAlignment());
> Builder.CreateStore(SrcVal, NewAI);
> - } else if (GetUnderlyingObject(MTI->getDest(), &TD, 0) != OrigAI) {
> + } else if (GetUnderlyingObject(MTI->getDest(), &DL, 0) != OrigAI) {
> // Src must be OrigAI, change this to be a load from NewAI then a
> store
> // through the original dest pointer (bitcasted).
> assert(MTI->getRawSource() == Ptr && "Neither use is of
> pointer?");
> @@ -770,15 +770,15 @@ ConvertScalar_ExtractValue(Value *FromVa
> // If the result alloca is a vector type, this is either an element
> // access or a bitcast to another vector type of the same size.
> if (VectorType *VTy = dyn_cast<VectorType>(FromType)) {
> - unsigned FromTypeSize = TD.getTypeAllocSize(FromType);
> - unsigned ToTypeSize = TD.getTypeAllocSize(ToType);
> + unsigned FromTypeSize = DL.getTypeAllocSize(FromType);
> + unsigned ToTypeSize = DL.getTypeAllocSize(ToType);
> if (FromTypeSize == ToTypeSize)
> return Builder.CreateBitCast(FromVal, ToType);
>
> // Otherwise it must be an element access.
> unsigned Elt = 0;
> if (Offset) {
> - unsigned EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType());
> + unsigned EltSize = DL.getTypeAllocSizeInBits(VTy->getElementType());
> Elt = Offset/EltSize;
> assert(EltSize*Elt == Offset && "Invalid modulus in validity
> checking");
> }
> @@ -804,7 +804,7 @@ ConvertScalar_ExtractValue(Value *FromVa
> if (StructType *ST = dyn_cast<StructType>(ToType)) {
> assert(!NonConstantIdx &&
> "Dynamic indexing into struct types not supported");
> - const StructLayout &Layout = *TD.getStructLayout(ST);
> + const StructLayout &Layout = *DL.getStructLayout(ST);
> Value *Res = UndefValue::get(ST);
> for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
> Value *Elt = ConvertScalar_ExtractValue(FromVal,
> ST->getElementType(i),
> @@ -818,7 +818,7 @@ ConvertScalar_ExtractValue(Value *FromVa
> if (ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
> assert(!NonConstantIdx &&
> "Dynamic indexing into array types not supported");
> - uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
> + uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType());
> Value *Res = UndefValue::get(AT);
> for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
> Value *Elt = ConvertScalar_ExtractValue(FromVal,
> AT->getElementType(),
> @@ -834,12 +834,12 @@ ConvertScalar_ExtractValue(Value *FromVa
> // If this is a big-endian system and the load is narrower than the
> // full alloca type, we need to do a shift to get the right bits.
> int ShAmt = 0;
> - if (TD.isBigEndian()) {
> + if (DL.isBigEndian()) {
> // On big-endian machines, the lowest bit is stored at the bit offset
> // from the pointer given by getTypeStoreSizeInBits. This matters for
> // integers with a bitwidth that is not a multiple of 8.
> - ShAmt = TD.getTypeStoreSizeInBits(NTy) -
> - TD.getTypeStoreSizeInBits(ToType) - Offset;
> + ShAmt = DL.getTypeStoreSizeInBits(NTy) -
> + DL.getTypeStoreSizeInBits(ToType) - Offset;
> } else {
> ShAmt = Offset;
> }
> @@ -855,7 +855,7 @@ ConvertScalar_ExtractValue(Value *FromVa
> ConstantInt::get(FromVal->getType(),
> -ShAmt));
>
> // Finally, unconditionally truncate the integer to the right width.
> - unsigned LIBitWidth = TD.getTypeSizeInBits(ToType);
> + unsigned LIBitWidth = DL.getTypeSizeInBits(ToType);
> if (LIBitWidth < NTy->getBitWidth())
> FromVal =
> Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
> @@ -902,8 +902,8 @@ ConvertScalar_InsertValue(Value *SV, Val
> LLVMContext &Context = Old->getContext();
>
> if (VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
> - uint64_t VecSize = TD.getTypeAllocSizeInBits(VTy);
> - uint64_t ValSize = TD.getTypeAllocSizeInBits(SV->getType());
> + uint64_t VecSize = DL.getTypeAllocSizeInBits(VTy);
> + uint64_t ValSize = DL.getTypeAllocSizeInBits(SV->getType());
>
> // Changing the whole vector with memset or with an access of a
> different
> // vector type?
> @@ -914,7 +914,7 @@ ConvertScalar_InsertValue(Value *SV, Val
> Type *EltTy = VTy->getElementType();
> if (SV->getType() != EltTy)
> SV = Builder.CreateBitCast(SV, EltTy);
> - uint64_t EltSize = TD.getTypeAllocSizeInBits(EltTy);
> + uint64_t EltSize = DL.getTypeAllocSizeInBits(EltTy);
> unsigned Elt = Offset/EltSize;
> Value *Idx;
> if (NonConstantIdx) {
> @@ -933,7 +933,7 @@ ConvertScalar_InsertValue(Value *SV, Val
> if (StructType *ST = dyn_cast<StructType>(SV->getType())) {
> assert(!NonConstantIdx &&
> "Dynamic indexing into struct types not supported");
> - const StructLayout &Layout = *TD.getStructLayout(ST);
> + const StructLayout &Layout = *DL.getStructLayout(ST);
> for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
> Value *Elt = Builder.CreateExtractValue(SV, i);
> Old = ConvertScalar_InsertValue(Elt, Old,
> @@ -946,7 +946,7 @@ ConvertScalar_InsertValue(Value *SV, Val
> if (ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
> assert(!NonConstantIdx &&
> "Dynamic indexing into array types not supported");
> - uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
> + uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType());
> for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
> Value *Elt = Builder.CreateExtractValue(SV, i);
> Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, 0,
> Builder);
> @@ -956,14 +956,14 @@ ConvertScalar_InsertValue(Value *SV, Val
>
> // If SV is a float, convert it to the appropriate integer type.
> // If it is a pointer, do the same.
> - unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());
> - unsigned DestWidth = TD.getTypeSizeInBits(AllocaType);
> - unsigned SrcStoreWidth = TD.getTypeStoreSizeInBits(SV->getType());
> - unsigned DestStoreWidth = TD.getTypeStoreSizeInBits(AllocaType);
> + unsigned SrcWidth = DL.getTypeSizeInBits(SV->getType());
> + unsigned DestWidth = DL.getTypeSizeInBits(AllocaType);
> + unsigned SrcStoreWidth = DL.getTypeStoreSizeInBits(SV->getType());
> + unsigned DestStoreWidth = DL.getTypeStoreSizeInBits(AllocaType);
> if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy())
> SV = Builder.CreateBitCast(SV,
> IntegerType::get(SV->getContext(),SrcWidth));
> else if (SV->getType()->isPointerTy())
> - SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getType()));
> + SV = Builder.CreatePtrToInt(SV, DL.getIntPtrType(SV->getType()));
>
> // Zero extend or truncate the value if needed.
> if (SV->getType() != AllocaType) {
> @@ -982,7 +982,7 @@ ConvertScalar_InsertValue(Value *SV, Val
> // If this is a big-endian system and the store is narrower than the
> // full alloca type, we need to do a shift to get the right bits.
> int ShAmt = 0;
> - if (TD.isBigEndian()) {
> + if (DL.isBigEndian()) {
> // On big-endian machines, the lowest bit is stored at the bit offset
> // from the pointer given by getTypeStoreSizeInBits. This matters for
> // integers with a bitwidth that is not a multiple of 8.
> @@ -1023,7 +1023,7 @@ bool SROA::runOnFunction(Function &F) {
> if (skipOptnoneFunction(F))
> return false;
>
> - TD = getAnalysisIfAvailable<DataLayout>();
> + DL = getAnalysisIfAvailable<DataLayout>();
>
> bool Changed = performPromotion(F);
>
> @@ -1031,7 +1031,7 @@ bool SROA::runOnFunction(Function &F) {
> // theoretically needs to. It should be refactored in order to support
> // target-independent IR. Until this is done, just skip the actual
> // scalar-replacement portion of this pass.
> - if (!TD) return Changed;
> + if (!DL) return Changed;
>
> while (1) {
> bool LocalChange = performScalarRepl(F);
> @@ -1137,7 +1137,7 @@ public:
> ///
> /// We can do this to a select if its only uses are loads and if the
> operand to
> /// the select can be loaded unconditionally.
> -static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *TD)
> {
> +static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *DL)
> {
> bool TDerefable = SI->getTrueValue()->isDereferenceablePointer();
> bool FDerefable = SI->getFalseValue()->isDereferenceablePointer();
>
> @@ -1149,10 +1149,10 @@ static bool isSafeSelectToSpeculate(Sele
> // Both operands to the select need to be dereferencable, either
> absolutely
> // (e.g. allocas) or at this point because we can see other accesses
> to it.
> if (!TDerefable && !isSafeToLoadUnconditionally(SI->getTrueValue(),
> LI,
> - LI->getAlignment(),
> TD))
> + LI->getAlignment(),
> DL))
> return false;
> if (!FDerefable && !isSafeToLoadUnconditionally(SI->getFalseValue(),
> LI,
> - LI->getAlignment(),
> TD))
> + LI->getAlignment(),
> DL))
> return false;
> }
>
> @@ -1175,7 +1175,7 @@ static bool isSafeSelectToSpeculate(Sele
> ///
> /// We can do this to a select if its only uses are loads and if the
> operand to
> /// the select can be loaded unconditionally.
> -static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *TD) {
> +static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *DL) {
> // For now, we can only do this promotion if the load is in the same
> block as
> // the PHI, and if there are no stores between the phi and load.
> // TODO: Allow recursive phi users.
> @@ -1225,7 +1225,7 @@ static bool isSafePHIToSpeculate(PHINode
> // If this pointer is always safe to load, or if we can prove that
> there is
> // already a load in the block, then we can move the load to the pred
> block.
> if (InVal->isDereferenceablePointer() ||
> - isSafeToLoadUnconditionally(InVal, Pred->getTerminator(),
> MaxAlign, TD))
> + isSafeToLoadUnconditionally(InVal, Pred->getTerminator(),
> MaxAlign, DL))
> continue;
>
> return false;
> @@ -1239,7 +1239,7 @@ static bool isSafePHIToSpeculate(PHINode
> /// direct (non-volatile) loads and stores to it. If the alloca is close
> but
> /// not quite there, this will transform the code to allow promotion. As
> such,
> /// it is a non-pure predicate.
> -static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout
> *TD) {
> +static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout
> *DL) {
> SetVector<Instruction*, SmallVector<Instruction*, 4>,
> SmallPtrSet<Instruction*, 4> > InstsToRewrite;
>
> @@ -1268,12 +1268,12 @@ static bool tryToMakeAllocaBePromotable(
>
> // This is very rare and we just scrambled the use list of AI,
> start
> // over completely.
> - return tryToMakeAllocaBePromotable(AI, TD);
> + return tryToMakeAllocaBePromotable(AI, DL);
> }
>
> // If it is safe to turn "load (select c, AI, ptr)" into a select
> of two
> // loads, then we can transform this by rewriting the select.
> - if (!isSafeSelectToSpeculate(SI, TD))
> + if (!isSafeSelectToSpeculate(SI, DL))
> return false;
>
> InstsToRewrite.insert(SI);
> @@ -1288,7 +1288,7 @@ static bool tryToMakeAllocaBePromotable(
>
> // If it is safe to turn "load (phi [AI, ptr, ...])" into a PHI of
> loads
> // in the pred blocks, then we can transform this by rewriting the
> PHI.
> - if (!isSafePHIToSpeculate(PN, TD))
> + if (!isSafePHIToSpeculate(PN, DL))
> return false;
>
> InstsToRewrite.insert(PN);
> @@ -1423,7 +1423,7 @@ bool SROA::performPromotion(Function &F)
> // the entry node
> for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
> if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an
> alloca?
> - if (tryToMakeAllocaBePromotable(AI, TD))
> + if (tryToMakeAllocaBePromotable(AI, DL))
> Allocas.push_back(AI);
>
> if (Allocas.empty()) break;
> @@ -1499,7 +1499,7 @@ bool SROA::performScalarRepl(Function &F
> // transform the allocation instruction if it is an array allocation
> // (allocations OF arrays are ok though), and an allocation of a
> scalar
> // value cannot be decomposed at all.
> - uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
> + uint64_t AllocaSize = DL->getTypeAllocSize(AI->getAllocatedType());
>
> // Do not promote [0 x %struct].
> if (AllocaSize == 0) continue;
> @@ -1523,7 +1523,7 @@ bool SROA::performScalarRepl(Function &F
> // that we can't just check based on the type: the alloca may be of
> an i32
> // but that has pointer arithmetic to set byte 3 of it or something.
> if (AllocaInst *NewAI = ConvertToScalarInfo(
> - (unsigned)AllocaSize, *TD,
> ScalarLoadThreshold).TryConvert(AI)) {
> + (unsigned)AllocaSize, *DL,
> ScalarLoadThreshold).TryConvert(AI)) {
> NewAI->takeName(AI);
> AI->eraseFromParent();
> ++NumConverted;
> @@ -1625,7 +1625,7 @@ void SROA::isSafeForScalarRepl(Instructi
> if (!LI->isSimple())
> return MarkUnsafe(Info, User);
> Type *LIType = LI->getType();
> - isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
> + isSafeMemAccess(Offset, DL->getTypeAllocSize(LIType),
> LIType, false, Info, LI, true /*AllowWholeAccess*/);
> Info.hasALoadOrStore = true;
>
> @@ -1635,7 +1635,7 @@ void SROA::isSafeForScalarRepl(Instructi
> return MarkUnsafe(Info, User);
>
> Type *SIType = SI->getOperand(0)->getType();
> - isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
> + isSafeMemAccess(Offset, DL->getTypeAllocSize(SIType),
> SIType, true, Info, SI, true /*AllowWholeAccess*/);
> Info.hasALoadOrStore = true;
> } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) {
> @@ -1684,7 +1684,7 @@ void SROA::isSafePHISelectUseForScalarRe
> if (!LI->isSimple())
> return MarkUnsafe(Info, User);
> Type *LIType = LI->getType();
> - isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
> + isSafeMemAccess(Offset, DL->getTypeAllocSize(LIType),
> LIType, false, Info, LI, false
> /*AllowWholeAccess*/);
> Info.hasALoadOrStore = true;
>
> @@ -1694,7 +1694,7 @@ void SROA::isSafePHISelectUseForScalarRe
> return MarkUnsafe(Info, User);
>
> Type *SIType = SI->getOperand(0)->getType();
> - isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
> + isSafeMemAccess(Offset, DL->getTypeAllocSize(SIType),
> SIType, true, Info, SI, false /*AllowWholeAccess*/);
> Info.hasALoadOrStore = true;
> } else if (isa<PHINode>(User) || isa<SelectInst>(User)) {
> @@ -1739,7 +1739,7 @@ void SROA::isSafeGEP(GetElementPtrInst *
> // constant part of the offset.
> if (NonConstant)
> Indices.pop_back();
> - Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
> + Offset += DL->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
> if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset,
> NonConstantIdxSize))
> MarkUnsafe(Info, GEPI);
> @@ -1798,7 +1798,7 @@ void SROA::isSafeMemAccess(uint64_t Offs
> bool AllowWholeAccess) {
> // Check if this is a load/store of the entire alloca.
> if (Offset == 0 && AllowWholeAccess &&
> - MemSize == TD->getTypeAllocSize(Info.AI->getAllocatedType())) {
> + MemSize == DL->getTypeAllocSize(Info.AI->getAllocatedType())) {
> // This can be safe for MemIntrinsics (where MemOpType is 0) and
> integer
> // loads/stores (which are essentially the same as the MemIntrinsics
> with
> // regard to copying padding between elements). But, if an alloca is
> @@ -1835,20 +1835,20 @@ bool SROA::TypeHasComponent(Type *T, uin
> Type *EltTy;
> uint64_t EltSize;
> if (StructType *ST = dyn_cast<StructType>(T)) {
> - const StructLayout *Layout = TD->getStructLayout(ST);
> + const StructLayout *Layout = DL->getStructLayout(ST);
> unsigned EltIdx = Layout->getElementContainingOffset(Offset);
> EltTy = ST->getContainedType(EltIdx);
> - EltSize = TD->getTypeAllocSize(EltTy);
> + EltSize = DL->getTypeAllocSize(EltTy);
> Offset -= Layout->getElementOffset(EltIdx);
> } else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
> EltTy = AT->getElementType();
> - EltSize = TD->getTypeAllocSize(EltTy);
> + EltSize = DL->getTypeAllocSize(EltTy);
> if (Offset >= AT->getNumElements() * EltSize)
> return false;
> Offset %= EltSize;
> } else if (VectorType *VT = dyn_cast<VectorType>(T)) {
> EltTy = VT->getElementType();
> - EltSize = TD->getTypeAllocSize(EltTy);
> + EltSize = DL->getTypeAllocSize(EltTy);
> if (Offset >= VT->getNumElements() * EltSize)
> return false;
> Offset %= EltSize;
> @@ -1887,7 +1887,7 @@ void SROA::RewriteForScalarRepl(Instruct
> ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
> uint64_t MemSize = Length->getZExtValue();
> if (Offset == 0 &&
> - MemSize == TD->getTypeAllocSize(AI->getAllocatedType()))
> + MemSize == DL->getTypeAllocSize(AI->getAllocatedType()))
> RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts);
> // Otherwise the intrinsic can only touch a single element and the
> // address operand will be updated, so nothing else needs to be
> done.
> @@ -1923,8 +1923,8 @@ void SROA::RewriteForScalarRepl(Instruct
> LI->replaceAllUsesWith(Insert);
> DeadInsts.push_back(LI);
> } else if (LIType->isIntegerTy() &&
> - TD->getTypeAllocSize(LIType) ==
> - TD->getTypeAllocSize(AI->getAllocatedType())) {
> + DL->getTypeAllocSize(LIType) ==
> + DL->getTypeAllocSize(AI->getAllocatedType())) {
> // If this is a load of the entire alloca to an integer, rewrite
> it.
> RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
> }
> @@ -1950,8 +1950,8 @@ void SROA::RewriteForScalarRepl(Instruct
> }
> DeadInsts.push_back(SI);
> } else if (SIType->isIntegerTy() &&
> - TD->getTypeAllocSize(SIType) ==
> - TD->getTypeAllocSize(AI->getAllocatedType())) {
> + DL->getTypeAllocSize(SIType) ==
> + DL->getTypeAllocSize(AI->getAllocatedType())) {
> // If this is a store of the entire alloca from an integer,
> rewrite it.
> RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
> }
> @@ -2013,7 +2013,7 @@ uint64_t SROA::FindElementAndOffset(Type
> Type *&IdxTy) {
> uint64_t Idx = 0;
> if (StructType *ST = dyn_cast<StructType>(T)) {
> - const StructLayout *Layout = TD->getStructLayout(ST);
> + const StructLayout *Layout = DL->getStructLayout(ST);
> Idx = Layout->getElementContainingOffset(Offset);
> T = ST->getContainedType(Idx);
> Offset -= Layout->getElementOffset(Idx);
> @@ -2021,7 +2021,7 @@ uint64_t SROA::FindElementAndOffset(Type
> return Idx;
> } else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
> T = AT->getElementType();
> - uint64_t EltSize = TD->getTypeAllocSize(T);
> + uint64_t EltSize = DL->getTypeAllocSize(T);
> Idx = Offset / EltSize;
> Offset -= Idx * EltSize;
> IdxTy = Type::getInt64Ty(T->getContext());
> @@ -2029,7 +2029,7 @@ uint64_t SROA::FindElementAndOffset(Type
> }
> VectorType *VT = cast<VectorType>(T);
> T = VT->getElementType();
> - uint64_t EltSize = TD->getTypeAllocSize(T);
> + uint64_t EltSize = DL->getTypeAllocSize(T);
> Idx = Offset / EltSize;
> Offset -= Idx * EltSize;
> IdxTy = Type::getInt64Ty(T->getContext());
> @@ -2050,7 +2050,7 @@ void SROA::RewriteGEP(GetElementPtrInst
> Value* NonConstantIdx = 0;
> if (!GEPI->hasAllConstantIndices())
> NonConstantIdx = Indices.pop_back_val();
> - Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
> + Offset += DL->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
>
> RewriteForScalarRepl(GEPI, AI, Offset, NewElts);
>
> @@ -2121,7 +2121,7 @@ void SROA::RewriteLifetimeIntrinsic(Intr
> V = Builder.CreateGEP(V, Builder.getInt64(NewOffset));
>
> IdxTy = NewElts[Idx]->getAllocatedType();
> - uint64_t EltSize = TD->getTypeAllocSize(IdxTy) - NewOffset;
> + uint64_t EltSize = DL->getTypeAllocSize(IdxTy) - NewOffset;
> if (EltSize > Size) {
> EltSize = Size;
> Size = 0;
> @@ -2137,7 +2137,7 @@ void SROA::RewriteLifetimeIntrinsic(Intr
>
> for (; Idx != NewElts.size() && Size; ++Idx) {
> IdxTy = NewElts[Idx]->getAllocatedType();
> - uint64_t EltSize = TD->getTypeAllocSize(IdxTy);
> + uint64_t EltSize = DL->getTypeAllocSize(IdxTy);
> if (EltSize > Size) {
> EltSize = Size;
> Size = 0;
> @@ -2229,10 +2229,10 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIn
> PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
> Type *OtherTy = OtherPtrTy->getElementType();
> if (StructType *ST = dyn_cast<StructType>(OtherTy)) {
> - EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
> + EltOffset = DL->getStructLayout(ST)->getElementOffset(i);
> } else {
> Type *EltTy = cast<SequentialType>(OtherTy)->getElementType();
> - EltOffset = TD->getTypeAllocSize(EltTy)*i;
> + EltOffset = DL->getTypeAllocSize(EltTy)*i;
> }
>
> // The alignment of the other pointer is the guaranteed alignment
> of the
> @@ -2273,7 +2273,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIn
> Type *ValTy = EltTy->getScalarType();
>
> // Construct an integer with the right value.
> - unsigned EltSize = TD->getTypeSizeInBits(ValTy);
> + unsigned EltSize = DL->getTypeSizeInBits(ValTy);
> APInt OneVal(EltSize, CI->getZExtValue());
> APInt TotalVal(OneVal);
> // Set each byte.
> @@ -2303,7 +2303,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIn
> // this element.
> }
>
> - unsigned EltSize = TD->getTypeAllocSize(EltTy);
> + unsigned EltSize = DL->getTypeAllocSize(EltTy);
> if (!EltSize)
> continue;
>
> @@ -2337,12 +2337,12 @@ SROA::RewriteStoreUserOfWholeAlloca(Stor
> // and store the element value to the individual alloca.
> Value *SrcVal = SI->getOperand(0);
> Type *AllocaEltTy = AI->getAllocatedType();
> - uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
> + uint64_t AllocaSizeBits = DL->getTypeAllocSizeInBits(AllocaEltTy);
>
> IRBuilder<> Builder(SI);
>
> // Handle tail padding by extending the operand
> - if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
> + if (DL->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
> SrcVal = Builder.CreateZExt(SrcVal,
> IntegerType::get(SI->getContext(),
> AllocaSizeBits));
>
> @@ -2352,15 +2352,15 @@ SROA::RewriteStoreUserOfWholeAlloca(Stor
> // There are two forms here: AI could be an array or struct. Both cases
> // have different ways to compute the element offset.
> if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
> - const StructLayout *Layout = TD->getStructLayout(EltSTy);
> + const StructLayout *Layout = DL->getStructLayout(EltSTy);
>
> for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
> // Get the number of bits to shift SrcVal to get the value.
> Type *FieldTy = EltSTy->getElementType(i);
> uint64_t Shift = Layout->getElementOffsetInBits(i);
>
> - if (TD->isBigEndian())
> - Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);
> + if (DL->isBigEndian())
> + Shift = AllocaSizeBits-Shift-DL->getTypeAllocSizeInBits(FieldTy);
>
> Value *EltVal = SrcVal;
> if (Shift) {
> @@ -2369,7 +2369,7 @@ SROA::RewriteStoreUserOfWholeAlloca(Stor
> }
>
> // Truncate down to an integer of the right size.
> - uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);
> + uint64_t FieldSizeBits = DL->getTypeSizeInBits(FieldTy);
>
> // Ignore zero sized fields like {}, they obviously contain no data.
> if (FieldSizeBits == 0) continue;
> @@ -2394,12 +2394,12 @@ SROA::RewriteStoreUserOfWholeAlloca(Stor
> } else {
> ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
> Type *ArrayEltTy = ATy->getElementType();
> - uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
> - uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);
> + uint64_t ElementOffset = DL->getTypeAllocSizeInBits(ArrayEltTy);
> + uint64_t ElementSizeBits = DL->getTypeSizeInBits(ArrayEltTy);
>
> uint64_t Shift;
>
> - if (TD->isBigEndian())
> + if (DL->isBigEndian())
> Shift = AllocaSizeBits-ElementOffset;
> else
> Shift = 0;
> @@ -2433,7 +2433,7 @@ SROA::RewriteStoreUserOfWholeAlloca(Stor
> }
> new StoreInst(EltVal, DestField, SI);
>
> - if (TD->isBigEndian())
> + if (DL->isBigEndian())
> Shift -= ElementOffset;
> else
> Shift += ElementOffset;
> @@ -2451,7 +2451,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadI
> // Extract each element out of the NewElts according to its structure
> offset
> // and form the result value.
> Type *AllocaEltTy = AI->getAllocatedType();
> - uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
> + uint64_t AllocaSizeBits = DL->getTypeAllocSizeInBits(AllocaEltTy);
>
> DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI
> << '\n');
> @@ -2461,10 +2461,10 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadI
> const StructLayout *Layout = 0;
> uint64_t ArrayEltBitOffset = 0;
> if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
> - Layout = TD->getStructLayout(EltSTy);
> + Layout = DL->getStructLayout(EltSTy);
> } else {
> Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
> - ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
> + ArrayEltBitOffset = DL->getTypeAllocSizeInBits(ArrayEltTy);
> }
>
> Value *ResultVal =
> @@ -2476,7 +2476,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadI
> Value *SrcField = NewElts[i];
> Type *FieldTy =
> cast<PointerType>(SrcField->getType())->getElementType();
> - uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);
> + uint64_t FieldSizeBits = DL->getTypeSizeInBits(FieldTy);
>
> // Ignore zero sized fields like {}, they obviously contain no data.
> if (FieldSizeBits == 0) continue;
> @@ -2507,7 +2507,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadI
> else // Array case.
> Shift = i*ArrayEltBitOffset;
>
> - if (TD->isBigEndian())
> + if (DL->isBigEndian())
> Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();
>
> if (Shift) {
> @@ -2524,7 +2524,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadI
> }
>
> // Handle tail padding by truncating the result
> - if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
> + if (DL->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
> ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);
>
> LI->replaceAllUsesWith(ResultVal);
> @@ -2534,15 +2534,15 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadI
> /// HasPadding - Return true if the specified type has any structure or
> /// alignment padding in between the elements that would be split apart
> /// by SROA; return false otherwise.
> -static bool HasPadding(Type *Ty, const DataLayout &TD) {
> +static bool HasPadding(Type *Ty, const DataLayout &DL) {
> if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
> Ty = ATy->getElementType();
> - return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
> + return DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty);
> }
>
> // SROA currently handles only Arrays and Structs.
> StructType *STy = cast<StructType>(Ty);
> - const StructLayout *SL = TD.getStructLayout(STy);
> + const StructLayout *SL = DL.getStructLayout(STy);
> unsigned PrevFieldBitOffset = 0;
> for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
> unsigned FieldBitOffset = SL->getElementOffsetInBits(i);
> @@ -2551,7 +2551,7 @@ static bool HasPadding(Type *Ty, const D
> // previous one.
> if (i) {
> unsigned PrevFieldEnd =
> - PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
> + PrevFieldBitOffset+DL.getTypeSizeInBits(STy->getElementType(i-1));
> if (PrevFieldEnd < FieldBitOffset)
> return true;
> }
> @@ -2560,7 +2560,7 @@ static bool HasPadding(Type *Ty, const D
> // Check for tail padding.
> if (unsigned EltCount = STy->getNumElements()) {
> unsigned PrevFieldEnd = PrevFieldBitOffset +
> - TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
> + DL.getTypeSizeInBits(STy->getElementType(EltCount-1));
> if (PrevFieldEnd < SL->getSizeInBits())
> return true;
> }
> @@ -2587,7 +2587,7 @@ bool SROA::isSafeAllocaToScalarRepl(Allo
> // types, but may actually be used. In these cases, we refuse to
> promote the
> // struct.
> if (Info.isMemCpySrc && Info.isMemCpyDst &&
> - HasPadding(AI->getAllocatedType(), *TD))
> + HasPadding(AI->getAllocatedType(), *DL))
> return false;
>
> // If the alloca never has an access to just *part* of it, but is
> accessed
>
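
For anyone skimming the ScalarReplAggregates.cpp hunks above: every change is
the same mechanical TD -> DL rename around DataLayout queries
(getTypeAllocSize, getStructLayout, getIndexedOffset, isBigEndian). As a rough
sketch of the query pattern being renamed -- the helper name and the explicit
null check are mine, not part of the patch -- it boils down to:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Hypothetical helper: the byte size SROA reasons about for an alloca,
    // or 0 when no DataLayout is available (the DL member may be null).
    static uint64_t allocaSizeInBytes(const DataLayout *DL,
                                      const AllocaInst *AI) {
      if (!DL)
        return 0;
      return DL->getTypeAllocSize(AI->getAllocatedType());
    }
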
> Modified: llvm/trunk/lib/Transforms/Utils/CloneFunction.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/CloneFunction.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Utils/CloneFunction.cpp (original)
> +++ llvm/trunk/lib/Transforms/Utils/CloneFunction.cpp Thu Feb 20 18:06:31
> 2014
> @@ -205,17 +205,17 @@ namespace {
> bool ModuleLevelChanges;
> const char *NameSuffix;
> ClonedCodeInfo *CodeInfo;
> - const DataLayout *TD;
> + const DataLayout *DL;
> public:
> PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
> ValueToValueMapTy &valueMap,
> bool moduleLevelChanges,
> const char *nameSuffix,
> ClonedCodeInfo *codeInfo,
> - const DataLayout *td)
> + const DataLayout *DL)
> : NewFunc(newFunc), OldFunc(oldFunc),
> VMap(valueMap), ModuleLevelChanges(moduleLevelChanges),
> - NameSuffix(nameSuffix), CodeInfo(codeInfo), TD(td) {
> + NameSuffix(nameSuffix), CodeInfo(codeInfo), DL(DL) {
> }
>
> /// CloneBlock - The specified block is found to be reachable, clone
> it and
> @@ -272,7 +272,7 @@ void PruningFunctionCloner::CloneBlock(c
> // If we can simplify this instruction to some other value, simply
> add
> // a mapping to that value rather than inserting a new instruction
> into
> // the basic block.
> - if (Value *V = SimplifyInstruction(NewInst, TD)) {
> + if (Value *V = SimplifyInstruction(NewInst, DL)) {
> // On the off-chance that this simplifies to an instruction in
> the old
> // function, map it back into the new function.
> if (Value *MappedV = VMap.lookup(V))
> @@ -368,7 +368,7 @@ void llvm::CloneAndPruneFunctionInto(Fun
> SmallVectorImpl<ReturnInst*>
> &Returns,
> const char *NameSuffix,
> ClonedCodeInfo *CodeInfo,
> - const DataLayout *TD,
> + const DataLayout *DL,
> Instruction *TheCall) {
> assert(NameSuffix && "NameSuffix cannot be null!");
>
> @@ -379,7 +379,7 @@ void llvm::CloneAndPruneFunctionInto(Fun
> #endif
>
> PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges,
> - NameSuffix, CodeInfo, TD);
> + NameSuffix, CodeInfo, DL);
>
> // Clone the entry block, and anything recursively reachable from it.
> std::vector<const BasicBlock*> CloneWorklist;
> @@ -509,7 +509,7 @@ void llvm::CloneAndPruneFunctionInto(Fun
> // node).
> for (unsigned Idx = 0, Size = PHIToResolve.size(); Idx != Size; ++Idx)
> if (PHINode *PN = dyn_cast<PHINode>(VMap[PHIToResolve[Idx]]))
> - recursivelySimplifyInstruction(PN, TD);
> + recursivelySimplifyInstruction(PN, DL);
>
> // Now that the inlined function body has been fully constructed, go
> through
> // and zap unconditional fall-through branches. This happen all the
> time when
>
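
The CloneFunction.cpp change is the same rename threaded through
PruningFunctionCloner and CloneAndPruneFunctionInto. The only contract being
relied on is that SimplifyInstruction accepts a null DataLayout pointer and
simply folds less; a minimal sketch of that call shape (the wrapper name is
made up for illustration):

    #include "llvm/Analysis/InstructionSimplify.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    // Returns a simpler value for I, or null if nothing folded. DL may be
    // null; InstructionSimplify then skips the target-dependent folds.
    static Value *trySimplify(Instruction *I, const DataLayout *DL) {
      return SimplifyInstruction(I, DL);
    }
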
> Modified: llvm/trunk/lib/Transforms/Utils/SimplifyCFG.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/SimplifyCFG.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Utils/SimplifyCFG.cpp (original)
> +++ llvm/trunk/lib/Transforms/Utils/SimplifyCFG.cpp Thu Feb 20 18:06:31
> 2014
> @@ -90,7 +90,7 @@ namespace {
>
> class SimplifyCFGOpt {
> const TargetTransformInfo &TTI;
> - const DataLayout *const TD;
> + const DataLayout *const DL;
> Value *isValueEqualityComparison(TerminatorInst *TI);
> BasicBlock *GetValueEqualityComparisonCases(TerminatorInst *TI,
> std::vector<ValueEqualityComparisonCase>
> &Cases);
> @@ -109,8 +109,8 @@ class SimplifyCFGOpt {
> bool SimplifyCondBranch(BranchInst *BI, IRBuilder <>&Builder);
>
> public:
> - SimplifyCFGOpt(const TargetTransformInfo &TTI, const DataLayout *TD)
> - : TTI(TTI), TD(TD) {}
> + SimplifyCFGOpt(const TargetTransformInfo &TTI, const DataLayout *DL)
> + : TTI(TTI), DL(DL) {}
> bool run(BasicBlock *BB);
> };
> }
> @@ -306,15 +306,15 @@ static bool DominatesMergePoint(Value *V
>
> /// GetConstantInt - Extract ConstantInt from value, looking through
> IntToPtr
> /// and PointerNullValue. Return NULL if value is not a constant int.
> -static ConstantInt *GetConstantInt(Value *V, const DataLayout *TD) {
> +static ConstantInt *GetConstantInt(Value *V, const DataLayout *DL) {
> // Normal constant int.
> ConstantInt *CI = dyn_cast<ConstantInt>(V);
> - if (CI || !TD || !isa<Constant>(V) || !V->getType()->isPointerTy())
> + if (CI || !DL || !isa<Constant>(V) || !V->getType()->isPointerTy())
> return CI;
>
> // This is some kind of pointer constant. Turn it into a pointer-sized
> // ConstantInt if possible.
> - IntegerType *PtrTy = cast<IntegerType>(TD->getIntPtrType(V->getType()));
> + IntegerType *PtrTy = cast<IntegerType>(DL->getIntPtrType(V->getType()));
>
> // Null pointer means 0, see SelectionDAGBuilder::getValue(const
> Value*).
> if (isa<ConstantPointerNull>(V))
> @@ -340,13 +340,13 @@ static ConstantInt *GetConstantInt(Value
> /// Values vector.
> static Value *
> GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value
> *&Extra,
> - const DataLayout *TD, bool isEQ, unsigned &UsedICmps) {
> + const DataLayout *DL, bool isEQ, unsigned &UsedICmps) {
> Instruction *I = dyn_cast<Instruction>(V);
> if (I == 0) return 0;
>
> // If this is an icmp against a constant, handle this as one of the
> cases.
> if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) {
> - if (ConstantInt *C = GetConstantInt(I->getOperand(1), TD)) {
> + if (ConstantInt *C = GetConstantInt(I->getOperand(1), DL)) {
> Value *RHSVal;
> ConstantInt *RHSC;
>
> @@ -405,11 +405,11 @@ GatherConstantCompares(Value *V, std::ve
>
> unsigned NumValsBeforeLHS = Vals.size();
> unsigned UsedICmpsBeforeLHS = UsedICmps;
> - if (Value *LHS = GatherConstantCompares(I->getOperand(0), Vals, Extra, TD,
> + if (Value *LHS = GatherConstantCompares(I->getOperand(0), Vals, Extra, DL,
> isEQ, UsedICmps)) {
> unsigned NumVals = Vals.size();
> unsigned UsedICmpsBeforeRHS = UsedICmps;
> - if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, TD,
> + if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, DL,
> isEQ, UsedICmps)) {
> if (LHS == RHS)
> return LHS;
> @@ -434,7 +434,7 @@ GatherConstantCompares(Value *V, std::ve
> if (Extra == 0 || Extra == I->getOperand(0)) {
> Value *OldExtra = Extra;
> Extra = I->getOperand(0);
> - if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, TD,
> + if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, DL,
> isEQ, UsedICmps))
> return RHS;
> assert(Vals.size() == NumValsBeforeLHS);
> @@ -472,14 +472,14 @@ Value *SimplifyCFGOpt::isValueEqualityCo
> } else if (BranchInst *BI = dyn_cast<BranchInst>(TI))
> if (BI->isConditional() && BI->getCondition()->hasOneUse())
> if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition()))
> - if (ICI->isEquality() && GetConstantInt(ICI->getOperand(1), TD))
> + if (ICI->isEquality() && GetConstantInt(ICI->getOperand(1), DL))
> CV = ICI->getOperand(0);
>
> // Unwrap any lossless ptrtoint cast.
> - if (TD && CV) {
> + if (DL && CV) {
> if (PtrToIntInst *PTII = dyn_cast<PtrToIntInst>(CV)) {
> Value *Ptr = PTII->getPointerOperand();
> - if (PTII->getType() == TD->getIntPtrType(Ptr->getType()))
> + if (PTII->getType() == DL->getIntPtrType(Ptr->getType()))
> CV = Ptr;
> }
> }
> @@ -504,7 +504,7 @@ GetValueEqualityComparisonCases(Terminat
> ICmpInst *ICI = cast<ICmpInst>(BI->getCondition());
> BasicBlock *Succ = BI->getSuccessor(ICI->getPredicate() ==
> ICmpInst::ICMP_NE);
>
> Cases.push_back(ValueEqualityComparisonCase(GetConstantInt(ICI->getOperand(1),
> - TD),
> + DL),
> Succ));
> return BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_EQ);
> }
> @@ -930,8 +930,8 @@ bool SimplifyCFGOpt::FoldValueComparison
> Builder.SetInsertPoint(PTI);
> // Convert pointer to int before we switch.
> if (CV->getType()->isPointerTy()) {
> - assert(TD && "Cannot switch on pointer without DataLayout");
> - CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getType()),
> + assert(DL && "Cannot switch on pointer without DataLayout");
> + CV = Builder.CreatePtrToInt(CV, DL->getIntPtrType(CV->getType()),
> "magicptr");
> }
>
> @@ -1606,7 +1606,7 @@ static bool BlockIsSimpleEnoughToThreadT
> /// that is defined in the same block as the branch and if any PHI
> entries are
> /// constants, thread edges corresponding to that entry to be branches to
> their
> /// ultimate destination.
> -static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) {
> +static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *DL) {
> BasicBlock *BB = BI->getParent();
> PHINode *PN = dyn_cast<PHINode>(BI->getCondition());
> // NOTE: we currently cannot transform this case if the PHI node is used
> @@ -1675,7 +1675,7 @@ static bool FoldCondBranchOnPHI(BranchIn
> }
>
> // Check for trivial simplification.
> - if (Value *V = SimplifyInstruction(N, TD)) {
> + if (Value *V = SimplifyInstruction(N, DL)) {
> TranslateMap[BBI] = V;
> delete N; // Instruction folded away, don't need actual inst
> } else {
> @@ -1696,7 +1696,7 @@ static bool FoldCondBranchOnPHI(BranchIn
> }
>
> // Recurse, simplifying any other constants.
> - return FoldCondBranchOnPHI(BI, TD) | true;
> + return FoldCondBranchOnPHI(BI, DL) | true;
> }
>
> return false;
> @@ -1704,7 +1704,7 @@ static bool FoldCondBranchOnPHI(BranchIn
>
> /// FoldTwoEntryPHINode - Given a BB that starts with the specified
> two-entry
> /// PHI node, see if we can eliminate it.
> -static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *TD) {
> +static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) {
> // Ok, this is a two entry PHI node. Check to see if this is a simple
> "if
> // statement", which has a very simple dominance structure. Basically,
> we
> // are trying to find the condition that is being branched on, which
> @@ -1738,7 +1738,7 @@ static bool FoldTwoEntryPHINode(PHINode
>
> for (BasicBlock::iterator II = BB->begin(); isa<PHINode>(II);) {
> PHINode *PN = cast<PHINode>(II++);
> - if (Value *V = SimplifyInstruction(PN, TD)) {
> + if (Value *V = SimplifyInstruction(PN, DL)) {
> PN->replaceAllUsesWith(V);
> PN->eraseFromParent();
> continue;
> @@ -2634,7 +2634,7 @@ static bool SimplifyIndirectBrOnSelect(I
> /// the PHI, merging the third icmp into the switch.
> static bool TryToSimplifyUncondBranchWithICmpInIt(
> ICmpInst *ICI, IRBuilder<> &Builder, const TargetTransformInfo &TTI,
> - const DataLayout *TD) {
> + const DataLayout *DL) {
> BasicBlock *BB = ICI->getParent();
>
> // If the block has any PHIs in it or the icmp has multiple uses, it is
> too
> @@ -2662,12 +2662,12 @@ static bool TryToSimplifyUncondBranchWit
> assert(VVal && "Should have a unique destination value");
> ICI->setOperand(0, VVal);
>
> - if (Value *V = SimplifyInstruction(ICI, TD)) {
> + if (Value *V = SimplifyInstruction(ICI, DL)) {
> ICI->replaceAllUsesWith(V);
> ICI->eraseFromParent();
> }
> // BB is now empty, so it is likely to simplify away.
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
> }
>
> // Ok, the block is reachable from the default dest. If the constant
> we're
> @@ -2683,7 +2683,7 @@ static bool TryToSimplifyUncondBranchWit
> ICI->replaceAllUsesWith(V);
> ICI->eraseFromParent();
> // BB is now empty, so it is likely to simplify away.
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
> }
>
> // The use of the icmp has to be in the 'end' block, by the only PHI
> node in
> @@ -2739,7 +2739,7 @@ static bool TryToSimplifyUncondBranchWit
> /// SimplifyBranchOnICmpChain - The specified branch is a conditional
> branch.
> /// Check to see if it is branching on an or/and chain of icmp
> instructions, and
> /// fold it into a switch instruction if so.
> -static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD,
> +static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *DL,
> IRBuilder<> &Builder) {
> Instruction *Cond = dyn_cast<Instruction>(BI->getCondition());
> if (Cond == 0) return false;
> @@ -2755,10 +2755,10 @@ static bool SimplifyBranchOnICmpChain(Br
> unsigned UsedICmps = 0;
>
> if (Cond->getOpcode() == Instruction::Or) {
> - CompVal = GatherConstantCompares(Cond, Values, ExtraCase, TD, true,
> + CompVal = GatherConstantCompares(Cond, Values, ExtraCase, DL, true,
> UsedICmps);
> } else if (Cond->getOpcode() == Instruction::And) {
> - CompVal = GatherConstantCompares(Cond, Values, ExtraCase, TD, false,
> + CompVal = GatherConstantCompares(Cond, Values, ExtraCase, DL, false,
> UsedICmps);
> TrueWhenEqual = false;
> }
> @@ -2820,9 +2820,9 @@ static bool SimplifyBranchOnICmpChain(Br
> Builder.SetInsertPoint(BI);
> // Convert pointer to int before we switch.
> if (CompVal->getType()->isPointerTy()) {
> - assert(TD && "Cannot switch on pointer without DataLayout");
> + assert(DL && "Cannot switch on pointer without DataLayout");
> CompVal = Builder.CreatePtrToInt(CompVal,
> - TD->getIntPtrType(CompVal->getType()),
> + DL->getIntPtrType(CompVal->getType()),
> "magicptr");
> }
>
> @@ -3453,7 +3453,7 @@ namespace {
> ConstantInt *Offset,
> const SmallVectorImpl<std::pair<ConstantInt*, Constant*> >&
> Values,
> Constant *DefaultValue,
> - const DataLayout *TD);
> + const DataLayout *DL);
>
> /// BuildLookup - Build instructions with Builder to retrieve the
> value at
> /// the position given by Index in the lookup table.
> @@ -3461,7 +3461,7 @@ namespace {
>
> /// WouldFitInRegister - Return true if a table with TableSize
> elements of
> /// type ElementType would fit in a target-legal register.
> - static bool WouldFitInRegister(const DataLayout *TD,
> + static bool WouldFitInRegister(const DataLayout *DL,
> uint64_t TableSize,
> const Type *ElementType);
>
> @@ -3500,7 +3500,7 @@ SwitchLookupTable::SwitchLookupTable(Mod
> ConstantInt *Offset,
> const SmallVectorImpl<std::pair<ConstantInt*, Constant*> >&
> Values,
> Constant *DefaultValue,
> - const DataLayout *TD)
> + const DataLayout *DL)
> : SingleValue(0), BitMap(0), BitMapElementTy(0), Array(0) {
> assert(Values.size() && "Can't build lookup table without values!");
> assert(TableSize >= Values.size() && "Can't fit values in table!");
> @@ -3546,7 +3546,7 @@ SwitchLookupTable::SwitchLookupTable(Mod
> }
>
> // If the type is integer and the table fits in a register, build a
> bitmap.
> - if (WouldFitInRegister(TD, TableSize, ValueType)) {
> + if (WouldFitInRegister(DL, TableSize, ValueType)) {
> IntegerType *IT = cast<IntegerType>(ValueType);
> APInt TableInt(TableSize * IT->getBitWidth(), 0);
> for (uint64_t I = TableSize; I > 0; --I) {
> @@ -3611,10 +3611,10 @@ Value *SwitchLookupTable::BuildLookup(Va
> llvm_unreachable("Unknown lookup table kind!");
> }
>
> -bool SwitchLookupTable::WouldFitInRegister(const DataLayout *TD,
> +bool SwitchLookupTable::WouldFitInRegister(const DataLayout *DL,
> uint64_t TableSize,
> const Type *ElementType) {
> - if (!TD)
> + if (!DL)
> return false;
> const IntegerType *IT = dyn_cast<IntegerType>(ElementType);
> if (!IT)
> @@ -3625,7 +3625,7 @@ bool SwitchLookupTable::WouldFitInRegist
> // Avoid overflow, fitsInLegalInteger uses unsigned int for the width.
> if (TableSize >= UINT_MAX/IT->getBitWidth())
> return false;
> - return TD->fitsInLegalInteger(TableSize * IT->getBitWidth());
> + return DL->fitsInLegalInteger(TableSize * IT->getBitWidth());
> }
>
> /// ShouldBuildLookupTable - Determine whether a lookup table should be
> built
> @@ -3634,7 +3634,7 @@ bool SwitchLookupTable::WouldFitInRegist
> static bool ShouldBuildLookupTable(SwitchInst *SI,
> uint64_t TableSize,
> const TargetTransformInfo &TTI,
> - const DataLayout *TD,
> + const DataLayout *DL,
> const SmallDenseMap<PHINode*, Type*>&
> ResultTypes) {
> if (SI->getNumCases() > TableSize || TableSize >= UINT64_MAX / 10)
> return false; // TableSize overflowed, or mul below might overflow.
> @@ -3650,7 +3650,7 @@ static bool ShouldBuildLookupTable(Switc
>
> // Saturate this flag to false.
> AllTablesFitInRegister = AllTablesFitInRegister &&
> - SwitchLookupTable::WouldFitInRegister(TD, TableSize, Ty);
> + SwitchLookupTable::WouldFitInRegister(DL, TableSize, Ty);
>
> // If both flags saturate, we're done. NOTE: This *only* works with
> // saturating flags, and all flags have to saturate first due to the
> @@ -3679,7 +3679,7 @@ static bool ShouldBuildLookupTable(Switc
> static bool SwitchToLookupTable(SwitchInst *SI,
> IRBuilder<> &Builder,
> const TargetTransformInfo &TTI,
> - const DataLayout* TD) {
> + const DataLayout* DL) {
> assert(SI->getNumCases() > 1 && "Degenerate switch?");
>
> // Only build lookup table when we have a target that supports it.
> @@ -3723,7 +3723,7 @@ static bool SwitchToLookupTable(SwitchIn
> typedef SmallVector<std::pair<PHINode*, Constant*>, 4> ResultsTy;
> ResultsTy Results;
> if (!GetCaseResults(SI, CaseVal, CI.getCaseSuccessor(), &CommonDest,
> - Results, TD))
> + Results, DL))
> return false;
>
> // Append the result from this case to the list for each phi.
> @@ -3748,7 +3748,7 @@ static bool SwitchToLookupTable(SwitchIn
> // If the table has holes, we need a constant result for the default
> case.
> SmallVector<std::pair<PHINode*, Constant*>, 4> DefaultResultsList;
> if (TableHasHoles && !GetCaseResults(SI, 0, SI->getDefaultDest(),
> &CommonDest,
> - DefaultResultsList, TD))
> + DefaultResultsList, DL))
> return false;
>
> for (size_t I = 0, E = DefaultResultsList.size(); I != E; ++I) {
> @@ -3757,7 +3757,7 @@ static bool SwitchToLookupTable(SwitchIn
> DefaultResults[PHI] = Result;
> }
>
> - if (!ShouldBuildLookupTable(SI, TableSize, TTI, TD, ResultTypes))
> + if (!ShouldBuildLookupTable(SI, TableSize, TTI, DL, ResultTypes))
> return false;
>
> // Create the BB that does the lookups.
> @@ -3801,7 +3801,7 @@ static bool SwitchToLookupTable(SwitchIn
> PHINode *PHI = PHIs[I];
>
> SwitchLookupTable Table(Mod, TableSize, MinCaseVal, ResultLists[PHI],
> - DefaultResults[PHI], TD);
> + DefaultResults[PHI], DL);
>
> Value *Result = Table.BuildLookup(TableIndex, Builder);
>
> @@ -3842,12 +3842,12 @@ bool SimplifyCFGOpt::SimplifySwitch(Swit
> // see if that predecessor totally determines the outcome of this
> switch.
> if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
> if (SimplifyEqualityComparisonWithOnlyPredecessor(SI, OnlyPred,
> Builder))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
>
> Value *Cond = SI->getCondition();
> if (SelectInst *Select = dyn_cast<SelectInst>(Cond))
> if (SimplifySwitchOnSelect(SI, Select))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
>
> // If the block only contains the switch, see if we can fold the block
> // away into any preds.
> @@ -3857,22 +3857,22 @@ bool SimplifyCFGOpt::SimplifySwitch(Swit
> ++BBI;
> if (SI == &*BBI)
> if (FoldValueComparisonIntoPredecessors(SI, Builder))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
> }
>
> // Try to transform the switch into an icmp and a branch.
> if (TurnSwitchRangeIntoICmp(SI, Builder))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
>
> // Remove unreachable cases.
> if (EliminateDeadSwitchCases(SI))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
>
> if (ForwardSwitchConditionToPHI(SI))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
>
> - if (SwitchToLookupTable(SI, Builder, TTI, TD))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + if (SwitchToLookupTable(SI, Builder, TTI, DL))
> + return SimplifyCFG(BB, TTI, DL) | true;
>
> return false;
> }
> @@ -3909,7 +3909,7 @@ bool SimplifyCFGOpt::SimplifyIndirectBr(
>
> if (SelectInst *SI = dyn_cast<SelectInst>(IBI->getAddress())) {
> if (SimplifyIndirectBrOnSelect(IBI, SI))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
> }
> return Changed;
> }
> @@ -3933,7 +3933,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranc
> for (++I; isa<DbgInfoIntrinsic>(I); ++I)
> ;
> if (I->isTerminator() &&
> - TryToSimplifyUncondBranchWithICmpInIt(ICI, Builder, TTI, TD))
> + TryToSimplifyUncondBranchWithICmpInIt(ICI, Builder, TTI, DL))
> return true;
> }
>
> @@ -3942,7 +3942,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranc
> // predecessor and use logical operations to update the incoming value
> // for PHI nodes in common successor.
> if (FoldBranchToCommonDest(BI))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
> return false;
> }
>
> @@ -3957,7 +3957,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(
> // switch.
> if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
> if (SimplifyEqualityComparisonWithOnlyPredecessor(BI, OnlyPred,
> Builder))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
>
> // This block must be empty, except for the setcond inst, if it
> exists.
> // Ignore dbg intrinsics.
> @@ -3967,26 +3967,26 @@ bool SimplifyCFGOpt::SimplifyCondBranch(
> ++I;
> if (&*I == BI) {
> if (FoldValueComparisonIntoPredecessors(BI, Builder))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
> } else if (&*I == cast<Instruction>(BI->getCondition())){
> ++I;
> // Ignore dbg intrinsics.
> while (isa<DbgInfoIntrinsic>(I))
> ++I;
> if (&*I == BI && FoldValueComparisonIntoPredecessors(BI, Builder))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
> }
> }
>
> // Try to turn "br (X == 0 | X == 1), T, F" into a switch instruction.
> - if (SimplifyBranchOnICmpChain(BI, TD, Builder))
> + if (SimplifyBranchOnICmpChain(BI, DL, Builder))
> return true;
>
> // If this basic block is ONLY a compare and a branch, and if a
> predecessor
> // branches to us and one of our successors, fold the comparison into
> the
> // predecessor and use logical operations to pick the right destination.
> if (FoldBranchToCommonDest(BI))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
>
> // We have a conditional branch to two blocks that are only reachable
> // from BI. We know that the condbr dominates the two blocks, so see if
> @@ -3995,7 +3995,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(
> if (BI->getSuccessor(0)->getSinglePredecessor() != 0) {
> if (BI->getSuccessor(1)->getSinglePredecessor() != 0) {
> if (HoistThenElseCodeToIf(BI))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
> } else {
> // If Successor #1 has multiple preds, we may be able to
> conditionally
> // execute Successor #0 if it branches to successor #1.
> @@ -4003,7 +4003,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(
> if (Succ0TI->getNumSuccessors() == 1 &&
> Succ0TI->getSuccessor(0) == BI->getSuccessor(1))
> if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0)))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
> }
> } else if (BI->getSuccessor(1)->getSinglePredecessor() != 0) {
> // If Successor #0 has multiple preds, we may be able to conditionally
> @@ -4012,22 +4012,22 @@ bool SimplifyCFGOpt::SimplifyCondBranch(
> if (Succ1TI->getNumSuccessors() == 1 &&
> Succ1TI->getSuccessor(0) == BI->getSuccessor(0))
> if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1)))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
> }
>
> // If this is a branch on a phi node in the current block, thread
> control
> // through this block if any PHI node entries are constants.
> if (PHINode *PN = dyn_cast<PHINode>(BI->getCondition()))
> if (PN->getParent() == BI->getParent())
> - if (FoldCondBranchOnPHI(BI, TD))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + if (FoldCondBranchOnPHI(BI, DL))
> + return SimplifyCFG(BB, TTI, DL) | true;
>
> // Scan predecessor blocks for conditional branches.
> for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
> if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator()))
> if (PBI != BI && PBI->isConditional())
> if (SimplifyCondBranchToCondBranch(PBI, BI))
> - return SimplifyCFG(BB, TTI, TD) | true;
> + return SimplifyCFG(BB, TTI, DL) | true;
>
> return false;
> }
> @@ -4139,7 +4139,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB)
> // eliminate it, do so now.
> if (PHINode *PN = dyn_cast<PHINode>(BB->begin()))
> if (PN->getNumIncomingValues() == 2)
> - Changed |= FoldTwoEntryPHINode(PN, TD);
> + Changed |= FoldTwoEntryPHINode(PN, DL);
>
> Builder.SetInsertPoint(BB->getTerminator());
> if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
> @@ -4171,6 +4171,6 @@ bool SimplifyCFGOpt::run(BasicBlock *BB)
> /// of the CFG. It returns true if a modification was made.
> ///
> bool llvm::SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
> - const DataLayout *TD) {
> - return SimplifyCFGOpt(TTI, TD).run(BB);
> + const DataLayout *DL) {
> + return SimplifyCFGOpt(TTI, DL).run(BB);
> }
>
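
The SimplifyCFG.cpp hunks are the same story; the place where a null DL most
visibly changes behaviour is the switch-to-lookup-table transform, which
refuses to build a bitmap-style table without DataLayout. Condensed from the
WouldFitInRegister hunk above (pulling it out as a standalone function and the
function name are mine):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/DerivedTypes.h"
    #include <climits>
    using namespace llvm;

    static bool tableFitsInRegister(const DataLayout *DL, uint64_t TableSize,
                                    IntegerType *EltTy) {
      if (!DL)
        return false;  // no target info: conservatively say it doesn't fit
      // fitsInLegalInteger takes an unsigned width, so guard the multiply.
      if (TableSize >= UINT_MAX / EltTy->getBitWidth())
        return false;
      return DL->fitsInLegalInteger(TableSize * EltTy->getBitWidth());
    }
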
> Modified: llvm/trunk/lib/Transforms/Utils/SimplifyIndVar.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/SimplifyIndVar.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Utils/SimplifyIndVar.cpp (original)
> +++ llvm/trunk/lib/Transforms/Utils/SimplifyIndVar.cpp Thu Feb 20 18:06:31
> 2014
> @@ -48,7 +48,7 @@ namespace {
> Loop *L;
> LoopInfo *LI;
> ScalarEvolution *SE;
> - const DataLayout *TD; // May be NULL
> + const DataLayout *DL; // May be NULL
>
> SmallVectorImpl<WeakVH> &DeadInsts;
>
> @@ -60,7 +60,7 @@ namespace {
> L(Loop),
> LI(LPM->getAnalysisIfAvailable<LoopInfo>()),
> SE(SE),
> - TD(LPM->getAnalysisIfAvailable<DataLayout>()),
> + DL(LPM->getAnalysisIfAvailable<DataLayout>()),
> DeadInsts(Dead),
> Changed(false) {
> assert(LI && "IV simplification requires LoopInfo");
>
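
SimplifyIndVar.cpp also shows where these possibly-null pointers come from in
the first place: at this point in the tree DataLayout is an ImmutablePass, so
consumers pull it in with getAnalysisIfAvailable and simply get null when the
module carries no target description. A stripped-down sketch of that
initialisation pattern (the pass itself is hypothetical):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
    struct ExamplePass : public FunctionPass {
      static char ID;
      const DataLayout *DL;  // stays null if no DataLayout pass is around
      ExamplePass() : FunctionPass(ID), DL(0) {}
      virtual bool runOnFunction(Function &F) {
        DL = getAnalysisIfAvailable<DataLayout>();
        return false;  // sketch only; nothing modified
      }
    };
    }
    char ExamplePass::ID = 0;
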
> Modified: llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp (original)
> +++ llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp Thu Feb 20
> 18:06:31 2014
> @@ -43,7 +43,7 @@ namespace {
> class LibCallOptimization {
> protected:
> Function *Caller;
> - const DataLayout *TD;
> + const DataLayout *DL;
> const TargetLibraryInfo *TLI;
> const LibCallSimplifier *LCS;
> LLVMContext* Context;
> @@ -63,11 +63,11 @@ public:
> /// change the calling convention.
> virtual bool ignoreCallingConv() { return false; }
>
> - Value *optimizeCall(CallInst *CI, const DataLayout *TD,
> + Value *optimizeCall(CallInst *CI, const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> const LibCallSimplifier *LCS, IRBuilder<> &B) {
> Caller = CI->getParent()->getParent();
> - this->TD = TD;
> + this->DL = DL;
> this->TLI = TLI;
> this->LCS = LCS;
> if (CI->getCalledFunction())
> @@ -184,8 +184,8 @@ struct MemCpyChkOpt : public InstFortifi
> if (FT->getNumParams() != 4 || FT->getReturnType() !=
> FT->getParamType(0) ||
> !FT->getParamType(0)->isPointerTy() ||
> !FT->getParamType(1)->isPointerTy() ||
> - FT->getParamType(2) != TD->getIntPtrType(Context) ||
> - FT->getParamType(3) != TD->getIntPtrType(Context))
> + FT->getParamType(2) != DL->getIntPtrType(Context) ||
> + FT->getParamType(3) != DL->getIntPtrType(Context))
> return 0;
>
> if (isFoldable(3, 2, false)) {
> @@ -207,8 +207,8 @@ struct MemMoveChkOpt : public InstFortif
> if (FT->getNumParams() != 4 || FT->getReturnType() !=
> FT->getParamType(0) ||
> !FT->getParamType(0)->isPointerTy() ||
> !FT->getParamType(1)->isPointerTy() ||
> - FT->getParamType(2) != TD->getIntPtrType(Context) ||
> - FT->getParamType(3) != TD->getIntPtrType(Context))
> + FT->getParamType(2) != DL->getIntPtrType(Context) ||
> + FT->getParamType(3) != DL->getIntPtrType(Context))
> return 0;
>
> if (isFoldable(3, 2, false)) {
> @@ -230,8 +230,8 @@ struct MemSetChkOpt : public InstFortifi
> if (FT->getNumParams() != 4 || FT->getReturnType() !=
> FT->getParamType(0) ||
> !FT->getParamType(0)->isPointerTy() ||
> !FT->getParamType(1)->isIntegerTy() ||
> - FT->getParamType(2) != TD->getIntPtrType(Context) ||
> - FT->getParamType(3) != TD->getIntPtrType(Context))
> + FT->getParamType(2) != DL->getIntPtrType(Context) ||
> + FT->getParamType(3) != DL->getIntPtrType(Context))
> return 0;
>
> if (isFoldable(3, 2, false)) {
> @@ -256,7 +256,7 @@ struct StrCpyChkOpt : public InstFortifi
> FT->getReturnType() != FT->getParamType(0) ||
> FT->getParamType(0) != FT->getParamType(1) ||
> FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
> - FT->getParamType(2) != TD->getIntPtrType(Context))
> + FT->getParamType(2) != DL->getIntPtrType(Context))
> return 0;
>
> Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
> @@ -269,7 +269,7 @@ struct StrCpyChkOpt : public InstFortifi
> // TODO: It might be nice to get a maximum length out of the possible
> // string lengths for varying.
> if (isFoldable(2, 1, true)) {
> - Value *Ret = EmitStrCpy(Dst, Src, B, TD, TLI, Name.substr(2, 6));
> + Value *Ret = EmitStrCpy(Dst, Src, B, DL, TLI, Name.substr(2, 6));
> return Ret;
> } else {
> // Maybe we can stil fold __strcpy_chk to __memcpy_chk.
> @@ -277,12 +277,12 @@ struct StrCpyChkOpt : public InstFortifi
> if (Len == 0) return 0;
>
> // This optimization require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> Value *Ret =
> EmitMemCpyChk(Dst, Src,
> - ConstantInt::get(TD->getIntPtrType(Context), Len),
> - CI->getArgOperand(2), B, TD, TLI);
> + ConstantInt::get(DL->getIntPtrType(Context), Len),
> + CI->getArgOperand(2), B, DL, TLI);
> return Ret;
> }
> return 0;
> @@ -301,12 +301,12 @@ struct StpCpyChkOpt : public InstFortifi
> FT->getReturnType() != FT->getParamType(0) ||
> FT->getParamType(0) != FT->getParamType(1) ||
> FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
> - FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)))
> + FT->getParamType(2) != DL->getIntPtrType(FT->getParamType(0)))
> return 0;
>
> Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
> if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)
> - Value *StrLen = EmitStrLen(Src, B, TD, TLI);
> + Value *StrLen = EmitStrLen(Src, B, DL, TLI);
> return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0;
> }
>
> @@ -316,7 +316,7 @@ struct StpCpyChkOpt : public InstFortifi
> // TODO: It might be nice to get a maximum length out of the possible
> // string lengths for varying.
> if (isFoldable(2, 1, true)) {
> - Value *Ret = EmitStrCpy(Dst, Src, B, TD, TLI, Name.substr(2, 6));
> + Value *Ret = EmitStrCpy(Dst, Src, B, DL, TLI, Name.substr(2, 6));
> return Ret;
> } else {
> // Maybe we can stil fold __stpcpy_chk to __memcpy_chk.
> @@ -324,14 +324,14 @@ struct StpCpyChkOpt : public InstFortifi
> if (Len == 0) return 0;
>
> // This optimization require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> Type *PT = FT->getParamType(0);
> - Value *LenV = ConstantInt::get(TD->getIntPtrType(PT), Len);
> + Value *LenV = ConstantInt::get(DL->getIntPtrType(PT), Len);
> Value *DstEnd = B.CreateGEP(Dst,
> - ConstantInt::get(TD->getIntPtrType(PT),
> + ConstantInt::get(DL->getIntPtrType(PT),
> Len - 1));
> - if (!EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B, TD, TLI))
> + if (!EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B, DL, TLI))
> return 0;
> return DstEnd;
> }
> @@ -351,12 +351,12 @@ struct StrNCpyChkOpt : public InstFortif
> FT->getParamType(0) != FT->getParamType(1) ||
> FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
> !FT->getParamType(2)->isIntegerTy() ||
> - FT->getParamType(3) != TD->getIntPtrType(Context))
> + FT->getParamType(3) != DL->getIntPtrType(Context))
> return 0;
>
> if (isFoldable(3, 2, false)) {
> Value *Ret = EmitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
> - CI->getArgOperand(2), B, TD, TLI,
> + CI->getArgOperand(2), B, DL, TLI,
> Name.substr(2, 7));
> return Ret;
> }
> @@ -392,7 +392,7 @@ struct StrCatOpt : public LibCallOptimiz
> return Dst;
>
> // These optimizations require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> return emitStrLenMemCpy(Src, Dst, Len, B);
> }
> @@ -401,7 +401,7 @@ struct StrCatOpt : public LibCallOptimiz
> IRBuilder<> &B) {
> // We need to find the end of the destination string. That's where
> the
> // memory is to be moved to. We just generate a call to strlen.
> - Value *DstLen = EmitStrLen(Dst, B, TD, TLI);
> + Value *DstLen = EmitStrLen(Dst, B, DL, TLI);
> if (!DstLen)
> return 0;
>
> @@ -413,7 +413,7 @@ struct StrCatOpt : public LibCallOptimiz
> // We have enough information to now generate the memcpy call to do
> the
> // concatenation for us. Make a memcpy to copy the nul byte with
> align = 1.
> B.CreateMemCpy(CpyDst, Src,
> - ConstantInt::get(TD->getIntPtrType(*Context), Len + 1), 1);
> + ConstantInt::get(DL->getIntPtrType(*Context), Len + 1), 1);
> return Dst;
> }
> };
> @@ -451,7 +451,7 @@ struct StrNCatOpt : public StrCatOpt {
> if (SrcLen == 0 || Len == 0) return Dst;
>
> // These optimizations require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> // We don't optimize this case
> if (Len < SrcLen) return 0;
> @@ -479,23 +479,23 @@ struct StrChrOpt : public LibCallOptimiz
> ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
> if (CharC == 0) {
> // These optimizations require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> uint64_t Len = GetStringLength(SrcStr);
> if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr
> needs i32.
> return 0;
>
> return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul.
> - ConstantInt::get(TD->getIntPtrType(*Context), Len),
> - B, TD, TLI);
> + ConstantInt::get(DL->getIntPtrType(*Context), Len),
> + B, DL, TLI);
> }
>
> // Otherwise, the character is a constant, see if the first argument
> is
> // a string literal. If so, we can constant fold.
> StringRef Str;
> if (!getConstantStringInfo(SrcStr, Str)) {
> - if (TD && CharC->isZero()) // strchr(p, 0) -> p + strlen(p)
> - return B.CreateGEP(SrcStr, EmitStrLen(SrcStr, B, TD, TLI), "strchr");
> + if (DL && CharC->isZero()) // strchr(p, 0) -> p + strlen(p)
> + return B.CreateGEP(SrcStr, EmitStrLen(SrcStr, B, DL, TLI), "strchr");
> return 0;
> }
>
> @@ -531,8 +531,8 @@ struct StrRChrOpt : public LibCallOptimi
> StringRef Str;
> if (!getConstantStringInfo(SrcStr, Str)) {
> // strrchr(s, 0) -> strchr(s, 0)
> - if (TD && CharC->isZero())
> - return EmitStrChr(SrcStr, '\0', B, TD, TLI);
> + if (DL && CharC->isZero())
> + return EmitStrChr(SrcStr, '\0', B, DL, TLI);
> return 0;
> }
>
> @@ -581,11 +581,11 @@ struct StrCmpOpt : public LibCallOptimiz
> uint64_t Len2 = GetStringLength(Str2P);
> if (Len1 && Len2) {
> // These optimizations require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> return EmitMemCmp(Str1P, Str2P,
> - ConstantInt::get(TD->getIntPtrType(*Context),
> - std::min(Len1, Len2)), B, TD, TLI);
> + ConstantInt::get(DL->getIntPtrType(*Context),
> + std::min(Len1, Len2)), B, DL, TLI);
> }
>
> return 0;
> @@ -617,8 +617,8 @@ struct StrNCmpOpt : public LibCallOptimi
> if (Length == 0) // strncmp(x,y,0) -> 0
> return ConstantInt::get(CI->getType(), 0);
>
> - if (TD && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
> - return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, TD, TLI);
> + if (DL && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
> + return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, DL, TLI);
>
> StringRef Str1, Str2;
> bool HasStr1 = getConstantStringInfo(Str1P, Str1);
> @@ -657,7 +657,7 @@ struct StrCpyOpt : public LibCallOptimiz
> return Src;
>
> // These optimizations require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> // See if we can get the length of the input string.
> uint64_t Len = GetStringLength(Src);
> @@ -666,7 +666,7 @@ struct StrCpyOpt : public LibCallOptimiz
> // We have enough information to now generate the memcpy call to do
> the
> // copy for us. Make a memcpy to copy the nul byte with align = 1.
> B.CreateMemCpy(Dst, Src,
> - ConstantInt::get(TD->getIntPtrType(*Context), Len), 1);
> + ConstantInt::get(DL->getIntPtrType(*Context), Len), 1);
> return Dst;
> }
> };
> @@ -682,11 +682,11 @@ struct StpCpyOpt: public LibCallOptimiza
> return 0;
>
> // These optimizations require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
> if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)
> - Value *StrLen = EmitStrLen(Src, B, TD, TLI);
> + Value *StrLen = EmitStrLen(Src, B, DL, TLI);
> return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0;
> }
>
> @@ -695,9 +695,9 @@ struct StpCpyOpt: public LibCallOptimiza
> if (Len == 0) return 0;
>
> Type *PT = FT->getParamType(0);
> - Value *LenV = ConstantInt::get(TD->getIntPtrType(PT), Len);
> + Value *LenV = ConstantInt::get(DL->getIntPtrType(PT), Len);
> Value *DstEnd = B.CreateGEP(Dst,
> - ConstantInt::get(TD->getIntPtrType(PT),
> + ConstantInt::get(DL->getIntPtrType(PT),
> Len - 1));
>
> // We have enough information to now generate the memcpy call to do
> the
> @@ -740,7 +740,7 @@ struct StrNCpyOpt : public LibCallOptimi
> if (Len == 0) return Dst; // strncpy(x, y, 0) -> x
>
> // These optimizations require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> // Let strncpy handle the zero padding
> if (Len > SrcLen+1) return 0;
> @@ -748,7 +748,7 @@ struct StrNCpyOpt : public LibCallOptimi
> Type *PT = FT->getParamType(0);
> // strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant]
> B.CreateMemCpy(Dst, Src,
> - ConstantInt::get(TD->getIntPtrType(PT), Len), 1);
> + ConstantInt::get(DL->getIntPtrType(PT), Len), 1);
>
> return Dst;
> }
> @@ -805,8 +805,8 @@ struct StrPBrkOpt : public LibCallOptimi
> }
>
> // strpbrk(s, "a") -> strchr(s, 'a')
> - if (TD && HasS2 && S2.size() == 1)
> - return EmitStrChr(CI->getArgOperand(0), S2[0], B, TD, TLI);
> + if (DL && HasS2 && S2.size() == 1)
> + return EmitStrChr(CI->getArgOperand(0), S2[0], B, DL, TLI);
>
> return 0;
> }
> @@ -885,8 +885,8 @@ struct StrCSpnOpt : public LibCallOptimi
> }
>
> // strcspn(s, "") -> strlen(s)
> - if (TD && HasS2 && S2.empty())
> - return EmitStrLen(CI->getArgOperand(0), B, TD, TLI);
> + if (DL && HasS2 && S2.empty())
> + return EmitStrLen(CI->getArgOperand(0), B, DL, TLI);
>
> return 0;
> }
> @@ -906,12 +906,12 @@ struct StrStrOpt : public LibCallOptimiz
> return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
>
> // fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0
> - if (TD && isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
> - Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, TD, TLI);
> + if (DL && isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
> + Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, DL, TLI);
> if (!StrLen)
> return 0;
> Value *StrNCmp = EmitStrNCmp(CI->getArgOperand(0),
> CI->getArgOperand(1),
> - StrLen, B, TD, TLI);
> + StrLen, B, DL, TLI);
> if (!StrNCmp)
> return 0;
> for (Value::use_iterator UI = CI->use_begin(), UE = CI->use_end();
> @@ -949,7 +949,7 @@ struct StrStrOpt : public LibCallOptimiz
>
> // fold strstr(x, "y") -> strchr(x, 'y').
> if (HasStr2 && ToFindStr.size() == 1) {
> - Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, TD, TLI);
> + Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, DL, TLI);
> return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : 0;
> }
> return 0;
> @@ -1011,13 +1011,13 @@ struct MemCmpOpt : public LibCallOptimiz
> struct MemCpyOpt : public LibCallOptimization {
> virtual Value *callOptimizer(Function *Callee, CallInst *CI,
> IRBuilder<> &B) {
> // These optimizations require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> FunctionType *FT = Callee->getFunctionType();
> if (FT->getNumParams() != 3 || FT->getReturnType() !=
> FT->getParamType(0) ||
> !FT->getParamType(0)->isPointerTy() ||
> !FT->getParamType(1)->isPointerTy() ||
> - FT->getParamType(2) != TD->getIntPtrType(*Context))
> + FT->getParamType(2) != DL->getIntPtrType(*Context))
> return 0;
>
> // memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
> @@ -1030,13 +1030,13 @@ struct MemCpyOpt : public LibCallOptimiz
> struct MemMoveOpt : public LibCallOptimization {
> virtual Value *callOptimizer(Function *Callee, CallInst *CI,
> IRBuilder<> &B) {
> // These optimizations require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> FunctionType *FT = Callee->getFunctionType();
> if (FT->getNumParams() != 3 || FT->getReturnType() !=
> FT->getParamType(0) ||
> !FT->getParamType(0)->isPointerTy() ||
> !FT->getParamType(1)->isPointerTy() ||
> - FT->getParamType(2) != TD->getIntPtrType(*Context))
> + FT->getParamType(2) != DL->getIntPtrType(*Context))
> return 0;
>
> // memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
> @@ -1049,13 +1049,13 @@ struct MemMoveOpt : public LibCallOptimi
> struct MemSetOpt : public LibCallOptimization {
> virtual Value *callOptimizer(Function *Callee, CallInst *CI,
> IRBuilder<> &B) {
> // These optimizations require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> FunctionType *FT = Callee->getFunctionType();
> if (FT->getNumParams() != 3 || FT->getReturnType() !=
> FT->getParamType(0) ||
> !FT->getParamType(0)->isPointerTy() ||
> !FT->getParamType(1)->isIntegerTy() ||
> - FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)))
> + FT->getParamType(2) != DL->getIntPtrType(FT->getParamType(0)))
> return 0;
>
> // memset(p, v, n) -> llvm.memset(p, v, n, 1)
> @@ -1632,7 +1632,7 @@ struct PrintFOpt : public LibCallOptimiz
>
> // printf("x") -> putchar('x'), even for '%'.
> if (FormatStr.size() == 1) {
> - Value *Res = EmitPutChar(B.getInt32(FormatStr[0]), B, TD, TLI);
> + Value *Res = EmitPutChar(B.getInt32(FormatStr[0]), B, DL, TLI);
> if (CI->use_empty() || !Res) return Res;
> return B.CreateIntCast(Res, CI->getType(), true);
> }
> @@ -1644,7 +1644,7 @@ struct PrintFOpt : public LibCallOptimiz
> // pass to be run after this pass, to merge duplicate strings.
> FormatStr = FormatStr.drop_back();
> Value *GV = B.CreateGlobalString(FormatStr, "str");
> - Value *NewCI = EmitPutS(GV, B, TD, TLI);
> + Value *NewCI = EmitPutS(GV, B, DL, TLI);
> return (CI->use_empty() || !NewCI) ?
> NewCI :
> ConstantInt::get(CI->getType(), FormatStr.size()+1);
> @@ -1654,7 +1654,7 @@ struct PrintFOpt : public LibCallOptimiz
> // printf("%c", chr) --> putchar(chr)
> if (FormatStr == "%c" && CI->getNumArgOperands() > 1 &&
> CI->getArgOperand(1)->getType()->isIntegerTy()) {
> - Value *Res = EmitPutChar(CI->getArgOperand(1), B, TD, TLI);
> + Value *Res = EmitPutChar(CI->getArgOperand(1), B, DL, TLI);
>
> if (CI->use_empty() || !Res) return Res;
> return B.CreateIntCast(Res, CI->getType(), true);
> @@ -1663,7 +1663,7 @@ struct PrintFOpt : public LibCallOptimiz
> // printf("%s\n", str) --> puts(str)
> if (FormatStr == "%s\n" && CI->getNumArgOperands() > 1 &&
> CI->getArgOperand(1)->getType()->isPointerTy()) {
> - return EmitPutS(CI->getArgOperand(1), B, TD, TLI);
> + return EmitPutS(CI->getArgOperand(1), B, DL, TLI);
> }
> return 0;
> }
> @@ -1712,11 +1712,11 @@ struct SPrintFOpt : public LibCallOptimi
> return 0; // we found a format specifier, bail out.
>
> // These optimizations require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> // sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
> B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
> - ConstantInt::get(TD->getIntPtrType(*Context), // Copy the
> + ConstantInt::get(DL->getIntPtrType(*Context), // Copy the
> FormatStr.size() + 1), 1); // nul byte.
> return ConstantInt::get(CI->getType(), FormatStr.size());
> }
> @@ -1742,12 +1742,12 @@ struct SPrintFOpt : public LibCallOptimi
>
> if (FormatStr[1] == 's') {
> // These optimizations require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> // sprintf(dest, "%s", str) -> llvm.memcpy(dest, str,
> strlen(str)+1, 1)
> if (!CI->getArgOperand(2)->getType()->isPointerTy()) return 0;
>
> - Value *Len = EmitStrLen(CI->getArgOperand(2), B, TD, TLI);
> + Value *Len = EmitStrLen(CI->getArgOperand(2), B, DL, TLI);
> if (!Len)
> return 0;
> Value *IncLen = B.CreateAdd(Len,
> @@ -1812,12 +1812,12 @@ struct FPrintFOpt : public LibCallOptimi
> return 0; // We found a format specifier.
>
> // These optimizations require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> return EmitFWrite(CI->getArgOperand(1),
> - ConstantInt::get(TD->getIntPtrType(*Context),
> + ConstantInt::get(DL->getIntPtrType(*Context),
> FormatStr.size()),
> - CI->getArgOperand(0), B, TD, TLI);
> + CI->getArgOperand(0), B, DL, TLI);
> }
>
> // The remaining optimizations require the format string to be "%s"
> or "%c"
> @@ -1830,14 +1830,14 @@ struct FPrintFOpt : public LibCallOptimi
> if (FormatStr[1] == 'c') {
> // fprintf(F, "%c", chr) --> fputc(chr, F)
> if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return 0;
> - return EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, TD, TLI);
> + return EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, DL, TLI);
> }
>
> if (FormatStr[1] == 's') {
> // fprintf(F, "%s", str) --> fputs(str, F)
> if (!CI->getArgOperand(2)->getType()->isPointerTy())
> return 0;
> - return EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TD, TLI);
> + return EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, DL, TLI);
> }
> return 0;
> }
> @@ -1897,7 +1897,7 @@ struct FWriteOpt : public LibCallOptimiz
> // This optimisation is only valid, if the return value is unused.
> if (Bytes == 1 && CI->use_empty()) { // fwrite(S,1,1,F) -> fputc(S[0],F)
> Value *Char = B.CreateLoad(CastToCStr(CI->getArgOperand(0), B), "char");
> - Value *NewCI = EmitFPutC(Char, CI->getArgOperand(3), B, TD, TLI);
> + Value *NewCI = EmitFPutC(Char, CI->getArgOperand(3), B, DL, TLI);
> return NewCI ? ConstantInt::get(CI->getType(), 1) : 0;
> }
>
> @@ -1911,7 +1911,7 @@ struct FPutsOpt : public LibCallOptimiza
> (void) ER.callOptimizer(Callee, CI, B);
>
> // These optimizations require DataLayout.
> - if (!TD) return 0;
> + if (!DL) return 0;
>
> // Require two pointers. Also, we can't optimize if return value is used.
> FunctionType *FT = Callee->getFunctionType();
> @@ -1925,8 +1925,8 @@ struct FPutsOpt : public LibCallOptimiza
> if (!Len) return 0;
> // Known to have no uses (see above).
> return EmitFWrite(CI->getArgOperand(0),
> - ConstantInt::get(TD->getIntPtrType(*Context), Len-1),
> - CI->getArgOperand(1), B, TD, TLI);
> + ConstantInt::get(DL->getIntPtrType(*Context), Len-1),
> + CI->getArgOperand(1), B, DL, TLI);
> }
> };
>
> @@ -1946,7 +1946,7 @@ struct PutsOpt : public LibCallOptimizat
>
> if (Str.empty() && CI->use_empty()) {
> // puts("") -> putchar('\n')
> - Value *Res = EmitPutChar(B.getInt32('\n'), B, TD, TLI);
> + Value *Res = EmitPutChar(B.getInt32('\n'), B, DL, TLI);
> if (CI->use_empty() || !Res) return Res;
> return B.CreateIntCast(Res, CI->getType(), true);
> }
> @@ -1960,7 +1960,7 @@ struct PutsOpt : public LibCallOptimizat
> namespace llvm {
>
> class LibCallSimplifierImpl {
> - const DataLayout *TD;
> + const DataLayout *DL;
> const TargetLibraryInfo *TLI;
> const LibCallSimplifier *LCS;
> bool UnsafeFPShrink;
> @@ -1970,11 +1970,11 @@ class LibCallSimplifierImpl {
> PowOpt Pow;
> Exp2Opt Exp2;
> public:
> - LibCallSimplifierImpl(const DataLayout *TD, const TargetLibraryInfo *TLI,
> + LibCallSimplifierImpl(const DataLayout *DL, const TargetLibraryInfo *TLI,
> const LibCallSimplifier *LCS,
> bool UnsafeFPShrink = false)
> : Cos(UnsafeFPShrink), Pow(UnsafeFPShrink), Exp2(UnsafeFPShrink) {
> - this->TD = TD;
> + this->DL = DL;
> this->TLI = TLI;
> this->LCS = LCS;
> this->UnsafeFPShrink = UnsafeFPShrink;
> @@ -2233,15 +2233,15 @@ Value *LibCallSimplifierImpl::optimizeCa
> LibCallOptimization *LCO = lookupOptimization(CI);
> if (LCO) {
> IRBuilder<> Builder(CI);
> - return LCO->optimizeCall(CI, TD, TLI, LCS, Builder);
> + return LCO->optimizeCall(CI, DL, TLI, LCS, Builder);
> }
> return 0;
> }
>
> -LibCallSimplifier::LibCallSimplifier(const DataLayout *TD,
> +LibCallSimplifier::LibCallSimplifier(const DataLayout *DL,
> const TargetLibraryInfo *TLI,
> bool UnsafeFPShrink) {
> - Impl = new LibCallSimplifierImpl(TD, TLI, this, UnsafeFPShrink);
> + Impl = new LibCallSimplifierImpl(DL, TLI, this, UnsafeFPShrink);
> }
>
> LibCallSimplifier::~LibCallSimplifier() {
>
> Modified: llvm/trunk/lib/Transforms/Vectorize/BBVectorize.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/BBVectorize.cpp?rev=201827&r1=201826&r2=201827&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Vectorize/BBVectorize.cpp (original)
> +++ llvm/trunk/lib/Transforms/Vectorize/BBVectorize.cpp Thu Feb 20 18:06:31 2014
> @@ -201,7 +201,7 @@ namespace {
> AA = &P->getAnalysis<AliasAnalysis>();
> DT = &P->getAnalysis<DominatorTreeWrapperPass>().getDomTree();
> SE = &P->getAnalysis<ScalarEvolution>();
> - TD = P->getAnalysisIfAvailable<DataLayout>();
> + DL = P->getAnalysisIfAvailable<DataLayout>();
> TTI = IgnoreTargetInfo ? 0 : &P->getAnalysis<TargetTransformInfo>();
> }
>
> @@ -214,7 +214,7 @@ namespace {
> AliasAnalysis *AA;
> DominatorTree *DT;
> ScalarEvolution *SE;
> - DataLayout *TD;
> + DataLayout *DL;
> const TargetTransformInfo *TTI;
>
> // FIXME: const correct?
> @@ -436,7 +436,7 @@ namespace {
> AA = &getAnalysis<AliasAnalysis>();
> DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
> SE = &getAnalysis<ScalarEvolution>();
> - TD = getAnalysisIfAvailable<DataLayout>();
> + DL = getAnalysisIfAvailable<DataLayout>();
> TTI = IgnoreTargetInfo ? 0 : &getAnalysis<TargetTransformInfo>();
>
> return vectorizeBB(BB);
> @@ -634,11 +634,11 @@ namespace {
> int64_t Offset = IntOff->getSExtValue();
>
> Type *VTy = IPtr->getType()->getPointerElementType();
> - int64_t VTyTSS = (int64_t) TD->getTypeStoreSize(VTy);
> + int64_t VTyTSS = (int64_t) DL->getTypeStoreSize(VTy);
>
> Type *VTy2 = JPtr->getType()->getPointerElementType();
> if (VTy != VTy2 && Offset < 0) {
> - int64_t VTy2TSS = (int64_t) TD->getTypeStoreSize(VTy2);
> + int64_t VTy2TSS = (int64_t) DL->getTypeStoreSize(VTy2);
> OffsetInElmts = Offset/VTy2TSS;
> return (abs64(Offset) % VTy2TSS) == 0;
> }
> @@ -821,7 +821,7 @@ namespace {
>
> // It is important to cleanup here so that future iterations of this
> // function have less work to do.
> - (void) SimplifyInstructionsInBlock(&BB, TD, AA->getTargetLibraryInfo());
> + (void) SimplifyInstructionsInBlock(&BB, DL, AA->getTargetLibraryInfo());
> return true;
> }
>
> @@ -876,7 +876,7 @@ namespace {
> }
>
> // We can't vectorize memory operations without target data
> - if (TD == 0 && IsSimpleLoadStore)
> + if (DL == 0 && IsSimpleLoadStore)
> return false;
>
> Type *T1, *T2;
> @@ -913,7 +913,7 @@ namespace {
> if (T2->isX86_FP80Ty() || T2->isPPC_FP128Ty() || T2->isX86_MMXTy())
> return false;
>
> - if ((!Config.VectorizePointers || TD == 0) &&
> + if ((!Config.VectorizePointers || DL == 0) &&
> (T1->getScalarType()->isPointerTy() ||
> T2->getScalarType()->isPointerTy()))
> return false;
> @@ -977,7 +977,7 @@ namespace {
> // with the lower offset has an alignment suitable for the
> // vector type.
>
> - unsigned VecAlignment = TD->getPrefTypeAlignment(VType);
> + unsigned VecAlignment = DL->getPrefTypeAlignment(VType);
> if (BottomAlignment < VecAlignment)
> return false;
> }
>
>
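For anyone carrying an out-of-tree pass that still spells the member TD, the cleanup is the same mechanical rename. Below is a minimal sketch of the post-commit convention; it is a hypothetical pass, not code from the patch, and it only mirrors the getAnalysisIfAvailable<DataLayout>() and getIntPtrType() uses visible in the diff above:

  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/Type.h"
  #include "llvm/Pass.h"
  using namespace llvm;

  namespace {
  // Hypothetical out-of-tree pass, shown only to illustrate the naming
  // convention after r201827: the DataLayout member is DL, not TD.
  // (Pass registration is omitted; this is just a sketch.)
  struct ExampleRenamePass : public FunctionPass {
    static char ID;
    const DataLayout *DL;   // null when no data layout is available

    ExampleRenamePass() : FunctionPass(ID), DL(0) {}

    virtual bool runOnFunction(Function &F) {
      DL = getAnalysisIfAvailable<DataLayout>();
      if (!DL)
        return false;  // bail out, as the DataLayout-dependent optimizations do
      // Query the pointer-sized integer type, as SimplifyLibCalls does above.
      Type *IntPtrTy = DL->getIntPtrType(F.getContext());
      (void)IntPtrTy;
      return false;    // no change to the IR
    }
  };
  }

  char ExampleRenamePass::ID = 0;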