<div dir="ltr">I woke up in the middle of last night and seemed to remember that nobody said thanks for doing this long-needed fixup.<div><br></div><div>Well, Thanks!</div><div><br></div><div>-- Sean Silva</div></div><div class="gmail_extra">
<br><br><div class="gmail_quote">On Thu, Feb 20, 2014 at 7:06 PM, Rafael Espindola <span dir="ltr"><<a href="mailto:rafael.espindola@gmail.com" target="_blank">rafael.espindola@gmail.com</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">
Author: rafael
Date: Thu Feb 20 18:06:31 2014
New Revision: 201827

URL: http://llvm.org/viewvc/llvm-project?rev=201827&view=rev
Log:
Rename many DataLayout variables from TD to DL.

I am really sorry for the noise, but the current state where some parts of the
code use TD (from the old name: TargetData) and other parts use DL makes it
hard to write a patch that changes where those variables come from and how
they are passed along.
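
To make the pattern concrete, here is a minimal, hypothetical sketch of one
such rename (the SizeHelper struct is invented for illustration; it is not a
hunk from this commit). The pointer keeps its type and its may-be-null
contract; only the name changes, along with every use site:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include <cstdint>

namespace {
struct SizeHelper {
  // Before: const DataLayout *TD;  -- "TD" was a leftover from the class's
  // old name, TargetData. After the rename the member matches the type name.
  const llvm::DataLayout *DL; // DataLayout if available, or null.

  explicit SizeHelper(const llvm::DataLayout *DL) : DL(DL) {}

  // Use sites are renamed the same mechanical way: TD->... becomes DL->...
  uint64_t typeSizeOrZero(llvm::Type *Ty) const {
    return DL ? DL->getTypeAllocSize(Ty) : 0;
  }
};
} // namespace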

Modified:
    llvm/trunk/lib/Analysis/IPA/InlineCost.cpp
    llvm/trunk/lib/Analysis/InstructionSimplify.cpp
    llvm/trunk/lib/Analysis/Lint.cpp
    llvm/trunk/lib/Transforms/IPO/ConstantMerge.cpp
    llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp
    llvm/trunk/lib/Transforms/IPO/MergeFunctions.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstCombine.h
    llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
    llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
    llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp
    llvm/trunk/lib/Transforms/Instrumentation/BoundsChecking.cpp
    llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp
    llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
    llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp
    llvm/trunk/lib/Transforms/Scalar/GVN.cpp
    llvm/trunk/lib/Transforms/Scalar/GlobalMerge.cpp
    llvm/trunk/lib/Transforms/Scalar/IndVarSimplify.cpp
    llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp
    llvm/trunk/lib/Transforms/Scalar/LICM.cpp
    llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
    llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp
    llvm/trunk/lib/Transforms/Scalar/SCCP.cpp
    llvm/trunk/lib/Transforms/Scalar/ScalarReplAggregates.cpp
    llvm/trunk/lib/Transforms/Utils/CloneFunction.cpp
    llvm/trunk/lib/Transforms/Utils/SimplifyCFG.cpp
    llvm/trunk/lib/Transforms/Utils/SimplifyIndVar.cpp
    llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp
    llvm/trunk/lib/Transforms/Vectorize/BBVectorize.cpp

Modified: llvm/trunk/lib/Analysis/IPA/InlineCost.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/IPA/InlineCost.cpp?rev=201827&r1=201826&r2=201827&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/IPA/InlineCost.cpp (original)
+++ llvm/trunk/lib/Analysis/IPA/InlineCost.cpp Thu Feb 20 18:06:31 2014
@@ -43,7 +43,7 @@ class CallAnalyzer : public InstVisitor<
friend class InstVisitor<CallAnalyzer, bool>;

// DataLayout if available, or null.
- const DataLayout *const TD;
+ const DataLayout *const DL;

/// The TargetTransformInfo available for this compilation.
const TargetTransformInfo &TTI;
@@ -142,9 +142,9 @@ class CallAnalyzer : public InstVisitor<
bool visitUnreachableInst(UnreachableInst &I);

public:
- CallAnalyzer(const DataLayout *TD, const TargetTransformInfo &TTI,
+ CallAnalyzer(const DataLayout *DL, const TargetTransformInfo &TTI,
Function &Callee, int Threshold)
- : TD(TD), TTI(TTI), F(Callee), Threshold(Threshold), Cost(0),
+ : DL(DL), TTI(TTI), F(Callee), Threshold(Threshold), Cost(0),
IsCallerRecursive(false), IsRecursiveCall(false),
ExposesReturnsTwice(false), HasDynamicAlloca(false),
ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
@@ -256,10 +256,10 @@ bool CallAnalyzer::isGEPOffsetConstant(G
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
- if (!TD)
+ if (!DL)
return false;

- unsigned IntPtrWidth = TD->getPointerSizeInBits();
+ unsigned IntPtrWidth = DL->getPointerSizeInBits();
assert(IntPtrWidth == Offset.getBitWidth());

for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
@@ -275,12 +275,12 @@ bool CallAnalyzer::accumulateGEPOffset(G
// Handle a struct index, which adds its field offset to the pointer.
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
unsigned ElementIdx = OpC->getZExtValue();
- const StructLayout *SL = TD->getStructLayout(STy);
+ const StructLayout *SL = DL->getStructLayout(STy);
Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
continue;
}

- APInt TypeSize(IntPtrWidth, TD->getTypeAllocSize(GTI.getIndexedType()));
+ APInt TypeSize(IntPtrWidth, DL->getTypeAllocSize(GTI.getIndexedType()));
Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
}
return true;
@@ -293,7 +293,7 @@ bool CallAnalyzer::visitAlloca(AllocaIns
// Accumulate the allocated size.
if (I.isStaticAlloca()) {
Type *Ty = I.getAllocatedType();
- AllocatedSize += (TD ? TD->getTypeAllocSize(Ty) :
+ AllocatedSize += (DL ? DL->getTypeAllocSize(Ty) :
Ty->getPrimitiveSizeInBits());
}

@@ -330,7 +330,7 @@ bool CallAnalyzer::visitGetElementPtr(Ge

// Try to fold GEPs of constant-offset call site argument pointers. This
// requires target data and inbounds GEPs.
- if (TD && I.isInBounds()) {
+ if (DL && I.isInBounds()) {
// Check if we have a base + offset for the pointer.
Value *Ptr = I.getPointerOperand();
std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
@@ -412,7 +412,7 @@ bool CallAnalyzer::visitPtrToInt(PtrToIn
// Track base/offset pairs when converted to a plain integer provided the
// integer is large enough to represent the pointer.
unsigned IntegerSize = I.getType()->getScalarSizeInBits();
- if (TD && IntegerSize >= TD->getPointerSizeInBits()) {
+ if (DL && IntegerSize >= DL->getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset
= ConstantOffsetPtrs.lookup(I.getOperand(0));
if (BaseAndOffset.first)
@@ -449,7 +449,7 @@ bool CallAnalyzer::visitIntToPtr(IntToPt
// modifications provided the integer is not too large.
Value *Op = I.getOperand(0);
unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
- if (TD && IntegerSize <= TD->getPointerSizeInBits()) {
+ if (DL && IntegerSize <= DL->getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
if (BaseAndOffset.first)
ConstantOffsetPtrs[&I] = BaseAndOffset;
@@ -488,7 +488,7 @@ bool CallAnalyzer::visitUnaryInstruction
COp = SimplifiedValues.lookup(Operand);
if (COp)
if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
- COp, TD)) {
+ COp, DL)) {
SimplifiedValues[&I] = C;
return true;
}
@@ -602,7 +602,7 @@ bool CallAnalyzer::visitBinaryOperator(B
if (!isa<Constant>(RHS))
if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
RHS = SimpleRHS;
- Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, TD);
+ Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);
if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
SimplifiedValues[&I] = C;
return true;
@@ -784,7 +784,7 @@ bool CallAnalyzer::visitCallSite(CallSit
// during devirtualization and so we want to give it a hefty bonus for
// inlining, but cap that bonus in the event that inlining wouldn't pan
// out. Pretend to inline the function, with a custom threshold.
- CallAnalyzer CA(TD, TTI, *F, InlineConstants::IndirectCallThreshold);
+ CallAnalyzer CA(DL, TTI, *F, InlineConstants::IndirectCallThreshold);
if (CA.analyzeCall(CS)) {
// We were able to inline the indirect call! Subtract the cost from the
// bonus we want to apply, but don't go below zero.
@@ -931,10 +931,10 @@ bool CallAnalyzer::analyzeBlock(BasicBlo
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
- if (!TD || !V->getType()->isPointerTy())
+ if (!DL || !V->getType()->isPointerTy())
return 0;

- unsigned IntPtrWidth = TD->getPointerSizeInBits();
+ unsigned IntPtrWidth = DL->getPointerSizeInBits();
APInt Offset = APInt::getNullValue(IntPtrWidth);

// Even though we don't look through PHI nodes, we could be called on an
@@ -958,7 +958,7 @@ ConstantInt *CallAnalyzer::stripAndCompu
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
} while (Visited.insert(V));

- Type *IntPtrTy = TD->getIntPtrType(V->getContext());
+ Type *IntPtrTy = DL->getIntPtrType(V->getContext());
return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}

@@ -993,12 +993,12 @@ bool CallAnalyzer::analyzeCall(CallSite
// Give out bonuses per argument, as the instructions setting them up will
// be gone after inlining.
for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
- if (TD && CS.isByValArgument(I)) {
+ if (DL && CS.isByValArgument(I)) {
// We approximate the number of loads and stores needed by dividing the
// size of the byval type by the target's pointer size.
PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
- unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
- unsigned PointerSize = TD->getPointerSizeInBits();
+ unsigned TypeSize = DL->getTypeSizeInBits(PTy->getElementType());
+ unsigned PointerSize = DL->getPointerSizeInBits();
// Ceiling division.
unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;


Modified: llvm/trunk/lib/Analysis/InstructionSimplify.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/InstructionSimplify.cpp?rev=201827&r1=201826&r2=201827&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/InstructionSimplify.cpp (original)
+++ llvm/trunk/lib/Analysis/InstructionSimplify.cpp Thu Feb 20 18:06:31 2014
@@ -42,12 +42,12 @@ STATISTIC(NumFactor , "Number of factori
STATISTIC(NumReassoc, "Number of reassociations");

struct Query {
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
const DominatorTree *DT;

- Query(const DataLayout *td, const TargetLibraryInfo *tli,
- const DominatorTree *dt) : TD(td), TLI(tli), DT(dt) {}
+ Query(const DataLayout *DL, const TargetLibraryInfo *tli,
+ const DominatorTree *dt) : DL(DL), TLI(tli), DT(dt) {}
};

static Value *SimplifyAndInst(Value *, Value *, const Query &, unsigned);
@@ -595,7 +595,7 @@ static Value *SimplifyAddInst(Value *Op0
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Add, CLHS->getType(), Ops,
- Q.TD, Q.TLI);
+ Q.DL, Q.TLI);
}

// Canonicalize the constant to the RHS.
@@ -651,9 +651,9 @@ static Value *SimplifyAddInst(Value *Op0
}

Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
+ return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (DL, TLI, DT),
RecursionLimit);
}

@@ -667,17 +667,17 @@ Value *llvm::SimplifyAddInst(Value *Op0,
/// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
/// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
/// folding.
-static Constant *stripAndComputeConstantOffsets(const DataLayout *TD,
+static Constant *stripAndComputeConstantOffsets(const DataLayout *DL,
Value *&V,
bool AllowNonInbounds = false) {
assert(V->getType()->getScalarType()->isPointerTy());

// Without DataLayout, just be conservative for now. Theoretically, more could
// be done in this case.
- if (!TD)
+ if (!DL)
return ConstantInt::get(IntegerType::get(V->getContext(), 64), 0);

- Type *IntPtrTy = TD->getIntPtrType(V->getType())->getScalarType();
+ Type *IntPtrTy = DL->getIntPtrType(V->getType())->getScalarType();
APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());

// Even though we don't look through PHI nodes, we could be called on an
@@ -687,7 +687,7 @@ static Constant *stripAndComputeConstant
do {
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
if ((!AllowNonInbounds && !GEP->isInBounds()) ||
- !GEP->accumulateConstantOffset(*TD, Offset))
+ !GEP->accumulateConstantOffset(*DL, Offset))
break;
V = GEP->getPointerOperand();
} else if (Operator::getOpcode(V) == Instruction::BitCast) {
@@ -712,10 +712,10 @@ static Constant *stripAndComputeConstant

/// \brief Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns zero.
-static Constant *computePointerDifference(const DataLayout *TD,
+static Constant *computePointerDifference(const DataLayout *DL,
Value *LHS, Value *RHS) {
- Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS);
- Constant *RHSOffset = stripAndComputeConstantOffsets(TD, RHS);
+ Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
+ Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

// If LHS and RHS are not related via constant offsets to the same base
// value, there is nothing we can do here.
@@ -737,7 +737,7 @@ static Value *SimplifySubInst(Value *Op0
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Sub, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}

// X - undef -> undef
@@ -831,7 +831,7 @@ static Value *SimplifySubInst(Value *Op0
// Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
if (match(Op0, m_PtrToInt(m_Value(X))) &&
match(Op1, m_PtrToInt(m_Value(Y))))
- if (Constant *Result = computePointerDifference(Q.TD, X, Y))
+ if (Constant *Result = computePointerDifference(Q.DL, X, Y))
return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);

// Mul distributes over Sub. Try some generic simplifications based on this.
@@ -857,9 +857,9 @@ static Value *SimplifySubInst(Value *Op0
}

Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
+ return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (DL, TLI, DT),
RecursionLimit);
}

@@ -871,7 +871,7 @@ static Value *SimplifyFAddInst(Value *Op
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::FAdd, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}

// Canonicalize the constant to the RHS.
@@ -913,7 +913,7 @@ static Value *SimplifyFSubInst(Value *Op
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::FSub, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
}

@@ -951,7 +951,7 @@ static Value *SimplifyFMulInst(Value *Op
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::FMul, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}

// Canonicalize the constant to the RHS.
@@ -977,7 +977,7 @@ static Value *SimplifyMulInst(Value *Op0
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Mul, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}

// Canonicalize the constant to the RHS.
@@ -1035,29 +1035,29 @@ static Value *SimplifyMulInst(Value *Op0
}

Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFAddInst(Op0, Op1, FMF, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyFAddInst(Op0, Op1, FMF, Query (DL, TLI, DT), RecursionLimit);
}

Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFSubInst(Op0, Op1, FMF, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyFSubInst(Op0, Op1, FMF, Query (DL, TLI, DT), RecursionLimit);
}

Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1,
FastMathFlags FMF,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFMulInst(Op0, Op1, FMF, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyFMulInst(Op0, Op1, FMF, Query (DL, TLI, DT), RecursionLimit);
}

-Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyMulInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyMulInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}

/// SimplifyDiv - Given operands for an SDiv or UDiv, see if we can
@@ -1067,7 +1067,7 @@ static Value *SimplifyDiv(Instruction::B
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
- return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.TD, Q.TLI);
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL, Q.TLI);
}
}

@@ -1142,10 +1142,10 @@ static Value *SimplifySDivInst(Value *Op
return 0;
}

-Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifySDivInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}

/// SimplifyUDivInst - Given operands for a UDiv, see if we can
@@ -1158,10 +1158,10 @@ static Value *SimplifyUDivInst(Value *Op
return 0;
}

-Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyUDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyUDivInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}

static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const Query &Q,
@@ -1177,10 +1177,10 @@ static Value *SimplifyFDivInst(Value *Op
return 0;
}

-Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyFDivInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}

/// SimplifyRem - Given operands for an SRem or URem, see if we can
@@ -1190,7 +1190,7 @@ static Value *SimplifyRem(Instruction::B
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
- return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.TD, Q.TLI);
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL, Q.TLI);
}
}

@@ -1247,10 +1247,10 @@ static Value *SimplifySRemInst(Value *Op
return 0;
}

-Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifySRemInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}

/// SimplifyURemInst - Given operands for a URem, see if we can
@@ -1263,10 +1263,10 @@ static Value *SimplifyURemInst(Value *Op
return 0;
}

-Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyURemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyURemInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}

static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const Query &,
@@ -1282,10 +1282,10 @@ static Value *SimplifyFRemInst(Value *Op
return 0;
}

-Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyFRemInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}

/// isUndefShift - Returns true if a shift by \c Amount always yields undef.
@@ -1322,7 +1322,7 @@ static Value *SimplifyShift(unsigned Opc
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
- return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.TD, Q.TLI);
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL, Q.TLI);
}
}

@@ -1372,9 +1372,9 @@ static Value *SimplifyShlInst(Value *Op0
}

Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
+ return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (DL, TLI, DT),
RecursionLimit);
}

@@ -1403,10 +1403,10 @@ static Value *SimplifyLShrInst(Value *Op
}

Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyLShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
+ return ::SimplifyLShrInst(Op0, Op1, isExact, Query (DL, TLI, DT),
RecursionLimit);
}

@@ -1439,10 +1439,10 @@ static Value *SimplifyAShrInst(Value *Op
}

Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyAShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
+ return ::SimplifyAShrInst(Op0, Op1, isExact, Query (DL, TLI, DT),
RecursionLimit);
}

@@ -1454,7 +1454,7 @@ static Value *SimplifyAndInst(Value *Op0
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::And, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}

// Canonicalize the constant to the RHS.
@@ -1539,10 +1539,10 @@ static Value *SimplifyAndInst(Value *Op0
return 0;
}

-Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyAndInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyAndInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}

/// SimplifyOrInst - Given operands for an Or, see if we can
@@ -1553,7 +1553,7 @@ static Value *SimplifyOrInst(Value *Op0,
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Or, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}

// Canonicalize the constant to the RHS.
@@ -1633,10 +1633,10 @@ static Value *SimplifyOrInst(Value *Op0,
return 0;
}

-Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyOrInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyOrInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}

/// SimplifyXorInst - Given operands for a Xor, see if we can
@@ -1647,7 +1647,7 @@ static Value *SimplifyXorInst(Value *Op0
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Xor, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}

// Canonicalize the constant to the RHS.
@@ -1693,10 +1693,10 @@ static Value *SimplifyXorInst(Value *Op0
return 0;
}

-Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyXorInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyXorInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}

static Type *GetCompareTy(Value *Op) {
@@ -1751,7 +1751,7 @@ static Value *ExtractEquivalentCondition
// If the C and C++ standards are ever made sufficiently restrictive in this
// area, it may be possible to update LLVM's semantics accordingly and reinstate
// this optimization.
-static Constant *computePointerICmp(const DataLayout *TD,
+static Constant *computePointerICmp(const DataLayout *DL,
const TargetLibraryInfo *TLI,
CmpInst::Predicate Pred,
Value *LHS, Value *RHS) {
@@ -1793,8 +1793,8 @@ static Constant *computePointerICmp(cons
// numerous hazards. AliasAnalysis and its utilities rely on special rules
// governing loads and stores which don't apply to icmps. Also, AliasAnalysis
// doesn't need to guarantee pointer inequality when it says NoAlias.
- Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS);
- Constant *RHSOffset = stripAndComputeConstantOffsets(TD, RHS);
+ Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
+ Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

// If LHS and RHS are related via constant offsets to the same base
// value, we can replace it with an icmp which just compares the offsets.
@@ -1838,8 +1838,8 @@ static Constant *computePointerICmp(cons
ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
uint64_t LHSSize, RHSSize;
if (LHSOffsetCI && RHSOffsetCI &&
- getObjectSize(LHS, LHSSize, TD, TLI) &&
- getObjectSize(RHS, RHSSize, TD, TLI)) {
+ getObjectSize(LHS, LHSSize, DL, TLI) &&
+ getObjectSize(RHS, RHSSize, DL, TLI)) {
const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
if (!LHSOffsetValue.isNegative() &&
@@ -1865,8 +1865,8 @@ static Constant *computePointerICmp(cons
// equality comparisons concerning the result. We avoid walking the whole
// chain again by starting where the last calls to
// stripAndComputeConstantOffsets left off and accumulate the offsets.
- Constant *LHSNoBound = stripAndComputeConstantOffsets(TD, LHS, true);
- Constant *RHSNoBound = stripAndComputeConstantOffsets(TD, RHS, true);
+ Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
+ Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
if (LHS == RHS)
return ConstantExpr::getICmp(Pred,
ConstantExpr::getAdd(LHSOffset, LHSNoBound),
@@ -1886,7 +1886,7 @@ static Value *SimplifyICmpInst(unsigned

if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
if (Constant *CRHS = dyn_cast<Constant>(RHS))
- return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.TD, Q.TLI);
+ return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);

// If we have a constant, make sure it is on the RHS.
std::swap(LHS, RHS);
@@ -1950,40 +1950,40 @@ static Value *SimplifyICmpInst(unsigned
return getTrue(ITy);
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_ULE:
- if (isKnownNonZero(LHS, Q.TD))
+ if (isKnownNonZero(LHS, Q.DL))
return getFalse(ITy);
break;
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGT:
- if (isKnownNonZero(LHS, Q.TD))
+ if (isKnownNonZero(LHS, Q.DL))
return getTrue(ITy);
break;
case ICmpInst::ICMP_SLT:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
if (LHSKnownNegative)
return getTrue(ITy);
if (LHSKnownNonNegative)
return getFalse(ITy);
break;
case ICmpInst::ICMP_SLE:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
if (LHSKnownNegative)
return getTrue(ITy);
- if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.TD))
+ if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL))
return getFalse(ITy);
break;
case ICmpInst::ICMP_SGE:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
if (LHSKnownNegative)
return getFalse(ITy);
if (LHSKnownNonNegative)
return getTrue(ITy);
break;
case ICmpInst::ICMP_SGT:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
if (LHSKnownNegative)
return getFalse(ITy);
- if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.TD))
+ if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL))
return getTrue(ITy);
break;
}
@@ -2066,8 +2066,8 @@ static Value *SimplifyICmpInst(unsigned

// Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
// if the integer type is the same size as the pointer type.
- if (MaxRecurse && Q.TD && isa<PtrToIntInst>(LI) &&
- Q.TD->getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
+ if (MaxRecurse && Q.DL && isa<PtrToIntInst>(LI) &&
+ Q.DL->getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
// Transfer the cast to the constant.
if (Value *V = SimplifyICmpInst(Pred, SrcOp,
@@ -2287,7 +2287,7 @@ static Value *SimplifyICmpInst(unsigned
break;
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.TD);
+ ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL);
if (!KnownNonNegative)
break;
// fall-through
@@ -2297,7 +2297,7 @@ static Value *SimplifyICmpInst(unsigned
return getFalse(ITy);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.TD);
+ ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL);
if (!KnownNonNegative)
break;
// fall-through
@@ -2316,7 +2316,7 @@ static Value *SimplifyICmpInst(unsigned
break;
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.TD);
+ ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL);
if (!KnownNonNegative)
break;
// fall-through
@@ -2326,7 +2326,7 @@ static Value *SimplifyICmpInst(unsigned
return getTrue(ITy);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.TD);
+ ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL);
if (!KnownNonNegative)
break;
// fall-through
@@ -2569,7 +2569,7 @@ static Value *SimplifyICmpInst(unsigned
// Simplify comparisons of related pointers using a powerful, recursive
// GEP-walk when we have target data available..
if (LHS->getType()->isPointerTy())
- if (Constant *C = computePointerICmp(Q.TD, Q.TLI, Pred, LHS, RHS))
+ if (Constant *C = computePointerICmp(Q.DL, Q.TLI, Pred, LHS, RHS))
return C;

if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
@@ -2609,10 +2609,10 @@ static Value *SimplifyICmpInst(unsigned
}

Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
+ return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (DL, TLI, DT),
RecursionLimit);
}

@@ -2625,7 +2625,7 @@ static Value *SimplifyFCmpInst(unsigned

if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
if (Constant *CRHS = dyn_cast<Constant>(RHS))
- return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.TD, Q.TLI);
+ return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);

// If we have a constant, make sure it is on the RHS.
std::swap(LHS, RHS);
@@ -2706,10 +2706,10 @@ static Value *SimplifyFCmpInst(unsigned
}

Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
+ return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (DL, TLI, DT),
RecursionLimit);
}

@@ -2745,10 +2745,10 @@ static Value *SimplifySelectInst(Value *
}

Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (TD, TLI, DT),
+ return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (DL, TLI, DT),
RecursionLimit);
}

@@ -2776,9 +2776,9 @@ static Value *SimplifyGEPInst(ArrayRef<V
if (match(Ops[1], m_Zero()))
return Ops[0];
// getelementptr P, N -> P if P points to a type of zero size.
- if (Q.TD) {
+ if (Q.DL) {
Type *Ty = PtrTy->getElementType();
- if (Ty->isSized() && Q.TD->getTypeAllocSize(Ty) == 0)
+ if (Ty->isSized() && Q.DL->getTypeAllocSize(Ty) == 0)
return Ops[0];
}
}
@@ -2791,10 +2791,10 @@ static Value *SimplifyGEPInst(ArrayRef<V
return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1));
}

-Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD,
+Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyGEPInst(Ops, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyGEPInst(Ops, Query (DL, TLI, DT), RecursionLimit);
}

/// SimplifyInsertValueInst - Given operands for an InsertValueInst, see if we
@@ -2828,10 +2828,10 @@ static Value *SimplifyInsertValueInst(Va

Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (TD, TLI, DT),
+ return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (DL, TLI, DT),
RecursionLimit);
}

@@ -2871,15 +2871,15 @@ static Value *SimplifyPHINode(PHINode *P

static Value *SimplifyTruncInst(Value *Op, Type *Ty, const Query &Q, unsigned) {
if (Constant *C = dyn_cast<Constant>(Op))
- return ConstantFoldInstOperands(Instruction::Trunc, Ty, C, Q.TD, Q.TLI);
+ return ConstantFoldInstOperands(Instruction::Trunc, Ty, C, Q.DL, Q.TLI);

return 0;
}

-Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD,
+Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyTruncInst(Op, Ty, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyTruncInst(Op, Ty, Query (DL, TLI, DT), RecursionLimit);
}

//=== Helper functions for higher up the class hierarchy.
@@ -2924,7 +2924,7 @@ static Value *SimplifyBinOp(unsigned Opc
if (Constant *CLHS = dyn_cast<Constant>(LHS))
if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
Constant *COps[] = {CLHS, CRHS};
- return ConstantFoldInstOperands(Opcode, LHS->getType(), COps, Q.TD,
+ return ConstantFoldInstOperands(Opcode, LHS->getType(), COps, Q.DL,
Q.TLI);
}

@@ -2950,9 +2950,9 @@ static Value *SimplifyBinOp(unsigned Opc
}

Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyBinOp(Opcode, LHS, RHS, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyBinOp(Opcode, LHS, RHS, Query (DL, TLI, DT), RecursionLimit);
}

/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
@@ -2965,9 +2965,9 @@ static Value *SimplifyCmpInst(unsigned P
}

Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
+ return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (DL, TLI, DT),
RecursionLimit);
}

@@ -3040,136 +3040,136 @@ static Value *SimplifyCall(Value *V, Ite
}

Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin,
- User::op_iterator ArgEnd, const DataLayout *TD,
+ User::op_iterator ArgEnd, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(TD, TLI, DT),
+ return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(DL, TLI, DT),
RecursionLimit);
}

Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyCall(V, Args.begin(), Args.end(), Query(TD, TLI, DT),
+ return ::SimplifyCall(V, Args.begin(), Args.end(), Query(DL, TLI, DT),
RecursionLimit);
}

/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
-Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *TD,
+Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
Value *Result;

switch (I->getOpcode()) {
default:
- Result = ConstantFoldInstruction(I, TD, TLI);
+ Result = ConstantFoldInstruction(I, DL, TLI);
break;
case Instruction::FAdd:
Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
- I->getFastMathFlags(), TD, TLI, DT);
+ I->getFastMathFlags(), DL, TLI, DT);
break;
case Instruction::Add:
Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
case Instruction::FSub:
Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
- I->getFastMathFlags(), TD, TLI, DT);
+ I->getFastMathFlags(), DL, TLI, DT);
break;
case Instruction::Sub:
Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
case Instruction::FMul:
Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
- I->getFastMathFlags(), TD, TLI, DT);
+ I->getFastMathFlags(), DL, TLI, DT);
break;
case Instruction::Mul:
- Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::SDiv:
- Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::UDiv:
- Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::FDiv:
- Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::SRem:
- Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::URem:
- Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::FRem:
- Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::Shl:
Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
case Instruction::LShr:
Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->isExact(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
case Instruction::AShr:
Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->isExact(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
case Instruction::And:
- Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::Or:
- Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::Xor:
- Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::ICmp:
Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
- I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::FCmp:
Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
- I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::Select:
Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
- I->getOperand(2), TD, TLI, DT);
+ I->getOperand(2), DL, TLI, DT);
break;
case Instruction::GetElementPtr: {
SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
- Result = SimplifyGEPInst(Ops, TD, TLI, DT);
+ Result = SimplifyGEPInst(Ops, DL, TLI, DT);
break;
}
case Instruction::InsertValue: {
InsertValueInst *IV = cast<InsertValueInst>(I);
Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
IV->getInsertedValueOperand(),
- IV->getIndices(), TD, TLI, DT);
+ IV->getIndices(), DL, TLI, DT);
break;
}
case Instruction::PHI:
- Result = SimplifyPHINode(cast<PHINode>(I), Query (TD, TLI, DT));
+ Result = SimplifyPHINode(cast<PHINode>(I), Query (DL, TLI, DT));
break;
case Instruction::Call: {
CallSite CS(cast<CallInst>(I));
Result = SimplifyCall(CS.getCalledValue(), CS.arg_begin(), CS.arg_end(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
}
case Instruction::Trunc:
- Result = SimplifyTruncInst(I->getOperand(0), I->getType(), TD, TLI, DT);
+ Result = SimplifyTruncInst(I->getOperand(0), I->getType(), DL, TLI, DT);
break;
}

@@ -3191,7 +3191,7 @@ Value *llvm::SimplifyInstruc
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
bool Simplified = false;
@@ -3221,7 +3221,7 @@ static bool replaceAndRecursivelySimplif
I = Worklist[Idx];

// See if this instruction simplifies.
- SimpleV = SimplifyInstruction(I, TD, TLI, DT);
+ SimpleV = SimplifyInstruction(I, DL, TLI, DT);
if (!SimpleV)
continue;

@@ -3246,17 +3246,17 @@ static bool replaceAndRecursivelySimplif
}

bool llvm::recursivelySimplifyInstruction(Instruction *I,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return replaceAndRecursivelySimplifyImpl(I, 0, TD, TLI, DT);
+ return replaceAndRecursivelySimplifyImpl(I, 0, DL, TLI, DT);
}

bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
assert(SimpleV && "Must provide a simplified value.");
- return replaceAndRecursivelySimplifyImpl(I, SimpleV, TD, TLI, DT);
+ return replaceAndRecursivelySimplifyImpl(I, SimpleV, DL, TLI, DT);
}

Modified: llvm/trunk/lib/Analysis/Lint.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/Lint.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/Lint.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Analysis/Lint.cpp (original)<br>
+++ llvm/trunk/lib/Analysis/Lint.cpp Thu Feb 20 18:06:31 2014<br>
@@ -102,7 +102,7 @@ namespace {<br>
Module *Mod;<br>
AliasAnalysis *AA;<br>
DominatorTree *DT;<br>
- DataLayout *TD;<br>
+ DataLayout *DL;<br>
TargetLibraryInfo *TLI;<br>
<br>
std::string Messages;<br>
@@ -176,7 +176,7 @@ bool Lint::runOnFunction(Function &F) {<br>
Mod = F.getParent();<br>
AA = &getAnalysis<AliasAnalysis>();<br>
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
TLI = &getAnalysis<TargetLibraryInfo>();<br>
visit(F);<br>
dbgs() << MessagesStr.str();<br>
@@ -247,7 +247,7 @@ void Lint::visitCallSite(CallSite CS) {<br>
Type *Ty =<br>
cast<PointerType>(Formal->getType())->getElementType();<br>
visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty),<br>
- TD ? TD->getABITypeAlignment(Ty) : 0,<br>
+ DL ? DL->getABITypeAlignment(Ty) : 0,<br>
Ty, MemRef::Read | MemRef::Write);<br>
}<br>
}<br>
@@ -414,7 +414,7 @@ void Lint::visitMemoryReference(Instruct<br>
// Only handles memory references that read/write something simple like an<br>
// alloca instruction or a global variable.<br>
int64_t Offset = 0;<br>
- if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, TD)) {<br>
+ if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL)) {<br>
// OK, so the access is to a constant offset from Ptr. Check that Ptr is<br>
// something we can handle and if so extract the size of this base object<br>
// along with its alignment.<br>
@@ -423,21 +423,21 @@ void Lint::visitMemoryReference(Instruct<br>
<br>
if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {<br>
Type *ATy = AI->getAllocatedType();<br>
- if (TD && !AI->isArrayAllocation() && ATy->isSized())<br>
- BaseSize = TD->getTypeAllocSize(ATy);<br>
+ if (DL && !AI->isArrayAllocation() && ATy->isSized())<br>
+ BaseSize = DL->getTypeAllocSize(ATy);<br>
BaseAlign = AI->getAlignment();<br>
- if (TD && BaseAlign == 0 && ATy->isSized())<br>
- BaseAlign = TD->getABITypeAlignment(ATy);<br>
+ if (DL && BaseAlign == 0 && ATy->isSized())<br>
+ BaseAlign = DL->getABITypeAlignment(ATy);<br>
} else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {<br>
// If the global may be defined differently in another compilation unit<br>
// then don't warn about funky memory accesses.<br>
if (GV->hasDefinitiveInitializer()) {<br>
Type *GTy = GV->getType()->getElementType();<br>
- if (TD && GTy->isSized())<br>
- BaseSize = TD->getTypeAllocSize(GTy);<br>
+ if (DL && GTy->isSized())<br>
+ BaseSize = DL->getTypeAllocSize(GTy);<br>
BaseAlign = GV->getAlignment();<br>
- if (TD && BaseAlign == 0 && GTy->isSized())<br>
- BaseAlign = TD->getABITypeAlignment(GTy);<br>
+ if (DL && BaseAlign == 0 && GTy->isSized())<br>
+ BaseAlign = DL->getABITypeAlignment(GTy);<br>
}<br>
}<br>
<br>
@@ -450,8 +450,8 @@ void Lint::visitMemoryReference(Instruct<br>
<br>
// Accesses that say that the memory is more aligned than it is are not<br>
// defined.<br>
- if (TD && Align == 0 && Ty && Ty->isSized())<br>
- Align = TD->getABITypeAlignment(Ty);<br>
+ if (DL && Align == 0 && Ty && Ty->isSized())<br>
+ Align = DL->getABITypeAlignment(Ty);<br>
Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),<br>
"Undefined behavior: Memory reference address is misaligned", &I);<br>
}<br>
@@ -542,22 +542,22 @@ static bool isZero(Value *V, DataLayout<br>
}<br>
<br>
void Lint::visitSDiv(BinaryOperator &I) {<br>
- Assert1(!isZero(I.getOperand(1), TD),<br>
+ Assert1(!isZero(I.getOperand(1), DL),<br>
"Undefined behavior: Division by zero", &I);<br>
}<br>
<br>
void Lint::visitUDiv(BinaryOperator &I) {<br>
- Assert1(!isZero(I.getOperand(1), TD),<br>
+ Assert1(!isZero(I.getOperand(1), DL),<br>
"Undefined behavior: Division by zero", &I);<br>
}<br>
<br>
void Lint::visitSRem(BinaryOperator &I) {<br>
- Assert1(!isZero(I.getOperand(1), TD),<br>
+ Assert1(!isZero(I.getOperand(1), DL),<br>
"Undefined behavior: Division by zero", &I);<br>
}<br>
<br>
void Lint::visitURem(BinaryOperator &I) {<br>
- Assert1(!isZero(I.getOperand(1), TD),<br>
+ Assert1(!isZero(I.getOperand(1), DL),<br>
"Undefined behavior: Division by zero", &I);<br>
}<br>
<br>
@@ -631,7 +631,7 @@ Value *Lint::findValueImpl(Value *V, boo<br>
// TODO: Look through eliminable cast pairs.<br>
// TODO: Look through calls with unique return values.<br>
// TODO: Look through vector insert/extract/shuffle.<br>
- V = OffsetOk ? GetUnderlyingObject(V, TD) : V->stripPointerCasts();<br>
+ V = OffsetOk ? GetUnderlyingObject(V, DL) : V->stripPointerCasts();<br>
if (LoadInst *L = dyn_cast<LoadInst>(V)) {<br>
BasicBlock::iterator BBI = L;<br>
BasicBlock *BB = L->getParent();<br>
@@ -651,7 +651,7 @@ Value *Lint::findValueImpl(Value *V, boo<br>
if (W != V)<br>
return findValueImpl(W, OffsetOk, Visited);<br>
} else if (CastInst *CI = dyn_cast<CastInst>(V)) {<br>
- if (CI->isNoopCast(TD ? TD->getIntPtrType(V->getContext()) :<br>
+ if (CI->isNoopCast(DL ? DL->getIntPtrType(V->getContext()) :<br>
Type::getInt64Ty(V->getContext())))<br>
return findValueImpl(CI->getOperand(0), OffsetOk, Visited);<br>
} else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {<br>
@@ -665,7 +665,7 @@ Value *Lint::findValueImpl(Value *V, boo<br>
if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),<br>
CE->getOperand(0)->getType(),<br>
CE->getType(),<br>
- TD ? TD->getIntPtrType(V->getContext()) :<br>
+ DL ? DL->getIntPtrType(V->getContext()) :<br>
Type::getInt64Ty(V->getContext())))<br>
return findValueImpl(CE->getOperand(0), OffsetOk, Visited);<br>
} else if (CE->getOpcode() == Instruction::ExtractValue) {<br>
@@ -678,10 +678,10 @@ Value *Lint::findValueImpl(Value *V, boo<br>
<br>
// As a last resort, try SimplifyInstruction or constant folding.<br>
if (Instruction *Inst = dyn_cast<Instruction>(V)) {<br>
- if (Value *W = SimplifyInstruction(Inst, TD, TLI, DT))<br>
+ if (Value *W = SimplifyInstruction(Inst, DL, TLI, DT))<br>
return findValueImpl(W, OffsetOk, Visited);<br>
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {<br>
- if (Value *W = ConstantFoldConstantExpression(CE, TD, TLI))<br>
+ if (Value *W = ConstantFoldConstantExpression(CE, DL, TLI))<br>
if (W != V)<br>
return findValueImpl(W, OffsetOk, Visited);<br>
}<br>
<br>
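Aside for anyone auditing the rename: the Lint hunks above all share one guarded-query shape, since DL is null whenever the module carries no target description. Here is a minimal standalone sketch of that pattern; the helper name and signature are mine, not from the patch.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"

// Sketch only: the null-guarded DataLayout queries Lint uses to bound a
// memory reference. Every size/alignment lookup is fenced by "DL &&".
static void baseSizeAndAlign(const llvm::GlobalVariable *GV,
                             const llvm::DataLayout *DL,
                             uint64_t &Size, unsigned &Align) {
  llvm::Type *GTy = GV->getType()->getElementType();
  Size = 0;
  if (DL && GTy->isSized())
    Size = DL->getTypeAllocSize(GTy);     // bytes occupied, incl. padding
  Align = GV->getAlignment();             // explicit alignment, 0 if unset
  if (DL && Align == 0 && GTy->isSized())
    Align = DL->getABITypeAlignment(GTy); // fall back to the ABI minimum
}

The misalignment Assert1 then compares the access alignment against MinAlign(BaseAlign, Offset), exactly as in the first hunk above.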
Modified: llvm/trunk/lib/Transforms/IPO/ConstantMerge.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/ConstantMerge.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/ConstantMerge.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/IPO/ConstantMerge.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/IPO/ConstantMerge.cpp Thu Feb 20 18:06:31 2014<br>
@@ -51,7 +51,7 @@ namespace {<br>
// alignment to a concrete value.<br>
unsigned getAlignment(GlobalVariable *GV) const;<br>
<br>
- const DataLayout *TD;<br>
+ const DataLayout *DL;<br>
};<br>
}<br>
<br>
@@ -89,20 +89,20 @@ static bool IsBetterCanonical(const Glob<br>
}<br>
<br>
bool ConstantMerge::hasKnownAlignment(GlobalVariable *GV) const {<br>
- return TD || GV->getAlignment() != 0;<br>
+ return DL || GV->getAlignment() != 0;<br>
}<br>
<br>
unsigned ConstantMerge::getAlignment(GlobalVariable *GV) const {<br>
unsigned Align = GV->getAlignment();<br>
if (Align)<br>
return Align;<br>
- if (TD)<br>
- return TD->getPreferredAlignment(GV);<br>
+ if (DL)<br>
+ return DL->getPreferredAlignment(GV);<br>
return 0;<br>
}<br>
<br>
bool ConstantMerge::runOnModule(Module &M) {<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
<br>
// Find all the globals that are marked "used". These cannot be merged.<br>
SmallPtrSet<const GlobalValue*, 8> UsedGlobals;<br>
<br>
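A quick note on why hasKnownAlignment matters here: without a DataLayout, a global whose explicit alignment is 0 has no alignment we can trust, so merging it could silently under-align the surviving copy. A hedged sketch of the decision; the helper names are invented, and only getAlignment and getPreferredAlignment are real API.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include <algorithm>

// Sketch: same fallback chain as ConstantMerge::getAlignment above.
static unsigned alignmentOf(const llvm::GlobalVariable *GV,
                            const llvm::DataLayout *DL) {
  if (unsigned A = GV->getAlignment())
    return A;                                // explicit alignment wins
  return DL ? DL->getPreferredAlignment(GV) : 0;
}

// Sketch: what a merge needs to preserve both users' assumptions.
static unsigned mergedAlignment(const llvm::GlobalVariable *A,
                                const llvm::GlobalVariable *B,
                                const llvm::DataLayout *DL) {
  return std::max(alignmentOf(A, DL), alignmentOf(B, DL));
}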
Modified: llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp Thu Feb 20 18:06:31 2014<br>
@@ -84,7 +84,7 @@ namespace {<br>
const GlobalStatus &GS);<br>
bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);<br>
<br>
- DataLayout *TD;<br>
+ DataLayout *DL;<br>
TargetLibraryInfo *TLI;<br>
};<br>
}<br>
@@ -266,7 +266,7 @@ static bool CleanupPointerRootUsers(Glob<br>
/// quick scan over the use list to clean up the easy and obvious cruft. This<br>
/// returns true if it made a change.<br>
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,<br>
- DataLayout *TD, TargetLibraryInfo *TLI) {<br>
+ DataLayout *DL, TargetLibraryInfo *TLI) {<br>
bool Changed = false;<br>
// Note that we need to use a weak value handle for the worklist items. When<br>
// we delete a constant array, we may also be holding pointer to one of its<br>
@@ -296,12 +296,12 @@ static bool CleanupConstantGlobalUsers(V<br>
Constant *SubInit = 0;<br>
if (Init)<br>
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);<br>
- Changed |= CleanupConstantGlobalUsers(CE, SubInit, TD, TLI);<br>
+ Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI);<br>
} else if ((CE->getOpcode() == Instruction::BitCast &&<br>
CE->getType()->isPointerTy()) ||<br>
CE->getOpcode() == Instruction::AddrSpaceCast) {<br>
// Pointer cast, delete any stores and memsets to the global.<br>
- Changed |= CleanupConstantGlobalUsers(CE, 0, TD, TLI);<br>
+ Changed |= CleanupConstantGlobalUsers(CE, 0, DL, TLI);<br>
}<br>
<br>
if (CE->use_empty()) {<br>
@@ -315,7 +315,7 @@ static bool CleanupConstantGlobalUsers(V<br>
Constant *SubInit = 0;<br>
if (!isa<ConstantExpr>(GEP->getOperand(0))) {<br>
ConstantExpr *CE =<br>
- dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, TD, TLI));<br>
+ dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, DL, TLI));<br>
if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)<br>
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);<br>
<br>
@@ -325,7 +325,7 @@ static bool CleanupConstantGlobalUsers(V<br>
if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())<br>
SubInit = Constant::getNullValue(GEP->getType()->getElementType());<br>
}<br>
- Changed |= CleanupConstantGlobalUsers(GEP, SubInit, TD, TLI);<br>
+ Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI);<br>
<br>
if (GEP->use_empty()) {<br>
GEP->eraseFromParent();<br>
@@ -342,7 +342,7 @@ static bool CleanupConstantGlobalUsers(V<br>
// us, and if they are all dead, nuke them without remorse.<br>
if (isSafeToDestroyConstant(C)) {<br>
C->destroyConstant();<br>
- CleanupConstantGlobalUsers(V, Init, TD, TLI);<br>
+ CleanupConstantGlobalUsers(V, Init, DL, TLI);<br>
return true;<br>
}<br>
}<br>
@@ -467,7 +467,7 @@ static bool GlobalUsersSafeToSRA(GlobalV<br>
/// behavior of the program in a more fine-grained way. We have determined that<br>
/// this transformation is safe already. We return the first global variable we<br>
/// insert so that the caller can reprocess it.<br>
-static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {<br>
+static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {<br>
// Make sure this global only has simple uses that we can SRA.<br>
if (!GlobalUsersSafeToSRA(GV))<br>
return 0;<br>
@@ -482,11 +482,11 @@ static GlobalVariable *SRAGlobal(GlobalV<br>
// Get the alignment of the global, either explicit or target-specific.<br>
unsigned StartAlignment = GV->getAlignment();<br>
if (StartAlignment == 0)<br>
- StartAlignment = TD.getABITypeAlignment(GV->getType());<br>
+ StartAlignment = DL.getABITypeAlignment(GV->getType());<br>
<br>
if (StructType *STy = dyn_cast<StructType>(Ty)) {<br>
NewGlobals.reserve(STy->getNumElements());<br>
- const StructLayout &Layout = *TD.getStructLayout(STy);<br>
+ const StructLayout &Layout = *DL.getStructLayout(STy);<br>
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {<br>
Constant *In = Init->getAggregateElement(i);<br>
assert(In && "Couldn't get element of initializer?");<br>
@@ -503,7 +503,7 @@ static GlobalVariable *SRAGlobal(GlobalV<br>
// propagate info to each field.<br>
uint64_t FieldOffset = Layout.getElementOffset(i);<br>
unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);<br>
- if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i)))<br>
+ if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i)))<br>
NGV->setAlignment(NewAlign);<br>
}<br>
} else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {<br>
@@ -517,8 +517,8 @@ static GlobalVariable *SRAGlobal(GlobalV<br>
return 0; // It's not worth it.<br>
NewGlobals.reserve(NumElements);<br>
<br>
- uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());<br>
- unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());<br>
+ uint64_t EltSize = DL.getTypeAllocSize(STy->getElementType());<br>
+ unsigned EltAlign = DL.getABITypeAlignment(STy->getElementType());<br>
for (unsigned i = 0, e = NumElements; i != e; ++i) {<br>
Constant *In = Init->getAggregateElement(i);<br>
assert(In && "Couldn't get element of initializer?");<br>
@@ -743,7 +743,7 @@ static bool OptimizeAwayTrappingUsesOfVa<br>
/// if the loaded value is dynamically null, then we know that they cannot be<br>
/// reachable with a null value, so we can optimize away the load.<br>
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,<br>
- DataLayout *TD,<br>
+ DataLayout *DL,<br>
TargetLibraryInfo *TLI) {<br>
bool Changed = false;<br>
<br>
@@ -792,7 +792,7 @@ static bool OptimizeAwayTrappingUsesOfLo<br>
Changed |= CleanupPointerRootUsers(GV, TLI);<br>
} else {<br>
Changed = true;<br>
- CleanupConstantGlobalUsers(GV, 0, TD, TLI);<br>
+ CleanupConstantGlobalUsers(GV, 0, DL, TLI);<br>
}<br>
if (GV->use_empty()) {<br>
DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n");<br>
@@ -807,10 +807,10 @@ static bool OptimizeAwayTrappingUsesOfLo<br>
/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the<br>
/// instructions that are foldable.<br>
static void ConstantPropUsersOf(Value *V,<br>
- DataLayout *TD, TargetLibraryInfo *TLI) {<br>
+ DataLayout *DL, TargetLibraryInfo *TLI) {<br>
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )<br>
if (Instruction *I = dyn_cast<Instruction>(*UI++))<br>
- if (Constant *NewC = ConstantFoldInstruction(I, TD, TLI)) {<br>
+ if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {<br>
I->replaceAllUsesWith(NewC);<br>
<br>
// Advance UI to the next non-I use to avoid invalidating it!<br>
@@ -830,7 +830,7 @@ static GlobalVariable *OptimizeGlobalAdd<br>
CallInst *CI,<br>
Type *AllocTy,<br>
ConstantInt *NElements,<br>
- DataLayout *TD,<br>
+ DataLayout *DL,<br>
TargetLibraryInfo *TLI) {<br>
DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');<br>
<br>
@@ -949,9 +949,9 @@ static GlobalVariable *OptimizeGlobalAdd<br>
// To further other optimizations, loop over all users of NewGV and try to<br>
// constant prop them. This will promote GEP instructions with constant<br>
// indices into GEP constant-exprs, which will allow global-opt to hack on it.<br>
- ConstantPropUsersOf(NewGV, TD, TLI);<br>
+ ConstantPropUsersOf(NewGV, DL, TLI);<br>
if (RepValue != NewGV)<br>
- ConstantPropUsersOf(RepValue, TD, TLI);<br>
+ ConstantPropUsersOf(RepValue, DL, TLI);<br>
<br>
return NewGV;<br>
}<br>
@@ -1278,7 +1278,7 @@ static void RewriteUsesOfLoadForHeapSRoA<br>
/// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break<br>
/// it up into multiple allocations of arrays of the fields.<br>
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,<br>
- Value *NElems, DataLayout *TD,<br>
+ Value *NElems, DataLayout *DL,<br>
const TargetLibraryInfo *TLI) {<br>
DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');<br>
Type *MAT = getMallocAllocatedType(CI, TLI);<br>
@@ -1307,10 +1307,10 @@ static GlobalVariable *PerformHeapAllocS<br>
GV->getThreadLocalMode());<br>
FieldGlobals.push_back(NGV);<br>
<br>
- unsigned TypeSize = TD->getTypeAllocSize(FieldTy);<br>
+ unsigned TypeSize = DL->getTypeAllocSize(FieldTy);<br>
if (StructType *ST = dyn_cast<StructType>(FieldTy))<br>
- TypeSize = TD->getStructLayout(ST)->getSizeInBytes();<br>
- Type *IntPtrTy = TD->getIntPtrType(CI->getType());<br>
+ TypeSize = DL->getStructLayout(ST)->getSizeInBytes();<br>
+ Type *IntPtrTy = DL->getIntPtrType(CI->getType());<br>
Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,<br>
ConstantInt::get(IntPtrTy, TypeSize),<br>
NElems, 0,<br>
@@ -1470,9 +1470,9 @@ static bool TryToOptimizeStoreOfMallocTo<br>
Type *AllocTy,<br>
AtomicOrdering Ordering,<br>
Module::global_iterator &GVI,<br>
- DataLayout *TD,<br>
+ DataLayout *DL,<br>
TargetLibraryInfo *TLI) {<br>
- if (!TD)<br>
+ if (!DL)<br>
return false;<br>
<br>
// If this is a malloc of an abstract type, don't touch it.<br>
@@ -1502,7 +1502,7 @@ static bool TryToOptimizeStoreOfMallocTo<br>
// This eliminates dynamic allocation, avoids an indirection accessing the<br>
// data, and exposes the resultant global to further GlobalOpt.<br>
// We cannot optimize the malloc if we cannot determine malloc array size.<br>
- Value *NElems = getMallocArraySize(CI, TD, TLI, true);<br>
+ Value *NElems = getMallocArraySize(CI, DL, TLI, true);<br>
if (!NElems)<br>
return false;<br>
<br>
@@ -1510,8 +1510,8 @@ static bool TryToOptimizeStoreOfMallocTo<br>
// Restrict this transformation to only working on small allocations<br>
// (2048 bytes currently), as we don't want to introduce a 16M global or<br>
// something.<br>
- if (NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {<br>
- GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD, TLI);<br>
+ if (NElements->getZExtValue() * DL->getTypeAllocSize(AllocTy) < 2048) {<br>
+ GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);<br>
return true;<br>
}<br>
<br>
@@ -1540,8 +1540,8 @@ static bool TryToOptimizeStoreOfMallocTo<br>
// If this is a fixed size array, transform the Malloc to be an alloc of<br>
// structs. malloc [100 x struct],1 -> malloc struct, 100<br>
if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {<br>
- Type *IntPtrTy = TD->getIntPtrType(CI->getType());<br>
- unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();<br>
+ Type *IntPtrTy = DL->getIntPtrType(CI->getType());<br>
+ unsigned TypeSize = DL->getStructLayout(AllocSTy)->getSizeInBytes();<br>
Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);<br>
Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());<br>
Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,<br>
@@ -1556,8 +1556,8 @@ static bool TryToOptimizeStoreOfMallocTo<br>
CI = cast<CallInst>(Malloc);<br>
}<br>
<br>
- GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, TLI, true),<br>
- TD, TLI);<br>
+ GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true),<br>
+ DL, TLI);<br>
return true;<br>
}<br>
<br>
@@ -1569,7 +1569,7 @@ static bool TryToOptimizeStoreOfMallocTo<br>
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,<br>
AtomicOrdering Ordering,<br>
Module::global_iterator &GVI,<br>
- DataLayout *TD, TargetLibraryInfo *TLI) {<br>
+ DataLayout *DL, TargetLibraryInfo *TLI) {<br>
// Ignore no-op GEPs and bitcasts.<br>
StoredOnceVal = StoredOnceVal->stripPointerCasts();<br>
<br>
@@ -1584,13 +1584,13 @@ static bool OptimizeOnceStoredGlobal(Glo<br>
SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());<br>
<br>
// Optimize away any trapping uses of the loaded value.<br>
- if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, TD, TLI))<br>
+ if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI))<br>
return true;<br>
} else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {<br>
Type *MallocType = getMallocAllocatedType(CI, TLI);<br>
if (MallocType &&<br>
TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI,<br>
- TD, TLI))<br>
+ DL, TLI))<br>
return true;<br>
}<br>
}<br>
@@ -1784,7 +1784,7 @@ bool GlobalOpt::ProcessInternalGlobal(Gl<br>
} else {<br>
// Delete any stores we can find to the global. We may not be able to<br>
// make it completely dead though.<br>
- Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);<br>
+ Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);<br>
}<br>
<br>
// If the global is dead now, delete it.<br>
@@ -1800,7 +1800,7 @@ bool GlobalOpt::ProcessInternalGlobal(Gl<br>
GV->setConstant(true);<br>
<br>
// Clean up any obviously simplifiable users now.<br>
- CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);<br>
+ CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);<br>
<br>
// If the global is dead now, just nuke it.<br>
if (GV->use_empty()) {<br>
@@ -1813,8 +1813,8 @@ bool GlobalOpt::ProcessInternalGlobal(Gl<br>
++NumMarked;<br>
return true;<br>
} else if (!GV->getInitializer()->getType()->isSingleValueType()) {<br>
- if (DataLayout *TD = getAnalysisIfAvailable<DataLayout>())<br>
- if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) {<br>
+ if (DataLayout *DL = getAnalysisIfAvailable<DataLayout>())<br>
+ if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *DL)) {<br>
GVI = FirstNewGV; // Don't skip the newly produced globals!<br>
return true;<br>
}<br>
@@ -1829,7 +1829,7 @@ bool GlobalOpt::ProcessInternalGlobal(Gl<br>
GV->setInitializer(SOVConstant);<br>
<br>
// Clean up any obviously simplifiable users now.<br>
- CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);<br>
+ CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);<br>
<br>
if (GV->use_empty()) {<br>
DEBUG(dbgs() << " *** Substituting initializer allowed us to "<br>
@@ -1846,7 +1846,7 @@ bool GlobalOpt::ProcessInternalGlobal(Gl<br>
// Try to optimize globals based on the knowledge that only one value<br>
// (besides its initializer) is ever stored to the global.<br>
if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI,<br>
- TD, TLI))<br>
+ DL, TLI))<br>
return true;<br>
<br>
// Otherwise, if the global was not a boolean, we can shrink it to be a<br>
@@ -1947,7 +1947,7 @@ bool GlobalOpt::OptimizeGlobalVars(Modul<br>
// Simplify the initializer.<br>
if (GV->hasInitializer())<br>
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {<br>
- Constant *New = ConstantFoldConstantExpression(CE, TD, TLI);<br>
+ Constant *New = ConstantFoldConstantExpression(CE, DL, TLI);<br>
if (New && New != CE)<br>
GV->setInitializer(New);<br>
}<br>
@@ -2070,7 +2070,7 @@ static GlobalVariable *InstallGlobalCtor<br>
static inline bool<br>
isSimpleEnoughValueToCommit(Constant *C,<br>
SmallPtrSet<Constant*, 8> &SimpleConstants,<br>
- const DataLayout *TD);<br>
+ const DataLayout *DL);<br>
<br>
<br>
/// isSimpleEnoughValueToCommit - Return true if the specified constant can be<br>
@@ -2083,7 +2083,7 @@ isSimpleEnoughValueToCommit(Constant *C,<br>
/// time.<br>
static bool isSimpleEnoughValueToCommitHelper(Constant *C,<br>
SmallPtrSet<Constant*, 8> &SimpleConstants,<br>
- const DataLayout *TD) {<br>
+ const DataLayout *DL) {<br>
// Simple integer, undef, constant aggregate zero, global addresses, etc are<br>
// all supported.<br>
if (C->getNumOperands() == 0 || isa<BlockAddress>(C) ||<br>
@@ -2095,7 +2095,7 @@ static bool isSimpleEnoughValueToCommitH<br>
isa<ConstantVector>(C)) {<br>
for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {<br>
Constant *Op = cast<Constant>(C->getOperand(i));<br>
- if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, TD))<br>
+ if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, DL))<br>
return false;<br>
}<br>
return true;<br>
@@ -2108,29 +2108,29 @@ static bool isSimpleEnoughValueToCommitH<br>
switch (CE->getOpcode()) {<br>
case Instruction::BitCast:<br>
// Bitcast is fine if the casted value is fine.<br>
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);<br>
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);<br>
<br>
case Instruction::IntToPtr:<br>
case Instruction::PtrToInt:<br>
// int <=> ptr is fine if the int type is the same size as the<br>
// pointer type.<br>
- if (!TD || TD->getTypeSizeInBits(CE->getType()) !=<br>
- TD->getTypeSizeInBits(CE->getOperand(0)->getType()))<br>
+ if (!DL || DL->getTypeSizeInBits(CE->getType()) !=<br>
+ DL->getTypeSizeInBits(CE->getOperand(0)->getType()))<br>
return false;<br>
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);<br>
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);<br>
<br>
// GEP is fine if it is simple + constant offset.<br>
case Instruction::GetElementPtr:<br>
for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)<br>
if (!isa<ConstantInt>(CE->getOperand(i)))<br>
return false;<br>
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);<br>
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);<br>
<br>
case Instruction::Add:<br>
// We allow simple+cst.<br>
if (!isa<ConstantInt>(CE->getOperand(1)))<br>
return false;<br>
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);<br>
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);<br>
}<br>
return false;<br>
}<br>
@@ -2138,11 +2138,11 @@ static bool isSimpleEnoughValueToCommitH<br>
static inline bool<br>
isSimpleEnoughValueToCommit(Constant *C,<br>
SmallPtrSet<Constant*, 8> &SimpleConstants,<br>
- const DataLayout *TD) {<br>
+ const DataLayout *DL) {<br>
// If we already checked this constant, we win.<br>
if (!SimpleConstants.insert(C)) return true;<br>
// Check the constant.<br>
- return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, TD);<br>
+ return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);<br>
}<br>
<br>
<br>
@@ -2269,8 +2269,8 @@ namespace {<br>
/// Once an evaluation call fails, the evaluation object should not be reused.<br>
class Evaluator {<br>
public:<br>
- Evaluator(const DataLayout *TD, const TargetLibraryInfo *TLI)<br>
- : TD(TD), TLI(TLI) {<br>
+ Evaluator(const DataLayout *DL, const TargetLibraryInfo *TLI)<br>
+ : DL(DL), TLI(TLI) {<br>
ValueStack.push_back(new DenseMap<Value*, Constant*>);<br>
}<br>
<br>
@@ -2350,7 +2350,7 @@ private:<br>
/// simple enough to live in a static initializer of a global.<br>
SmallPtrSet<Constant*, 8> SimpleConstants;<br>
<br>
- const DataLayout *TD;<br>
+ const DataLayout *DL;<br>
const TargetLibraryInfo *TLI;<br>
};<br>
<br>
@@ -2403,7 +2403,7 @@ bool Evaluator::EvaluateBlock(BasicBlock<br>
Constant *Ptr = getVal(SI->getOperand(1));<br>
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {<br>
DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);<br>
- Ptr = ConstantFoldConstantExpression(CE, TD, TLI);<br>
+ Ptr = ConstantFoldConstantExpression(CE, DL, TLI);<br>
DEBUG(dbgs() << "; To: " << *Ptr << "\n");<br>
}<br>
if (!isSimpleEnoughPointerToCommit(Ptr)) {<br>
@@ -2416,7 +2416,7 @@ bool Evaluator::EvaluateBlock(BasicBlock<br>
<br>
// If this might be too difficult for the backend to handle (e.g. the addr<br>
// of one global variable divided by another) then we can't commit it.<br>
- if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, TD)) {<br>
+ if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) {<br>
DEBUG(dbgs() << "Store value is too complex to evaluate store. " << *Val<br>
<< "\n");<br>
return false;<br>
@@ -2448,7 +2448,7 @@ bool Evaluator::EvaluateBlock(BasicBlock<br>
<br>
Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList);<br>
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))<br>
- Ptr = ConstantFoldConstantExpression(CE, TD, TLI);<br>
+ Ptr = ConstantFoldConstantExpression(CE, DL, TLI);<br>
<br>
// If we can't improve the situation by introspecting NewTy,<br>
// we have to give up.<br>
@@ -2512,7 +2512,7 @@ bool Evaluator::EvaluateBlock(BasicBlock<br>
<br>
Constant *Ptr = getVal(LI->getOperand(0));<br>
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {<br>
- Ptr = ConstantFoldConstantExpression(CE, TD, TLI);<br>
+ Ptr = ConstantFoldConstantExpression(CE, DL, TLI);<br>
DEBUG(dbgs() << "Found a constant pointer expression, constant "<br>
"folding: " << *Ptr << "\n");<br>
}<br>
@@ -2589,9 +2589,9 @@ bool Evaluator::EvaluateBlock(BasicBlock<br>
Value *Ptr = PtrArg->stripPointerCasts();<br>
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {<br>
Type *ElemTy = cast<PointerType>(GV->getType())->getElementType();<br>
- if (TD && !Size->isAllOnesValue() &&<br>
+ if (DL && !Size->isAllOnesValue() &&<br>
Size->getValue().getLimitedValue() >=<br>
- TD->getTypeStoreSize(ElemTy)) {<br>
+ DL->getTypeStoreSize(ElemTy)) {<br>
Invariants.insert(GV);<br>
DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV<br>
<< "\n");<br>
@@ -2697,7 +2697,7 @@ bool Evaluator::EvaluateBlock(BasicBlock<br>
<br>
if (!CurInst->use_empty()) {<br>
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))<br>
- InstResult = ConstantFoldConstantExpression(CE, TD, TLI);<br>
+ InstResult = ConstantFoldConstantExpression(CE, DL, TLI);<br>
<br>
setVal(CurInst, InstResult);<br>
}<br>
@@ -2780,10 +2780,10 @@ bool Evaluator::EvaluateFunction(Functio<br>
<br>
/// EvaluateStaticConstructor - Evaluate static constructors in the function, if<br>
/// we can. Return true if we can, false otherwise.<br>
-static bool EvaluateStaticConstructor(Function *F, const DataLayout *TD,<br>
+static bool EvaluateStaticConstructor(Function *F, const DataLayout *DL,<br>
const TargetLibraryInfo *TLI) {<br>
// Call the function.<br>
- Evaluator Eval(TD, TLI);<br>
+ Evaluator Eval(DL, TLI);<br>
Constant *RetValDummy;<br>
bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,<br>
SmallVector<Constant*, 0>());<br>
@@ -2831,7 +2831,7 @@ bool GlobalOpt::OptimizeGlobalCtorsList(<br>
if (F->empty()) continue;<br>
<br>
// If we can evaluate the ctor at compile time, do.<br>
- if (EvaluateStaticConstructor(F, TD, TLI)) {<br>
+ if (EvaluateStaticConstructor(F, DL, TLI)) {<br>
Ctors.erase(Ctors.begin()+i);<br>
MadeChange = true;<br>
--i;<br>
@@ -3159,7 +3159,7 @@ bool GlobalOpt::OptimizeEmptyGlobalCXXDt<br>
bool GlobalOpt::runOnModule(Module &M) {<br>
bool Changed = false;<br>
<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
TLI = &getAnalysis<TargetLibraryInfo>();<br>
<br>
// Try to find the llvm.globalctors list.<br>
<br>
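For the heap-SRoA hunks above, the interesting DataLayout interplay is the per-field size fed to CallInst::CreateMalloc. A small sketch of just that computation, mirroring the code; the function wrapper is illustrative, not part of the patch.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/Casting.h"

// Sketch: bytes allocated per element of one field array, as in
// PerformHeapAllocSRoA. Struct fields take the StructLayout size,
// mirroring the special case in the hunk above.
static uint64_t fieldAllocSize(llvm::Type *FieldTy,
                               const llvm::DataLayout &DL) {
  if (llvm::StructType *ST = llvm::dyn_cast<llvm::StructType>(FieldTy))
    return DL.getStructLayout(ST)->getSizeInBytes();
  return DL.getTypeAllocSize(FieldTy);
}

The malloc size itself is then built from DL->getIntPtrType(CI->getType()) and scaled by NElems, which is why TryToOptimizeStoreOfMallocToGlobal bails out immediately when DL is null.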
Modified: llvm/trunk/lib/Transforms/IPO/MergeFunctions.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/MergeFunctions.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/MergeFunctions.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/IPO/MergeFunctions.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/IPO/MergeFunctions.cpp Thu Feb 20 18:06:31 2014<br>
@@ -108,12 +108,12 @@ public:<br>
static const ComparableFunction TombstoneKey;<br>
static DataLayout * const LookupOnly;<br>
<br>
- ComparableFunction(Function *Func, DataLayout *TD)<br>
- : Func(Func), Hash(profileFunction(Func)), TD(TD) {}<br>
+ ComparableFunction(Function *Func, DataLayout *DL)<br>
+ : Func(Func), Hash(profileFunction(Func)), DL(DL) {}<br>
<br>
Function *getFunc() const { return Func; }<br>
unsigned getHash() const { return Hash; }<br>
- DataLayout *getTD() const { return TD; }<br>
+ DataLayout *getDataLayout() const { return DL; }<br>
<br>
// Drops AssertingVH reference to the function. Outside of debug mode, this<br>
// does nothing.<br>
@@ -125,11 +125,11 @@ public:<br>
<br>
private:<br>
explicit ComparableFunction(unsigned Hash)<br>
- : Func(NULL), Hash(Hash), TD(NULL) {}<br>
+ : Func(NULL), Hash(Hash), DL(NULL) {}<br>
<br>
AssertingVH<Function> Func;<br>
unsigned Hash;<br>
- DataLayout *TD;<br>
+ DataLayout *DL;<br>
};<br>
<br>
const ComparableFunction ComparableFunction::EmptyKey = ComparableFunction(0);<br>
@@ -164,9 +164,9 @@ namespace {<br>
/// side of claiming that two functions are different).<br>
class FunctionComparator {<br>
public:<br>
- FunctionComparator(const DataLayout *TD, const Function *F1,<br>
+ FunctionComparator(const DataLayout *DL, const Function *F1,<br>
const Function *F2)<br>
- : F1(F1), F2(F2), TD(TD) {}<br>
+ : F1(F1), F2(F2), DL(DL) {}<br>
<br>
/// Test whether the two functions have equivalent behaviour.<br>
bool compare();<br>
@@ -199,7 +199,7 @@ private:<br>
// The two functions undergoing comparison.<br>
const Function *F1, *F2;<br>
<br>
- const DataLayout *TD;<br>
+ const DataLayout *DL;<br>
<br>
DenseMap<const Value *, const Value *> id_map;<br>
DenseSet<const Value *> seen_values;<br>
@@ -214,9 +214,9 @@ bool FunctionComparator::isEquivalentTyp<br>
PointerType *PTy1 = dyn_cast<PointerType>(Ty1);<br>
PointerType *PTy2 = dyn_cast<PointerType>(Ty2);<br>
<br>
- if (TD) {<br>
- if (PTy1 && PTy1->getAddressSpace() == 0) Ty1 = TD->getIntPtrType(Ty1);<br>
- if (PTy2 && PTy2->getAddressSpace() == 0) Ty2 = TD->getIntPtrType(Ty2);<br>
+ if (DL) {<br>
+ if (PTy1 && PTy1->getAddressSpace() == 0) Ty1 = DL->getIntPtrType(Ty1);<br>
+ if (PTy2 && PTy2->getAddressSpace() == 0) Ty2 = DL->getIntPtrType(Ty2);<br>
}<br>
<br>
if (Ty1 == Ty2)<br>
@@ -359,13 +359,13 @@ bool FunctionComparator::isEquivalentGEP<br>
if (AS != GEP2->getPointerAddressSpace())<br>
return false;<br>
<br>
- if (TD) {<br>
+ if (DL) {<br>
// When we have target data, we can reduce the GEP down to the value in bytes<br>
// added to the address.<br>
- unsigned BitWidth = TD ? TD->getPointerSizeInBits(AS) : 1;<br>
+ unsigned BitWidth = DL ? DL->getPointerSizeInBits(AS) : 1;<br>
APInt Offset1(BitWidth, 0), Offset2(BitWidth, 0);<br>
- if (GEP1->accumulateConstantOffset(*TD, Offset1) &&<br>
- GEP2->accumulateConstantOffset(*TD, Offset2)) {<br>
+ if (GEP1->accumulateConstantOffset(*DL, Offset1) &&<br>
+ GEP2->accumulateConstantOffset(*DL, Offset2)) {<br>
return Offset1 == Offset2;<br>
}<br>
}<br>
@@ -606,7 +606,7 @@ private:<br>
FnSetType FnSet;<br>
<br>
/// DataLayout for more accurate GEP comparisons. May be NULL.<br>
- DataLayout *TD;<br>
+ DataLayout *DL;<br>
<br>
/// Whether or not the target supports global aliases.<br>
bool HasGlobalAliases;<br>
@@ -623,7 +623,7 @@ ModulePass *llvm::createMergeFunctionsPa<br>
<br>
bool MergeFunctions::runOnModule(Module &M) {<br>
bool Changed = false;<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
<br>
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {<br>
if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage())<br>
@@ -646,7 +646,7 @@ bool MergeFunctions::runOnModule(Module<br>
Function *F = cast<Function>(*I);<br>
if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&<br>
!F->mayBeOverridden()) {<br>
- ComparableFunction CF = ComparableFunction(F, TD);<br>
+ ComparableFunction CF = ComparableFunction(F, DL);<br>
Changed |= insert(CF);<br>
}<br>
}<br>
@@ -661,7 +661,7 @@ bool MergeFunctions::runOnModule(Module<br>
Function *F = cast<Function>(*I);<br>
if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&<br>
F->mayBeOverridden()) {<br>
- ComparableFunction CF = ComparableFunction(F, TD);<br>
+ ComparableFunction CF = ComparableFunction(F, DL);<br>
Changed |= insert(CF);<br>
}<br>
}<br>
@@ -682,14 +682,14 @@ bool DenseMapInfo<ComparableFunction>::i<br>
return false;<br>
<br>
// One of these is a special "underlying pointer comparison only" object.<br>
- if (LHS.getTD() == ComparableFunction::LookupOnly ||<br>
- RHS.getTD() == ComparableFunction::LookupOnly)<br>
+ if (LHS.getDataLayout() == ComparableFunction::LookupOnly ||<br>
+ RHS.getDataLayout() == ComparableFunction::LookupOnly)<br>
return false;<br>
<br>
- assert(LHS.getTD() == RHS.getTD() &&<br>
+ assert(LHS.getDataLayout() == RHS.getDataLayout() &&<br>
"Comparing functions for different targets");<br>
<br>
- return FunctionComparator(LHS.getTD(), LHS.getFunc(),<br>
+ return FunctionComparator(LHS.getDataLayout(), LHS.getFunc(),<br>
RHS.getFunc()).compare();<br>
}<br>
<br>
<br>
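The GEP hunk above is the one place the comparator genuinely needs a DataLayout rather than merely threading it through: two syntactically different index lists are equivalent if they fold to the same byte offset. A sketch of that test in isolation, with an invented helper name:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Operator.h"

// Sketch: compare two GEPs by folded byte offset, as the comparator does
// when target data is available. Both offsets must fold to constants.
static bool sameConstantOffset(llvm::GEPOperator *GEP1,
                               llvm::GEPOperator *GEP2,
                               const llvm::DataLayout &DL) {
  unsigned BitWidth =
      DL.getPointerSizeInBits(GEP1->getPointerAddressSpace());
  llvm::APInt Off1(BitWidth, 0), Off2(BitWidth, 0);
  return GEP1->accumulateConstantOffset(DL, Off1) &&
         GEP2->accumulateConstantOffset(DL, Off2) &&
         Off1 == Off2;
}

Without DL, the comparator falls back to a purely structural comparison of the index operands, which is safe but misses merges.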
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombine.h<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombine.h?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombine.h?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/InstCombine/InstCombine.h (original)<br>
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombine.h Thu Feb 20 18:06:31 2014<br>
@@ -81,7 +81,7 @@ public:<br>
class LLVM_LIBRARY_VISIBILITY InstCombiner<br>
: public FunctionPass,<br>
public InstVisitor<InstCombiner, Instruction*> {<br>
- DataLayout *TD;<br>
+ DataLayout *DL;<br>
TargetLibraryInfo *TLI;<br>
bool MadeIRChange;<br>
LibCallSimplifier *Simplifier;<br>
@@ -96,7 +96,7 @@ public:<br>
BuilderTy *Builder;<br>
<br>
static char ID; // Pass identification, replacement for typeid<br>
- InstCombiner() : FunctionPass(ID), TD(0), Builder(0) {<br>
+ InstCombiner() : FunctionPass(ID), DL(0), Builder(0) {<br>
MinimizeSize = false;<br>
initializeInstCombinerPass(*PassRegistry::getPassRegistry());<br>
}<br>
@@ -108,7 +108,7 @@ public:<br>
<br>
virtual void getAnalysisUsage(AnalysisUsage &AU) const;<br>
<br>
- DataLayout *getDataLayout() const { return TD; }<br>
+ DataLayout *getDataLayout() const { return DL; }<br>
<br>
TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }<br>
<br>
@@ -234,7 +234,7 @@ private:<br>
Type *Ty);<br>
<br>
Instruction *visitCallSite(CallSite CS);<br>
- Instruction *tryOptimizeCall(CallInst *CI, const DataLayout *TD);<br>
+ Instruction *tryOptimizeCall(CallInst *CI, const DataLayout *DL);<br>
bool transformConstExprCastCall(CallSite CS);<br>
Instruction *transformCallThroughTrampoline(CallSite CS,<br>
IntrinsicInst *Tramp);<br>
@@ -311,15 +311,15 @@ public:<br>
<br>
void ComputeMaskedBits(Value *V, APInt &KnownZero,<br>
APInt &KnownOne, unsigned Depth = 0) const {<br>
- return llvm::ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);<br>
+ return llvm::ComputeMaskedBits(V, KnownZero, KnownOne, DL, Depth);<br>
}<br>
<br>
bool MaskedValueIsZero(Value *V, const APInt &Mask,<br>
unsigned Depth = 0) const {<br>
- return llvm::MaskedValueIsZero(V, Mask, TD, Depth);<br>
+ return llvm::MaskedValueIsZero(V, Mask, DL, Depth);<br>
}<br>
unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0) const {<br>
- return llvm::ComputeNumSignBits(Op, TD, Depth);<br>
+ return llvm::ComputeNumSignBits(Op, DL, Depth);<br>
}<br>
<br>
private:<br>
<br>
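The three wrappers above are thin forwards into ValueTracking; the only state they add is the pass's DL pointer. For reference, a caller-side sketch against the 3.4-era free functions (the helper name is mine):

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"

// Sketch: ask ValueTracking whether the low bit of V is known zero. A
// null DataLayout is accepted; the analysis simply gets more conservative.
static bool lowBitKnownZero(llvm::Value *V, const llvm::DataLayout *DL) {
  unsigned BW = V->getType()->getScalarSizeInBits();
  return llvm::MaskedValueIsZero(V, llvm::APInt::getLowBitsSet(BW, 1), DL);
}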
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp Thu Feb 20 18:06:31 2014<br>
@@ -919,7 +919,7 @@ Instruction *InstCombiner::visitAdd(Bina<br>
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);<br>
<br>
if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),<br>
- I.hasNoUnsignedWrap(), TD))<br>
+ I.hasNoUnsignedWrap(), DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
// (A*B)+(A*C) -> A*(B+C) etc<br>
@@ -1193,7 +1193,7 @@ Instruction *InstCombiner::visitFAdd(Bin<br>
bool Changed = SimplifyAssociativeOrCommutative(I);<br>
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);<br>
<br>
- if (Value *V = SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), TD))<br>
+ if (Value *V = SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
if (isa<Constant>(RHS)) {<br>
@@ -1300,7 +1300,7 @@ Instruction *InstCombiner::visitFAdd(Bin<br>
///<br>
Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,<br>
Type *Ty) {<br>
- assert(TD && "Must have target data info for this");<br>
+ assert(DL && "Must have target data info for this");<br>
<br>
// If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize<br>
// this.<br>
@@ -1369,7 +1369,7 @@ Instruction *InstCombiner::visitSub(Bina<br>
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);<br>
<br>
if (Value *V = SimplifySubInst(Op0, Op1, I.hasNoSignedWrap(),<br>
- I.hasNoUnsignedWrap(), TD))<br>
+ I.hasNoUnsignedWrap(), DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
// (A*B)-(A*C) -> A*(B-C) etc<br>
@@ -1518,7 +1518,7 @@ Instruction *InstCombiner::visitSub(Bina<br>
<br>
// Optimize pointer differences within the same array into a size. Consider:<br>
// &A[10] - &A[0]: we should compile this to "10".<br>
- if (TD) {<br>
+ if (DL) {<br>
Value *LHSOp, *RHSOp;<br>
if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&<br>
match(Op1, m_PtrToInt(m_Value(RHSOp))))<br>
@@ -1538,7 +1538,7 @@ Instruction *InstCombiner::visitSub(Bina<br>
Instruction *InstCombiner::visitFSub(BinaryOperator &I) {<br>
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);<br>
<br>
- if (Value *V = SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), TD))<br>
+ if (Value *V = SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
if (isa<Constant>(Op0))<br>
<br>
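To make the &A[10] - &A[0] comment above concrete: with a DataLayout in hand, the two ptrtoint values differ by a compile-time byte count, and the exact divide a C frontend typically emits brings that back to an element count. A standalone toy; the layout string here is made up for the example.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include <cstdio>

// Sketch: the arithmetic behind folding &A[10] - &A[0] to 10 for an i32
// array. The element size comes from DataLayout, so the whole expression
// becomes a constant once DL is known.
int main() {
  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("e-p:64:64:64-i32:32:32");   // hypothetical layout
  uint64_t EltSize =
      DL.getTypeAllocSize(llvm::Type::getInt32Ty(Ctx));        // 4 bytes
  uint64_t ByteDiff = 10 * EltSize;     // ptrtoint(&A[10]) - ptrtoint(&A[0])
  std::printf("element diff = %llu\n",
              (unsigned long long)(ByteDiff / EltSize));       // prints 10
  return 0;
}

That is also why the whole block is wrapped in "if (DL)": without an element size the subtraction cannot be reassociated into a constant.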
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp Thu Feb 20 18:06:31 2014<br>
@@ -1104,7 +1104,7 @@ Instruction *InstCombiner::visitAnd(Bina<br>
bool Changed = SimplifyAssociativeOrCommutative(I);<br>
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);<br>
<br>
- if (Value *V = SimplifyAndInst(Op0, Op1, TD))<br>
+ if (Value *V = SimplifyAndInst(Op0, Op1, DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
// (A|B)&(A|C) -> A|(B&C) etc<br>
@@ -1905,7 +1905,7 @@ Instruction *InstCombiner::visitOr(Binar<br>
bool Changed = SimplifyAssociativeOrCommutative(I);<br>
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);<br>
<br>
- if (Value *V = SimplifyOrInst(Op0, Op1, TD))<br>
+ if (Value *V = SimplifyOrInst(Op0, Op1, DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
// (A&B)|(A&C) -> A&(B|C) etc<br>
@@ -2237,7 +2237,7 @@ Instruction *InstCombiner::visitXor(Bina<br>
bool Changed = SimplifyAssociativeOrCommutative(I);<br>
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);<br>
<br>
- if (Value *V = SimplifyXorInst(Op0, Op1, TD))<br>
+ if (Value *V = SimplifyXorInst(Op0, Op1, DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
// (A&B)^(A&C) -> A&(B^C) etc<br>
<br>
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp Thu Feb 20 18:06:31 2014<br>
@@ -56,8 +56,8 @@ static Type *reduceToSingleValueType(Typ<br>
}<br>
<br>
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {<br>
- unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);<br>
- unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), TD);<br>
+ unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL);<br>
+ unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL);<br>
unsigned MinAlign = std::min(DstAlign, SrcAlign);<br>
unsigned CopyAlign = MI->getAlignment();<br>
<br>
@@ -103,7 +103,7 @@ Instruction *InstCombiner::SimplifyMemTr<br>
if (StrippedDest != MI->getArgOperand(0)) {<br>
Type *SrcETy = cast<PointerType>(StrippedDest->getType())<br>
->getElementType();<br>
- if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {<br>
+ if (DL && SrcETy->isSized() && DL->getTypeStoreSize(SrcETy) == Size) {<br>
// The SrcETy might be something like {{{double}}} or [1 x double]. Rip<br>
// down through these levels if so.<br>
SrcETy = reduceToSingleValueType(SrcETy);<br>
@@ -152,7 +152,7 @@ Instruction *InstCombiner::SimplifyMemTr<br>
}<br>
<br>
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {<br>
- unsigned Alignment = getKnownAlignment(MI->getDest(), TD);<br>
+ unsigned Alignment = getKnownAlignment(MI->getDest(), DL);<br>
if (MI->getAlignment() < Alignment) {<br>
MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),<br>
Alignment, false));<br>
@@ -274,7 +274,7 @@ Instruction *InstCombiner::visitCallInst<br>
default: break;<br>
case Intrinsic::objectsize: {<br>
uint64_t Size;<br>
- if (getObjectSize(II->getArgOperand(0), Size, TD, TLI))<br>
+ if (getObjectSize(II->getArgOperand(0), Size, DL, TLI))<br>
return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));<br>
return 0;<br>
}<br>
@@ -504,7 +504,7 @@ Instruction *InstCombiner::visitCallInst<br>
case Intrinsic::ppc_altivec_lvx:<br>
case Intrinsic::ppc_altivec_lvxl:<br>
// Turn PPC lvx -> load if the pointer is known aligned.<br>
- if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {<br>
+ if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL) >= 16) {<br>
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),<br>
PointerType::getUnqual(II->getType()));<br>
return new LoadInst(Ptr);<br>
@@ -513,7 +513,7 @@ Instruction *InstCombiner::visitCallInst<br>
case Intrinsic::ppc_altivec_stvx:<br>
case Intrinsic::ppc_altivec_stvxl:<br>
// Turn stvx -> store if the pointer is known aligned.<br>
- if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {<br>
+ if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL) >= 16) {<br>
Type *OpPtrTy =<br>
PointerType::getUnqual(II->getArgOperand(0)->getType());<br>
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);<br>
@@ -524,7 +524,7 @@ Instruction *InstCombiner::visitCallInst<br>
case Intrinsic::x86_sse2_storeu_pd:<br>
case Intrinsic::x86_sse2_storeu_dq:<br>
// Turn X86 storeu -> store if the pointer is known aligned.<br>
- if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {<br>
+ if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL) >= 16) {<br>
Type *OpPtrTy =<br>
PointerType::getUnqual(II->getArgOperand(1)->getType());<br>
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);<br>
@@ -641,7 +641,7 @@ Instruction *InstCombiner::visitCallInst<br>
case Intrinsic::arm_neon_vst2lane:<br>
case Intrinsic::arm_neon_vst3lane:<br>
case Intrinsic::arm_neon_vst4lane: {<br>
- unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);<br>
+ unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL);<br>
unsigned AlignArg = II->getNumArgOperands() - 1;<br>
ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));<br>
if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {<br>
@@ -747,7 +747,7 @@ Instruction *InstCombiner::visitInvokeIn<br>
/// passed through the varargs area, we can eliminate the use of the cast.<br>
static bool isSafeToEliminateVarargsCast(const CallSite CS,<br>
const CastInst * const CI,<br>
- const DataLayout * const TD,<br>
+ const DataLayout * const DL,<br>
const int ix) {<br>
if (!CI->isLosslessCast())<br>
return false;<br>
@@ -763,7 +763,7 @@ static bool isSafeToEliminateVarargsCast<br>
Type* DstTy = cast<PointerType>(CI->getType())->getElementType();<br>
if (!SrcTy->isSized() || !DstTy->isSized())<br>
return false;<br>
- if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))<br>
+ if (!DL || DL->getTypeAllocSize(SrcTy) != DL->getTypeAllocSize(DstTy))<br>
return false;<br>
return true;<br>
}<br>
@@ -772,7 +772,7 @@ static bool isSafeToEliminateVarargsCast<br>
// Currently we're only working with the checking functions, memcpy_chk,<br>
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,<br>
// strcat_chk and strncat_chk.<br>
-Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *TD) {<br>
+Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *DL) {<br>
if (CI->getCalledFunction() == 0) return 0;<br>
<br>
if (Value *With = Simplifier->optimizeCall(CI)) {<br>
@@ -934,7 +934,7 @@ Instruction *InstCombiner::visitCallSite<br>
for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),<br>
E = CS.arg_end(); I != E; ++I, ++ix) {<br>
CastInst *CI = dyn_cast<CastInst>(*I);<br>
- if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {<br>
+ if (CI && isSafeToEliminateVarargsCast(CS, CI, DL, ix)) {<br>
*I = CI->getOperand(0);<br>
Changed = true;<br>
}<br>
@@ -951,7 +951,7 @@ Instruction *InstCombiner::visitCallSite<br>
// this. None of these calls are seen as possibly dead so go ahead and<br>
// delete the instruction now.<br>
if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {<br>
- Instruction *I = tryOptimizeCall(CI, TD);<br>
+ Instruction *I = tryOptimizeCall(CI, DL);<br>
// If we changed something return the result, etc. Otherwise let<br>
// the fallthrough check.<br>
if (I) return EraseInstFromFunction(*I);<br>
@@ -1043,12 +1043,12 @@ bool InstCombiner::transformConstExprCas<br>
CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,<br>
Attribute::ByVal)) {<br>
PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);<br>
- if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)<br>
+ if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || DL == 0)<br>
return false;<br>
<br>
Type *CurElTy = ActTy->getPointerElementType();<br>
- if (TD->getTypeAllocSize(CurElTy) !=<br>
- TD->getTypeAllocSize(ParamPTy->getElementType()))<br>
+ if (DL->getTypeAllocSize(CurElTy) !=<br>
+ DL->getTypeAllocSize(ParamPTy->getElementType()))<br>
return false;<br>
}<br>
}<br>
<br>
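One subtlety in isSafeToEliminateVarargsCast, visible in the hunk above: losslessness of the cast is not enough, because varargs arguments are passed by value and therefore by size. A minimal restatement of the size gate; the wrapper is invented for illustration.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

// Sketch: a pointee-to-pointee bitcast on a vararg argument is droppable
// only when both pointee types occupy the same number of bytes, which
// requires a DataLayout to decide.
static bool sameAllocSize(llvm::Type *SrcTy, llvm::Type *DstTy,
                          const llvm::DataLayout *DL) {
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  return DL && DL->getTypeAllocSize(SrcTy) == DL->getTypeAllocSize(DstTy);
}

This is why the check above returns false outright when DL is 0.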
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp Thu Feb 20 18:06:31 2014<br>
@@ -79,7 +79,7 @@ static Value *DecomposeSimpleLinearExpr(<br>
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,<br>
AllocaInst &AI) {<br>
// This requires DataLayout to get the alloca alignment and size information.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
PointerType *PTy = cast<PointerType>(CI.getType());<br>
<br>
@@ -91,8 +91,8 @@ Instruction *InstCombiner::PromoteCastOf<br>
Type *CastElTy = PTy->getElementType();<br>
if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;<br>
<br>
- unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);<br>
- unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);<br>
+ unsigned AllocElTyAlign = DL->getABITypeAlignment(AllocElTy);<br>
+ unsigned CastElTyAlign = DL->getABITypeAlignment(CastElTy);<br>
if (CastElTyAlign < AllocElTyAlign) return 0;<br>
<br>
// If the allocation has multiple uses, only promote it if we are strictly<br>
@@ -100,14 +100,14 @@ Instruction *InstCombiner::PromoteCastOf<br>
// same, we open the door to infinite loops of various kinds.<br>
if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return 0;<br>
<br>
- uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy);<br>
- uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy);<br>
+ uint64_t AllocElTySize = DL->getTypeAllocSize(AllocElTy);<br>
+ uint64_t CastElTySize = DL->getTypeAllocSize(CastElTy);<br>
if (CastElTySize == 0 || AllocElTySize == 0) return 0;<br>
<br>
// If the allocation has multiple uses, only promote it if we're not<br>
// shrinking the amount of memory being allocated.<br>
- uint64_t AllocElTyStoreSize = TD->getTypeStoreSize(AllocElTy);<br>
- uint64_t CastElTyStoreSize = TD->getTypeStoreSize(CastElTy);<br>
+ uint64_t AllocElTyStoreSize = DL->getTypeStoreSize(AllocElTy);<br>
+ uint64_t CastElTyStoreSize = DL->getTypeStoreSize(CastElTy);<br>
if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return 0;<br>
<br>
// See if we can satisfy the modulus by pulling a scale out of the array<br>
@@ -161,9 +161,9 @@ Value *InstCombiner::EvaluateInDifferent<br>
bool isSigned) {<br>
if (Constant *C = dyn_cast<Constant>(V)) {<br>
C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);<br>
- // If we got a constantexpr back, try to simplify it with TD info.<br>
+ // If we got a constantexpr back, try to simplify it with DL info.<br>
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))<br>
- C = ConstantFoldConstantExpression(CE, TD, TLI);<br>
+ C = ConstantFoldConstantExpression(CE, DL, TLI);<br>
return C;<br>
}<br>
<br>
@@ -235,7 +235,7 @@ isEliminableCastPair(<br>
const CastInst *CI, ///< The first cast instruction<br>
unsigned opcode, ///< The opcode of the second cast instruction<br>
Type *DstTy, ///< The target type for the second cast instruction<br>
- DataLayout *TD ///< The target data for pointer size<br>
+ DataLayout *DL ///< The target data for pointer size<br>
) {<br>
<br>
Type *SrcTy = CI->getOperand(0)->getType(); // A from above<br>
@@ -244,12 +244,12 @@ isEliminableCastPair(<br>
// Get the opcodes of the two Cast instructions<br>
Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());<br>
Instruction::CastOps secondOp = Instruction::CastOps(opcode);<br>
- Type *SrcIntPtrTy = TD && SrcTy->isPtrOrPtrVectorTy() ?<br>
- TD->getIntPtrType(SrcTy) : 0;<br>
- Type *MidIntPtrTy = TD && MidTy->isPtrOrPtrVectorTy() ?<br>
- TD->getIntPtrType(MidTy) : 0;<br>
- Type *DstIntPtrTy = TD && DstTy->isPtrOrPtrVectorTy() ?<br>
- TD->getIntPtrType(DstTy) : 0;<br>
+ Type *SrcIntPtrTy = DL && SrcTy->isPtrOrPtrVectorTy() ?<br>
+ DL->getIntPtrType(SrcTy) : 0;<br>
+ Type *MidIntPtrTy = DL && MidTy->isPtrOrPtrVectorTy() ?<br>
+ DL->getIntPtrType(MidTy) : 0;<br>
+ Type *DstIntPtrTy = DL && DstTy->isPtrOrPtrVectorTy() ?<br>
+ DL->getIntPtrType(DstTy) : 0;<br>
unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,<br>
DstTy, SrcIntPtrTy, MidIntPtrTy,<br>
DstIntPtrTy);<br>
@@ -275,7 +275,7 @@ bool InstCombiner::ShouldOptimizeCast(In<br>
// If this is another cast that can be eliminated, we prefer to have it<br>
// eliminated.<br>
if (const CastInst *CI = dyn_cast<CastInst>(V))<br>
- if (isEliminableCastPair(CI, opc, Ty, TD))<br>
+ if (isEliminableCastPair(CI, opc, Ty, DL))<br>
return false;<br>
<br>
// If this is a vector sext from a compare, then we don't want to break the<br>
@@ -295,7 +295,7 @@ Instruction *InstCombiner::commonCastTra<br>
// eliminate it now.<br>
if (CastInst *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast<br>
if (Instruction::CastOps opc =<br>
- isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {<br>
+ isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), DL)) {<br>
// The first cast (CSrc) is eliminable so we need to fix up or replace<br>
// the second cast (CI). CSrc will then have a good chance of being dead.<br>
return CastInst::Create(opc, CSrc->getOperand(0), CI.getType());<br>
@@ -1405,11 +1405,11 @@ Instruction *InstCombiner::visitIntToPtr<br>
// trunc or zext to the intptr_t type, then inttoptr of it. This allows the<br>
// cast to be exposed to other transforms.<br>
<br>
- if (TD) {<br>
+ if (DL) {<br>
unsigned AS = CI.getAddressSpace();<br>
if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=<br>
- TD->getPointerSizeInBits(AS)) {<br>
- Type *Ty = TD->getIntPtrType(CI.getContext(), AS);<br>
+ DL->getPointerSizeInBits(AS)) {<br>
+ Type *Ty = DL->getIntPtrType(CI.getContext(), AS);<br>
if (CI.getType()->isVectorTy()) // Handle vectors of pointers.<br>
Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());<br>
<br>
@@ -1440,7 +1440,7 @@ Instruction *InstCombiner::commonPointer<br>
return &CI;<br>
}<br>
<br>
- if (!TD)<br>
+ if (!DL)<br>
return commonCastTransforms(CI);<br>
<br>
// If the GEP has a single use, and the base pointer is a bitcast, and the<br>
@@ -1448,12 +1448,12 @@ Instruction *InstCombiner::commonPointer<br>
// instructions into fewer. This typically happens with unions and other<br>
// non-type-safe code.<br>
unsigned AS = GEP->getPointerAddressSpace();<br>
- unsigned OffsetBits = TD->getPointerSizeInBits(AS);<br>
+ unsigned OffsetBits = DL->getPointerSizeInBits(AS);<br>
APInt Offset(OffsetBits, 0);<br>
BitCastInst *BCI = dyn_cast<BitCastInst>(GEP->getOperand(0));<br>
if (GEP->hasOneUse() &&<br>
BCI &&<br>
- GEP->accumulateConstantOffset(*TD, Offset)) {<br>
+ GEP->accumulateConstantOffset(*DL, Offset)) {<br>
// Get the base pointer input of the bitcast, and the type it points to.<br>
Value *OrigBase = BCI->getOperand(0);<br>
SmallVector<Value*, 8> NewIndices;<br>
@@ -1484,16 +1484,16 @@ Instruction *InstCombiner::visitPtrToInt<br>
// do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast<br>
// to be exposed to other transforms.<br>
<br>
- if (!TD)<br>
+ if (!DL)<br>
return commonPointerCastTransforms(CI);<br>
<br>
Type *Ty = CI.getType();<br>
unsigned AS = CI.getPointerAddressSpace();<br>
<br>
- if (Ty->getScalarSizeInBits() == TD->getPointerSizeInBits(AS))<br>
+ if (Ty->getScalarSizeInBits() == DL->getPointerSizeInBits(AS))<br>
return commonPointerCastTransforms(CI);<br>
<br>
- Type *PtrTy = TD->getIntPtrType(CI.getContext(), AS);<br>
+ Type *PtrTy = DL->getIntPtrType(CI.getContext(), AS);<br>
if (Ty->isVectorTy()) // Handle vectors of pointers.<br>
PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());<br>
<br>
<br>
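The ptrtoint/inttoptr hunks above implement one canonicalization: cast through the target's intptr type whenever the integer width and pointer width disagree, so the trunc/zext half becomes visible to other folds. The width test in isolation; the predicate name is mine.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

// Sketch: decide whether a ptrtoint/inttoptr should be split into a cast
// through intptr_t plus an explicit trunc or zext. Without a DataLayout
// the pointer width is unknown, so no split is attempted.
static bool needsIntPtrSplit(llvm::Type *IntTy, unsigned AddrSpace,
                             const llvm::DataLayout *DL) {
  return DL &&
         IntTy->getScalarSizeInBits() != DL->getPointerSizeInBits(AddrSpace);
}

DL->getIntPtrType(CI.getContext(), AS) then supplies the intermediate type, including the vector-of-pointers case handled above.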
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp Thu Feb 20 18:06:31 2014<br>
@@ -218,7 +218,7 @@ Instruction *InstCombiner::<br>
FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,<br>
CmpInst &ICI, ConstantInt *AndCst) {<br>
// We need TD information to know the pointer size unless this is inbounds.<br>
- if (!GEP->isInBounds() && TD == 0)<br>
+ if (!GEP->isInBounds() && DL == 0)<br>
return 0;<br>
<br>
Constant *Init = GV->getInitializer();<br>
@@ -307,7 +307,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementP<br>
<br>
// Find out if the comparison would be true or false for the i'th element.<br>
Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,<br>
- CompareRHS, TD, TLI);<br>
+ CompareRHS, DL, TLI);<br>
// If the result is undef for this element, ignore it.<br>
if (isa<UndefValue>(C)) {<br>
// Extend range state machines to cover this element in case there is an<br>
@@ -386,7 +386,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementP<br>
// index down like the GEP would do implicitly. We don't have to do this for<br>
// an inbounds GEP because the index can't be out of range.<br>
if (!GEP->isInBounds()) {<br>
- Type *IntPtrTy = TD->getIntPtrType(GEP->getType());<br>
+ Type *IntPtrTy = DL->getIntPtrType(GEP->getType());<br>
unsigned PtrSize = IntPtrTy->getIntegerBitWidth();<br>
if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)<br>
Idx = Builder->CreateTrunc(Idx, IntPtrTy);<br>
@@ -475,8 +475,8 @@ FoldCmpLoadFromIndexedGlobal(GetElementP<br>
// - Default to i32<br>
if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())<br>
Ty = Idx->getType();<br>
- else if (TD)<br>
- Ty = TD->getSmallestLegalIntType(Init->getContext(), ArrayElementCount);<br>
+ else if (DL)<br>
+ Ty = DL->getSmallestLegalIntType(Init->getContext(), ArrayElementCount);<br>
else if (ArrayElementCount <= 32)<br>
Ty = Type::getInt32Ty(Init->getContext());<br>
<br>
@@ -503,7 +503,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementP<br>
/// If we can't emit an optimized form for this expression, this returns null.<br>
///<br>
static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {<br>
- DataLayout &TD = *IC.getDataLayout();<br>
+ DataLayout &DL = *IC.getDataLayout();<br>
gep_type_iterator GTI = gep_type_begin(GEP);<br>
<br>
// Check to see if this gep only has a single variable index. If so, and if<br>
@@ -520,9 +520,9 @@ static Value *EvaluateGEPOffsetExpressio<br>
<br>
// Handle a struct index, which adds its field offset to the pointer.<br>
if (StructType *STy = dyn_cast<StructType>(*GTI)) {<br>
- Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());<br>
+ Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());<br>
} else {<br>
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());<br>
+ uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());<br>
Offset += Size*CI->getSExtValue();<br>
}<br>
} else {<br>
@@ -538,7 +538,7 @@ static Value *EvaluateGEPOffsetExpressio<br>
Value *VariableIdx = GEP->getOperand(i);<br>
// Determine the scale factor of the variable element. For example, this is<br>
// 4 if the variable index is into an array of i32.<br>
- uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());<br>
+ uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());<br>
<br>
// Verify that there are no other variable indices. If so, emit the hard way.<br>
for (++i, ++GTI; i != e; ++i, ++GTI) {<br>
@@ -550,9 +550,9 @@ static Value *EvaluateGEPOffsetExpressio<br>
<br>
// Handle a struct index, which adds its field offset to the pointer.<br>
if (StructType *STy = dyn_cast<StructType>(*GTI)) {<br>
- Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());<br>
+ Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());<br>
} else {<br>
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());<br>
+ uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());<br>
Offset += Size*CI->getSExtValue();<br>
}<br>
}<br>
@@ -562,7 +562,7 @@ static Value *EvaluateGEPOffsetExpressio<br>
// Okay, we know we have a single variable index, which must be a<br>
// pointer/array/vector index. If there is no offset, life is simple, return<br>
// the index.<br>
- Type *IntPtrTy = TD.getIntPtrType(GEP->getOperand(0)->getType());<br>
+ Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());<br>
unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();<br>
if (Offset == 0) {<br>
// Cast to intptrty in case a truncation occurs. If an extension is needed,<br>
@@ -615,7 +615,7 @@ Instruction *InstCombiner::FoldGEPICmp(G<br>
RHS = BCI->getOperand(0);<br>
<br>
Value *PtrBase = GEPLHS->getOperand(0);<br>
- if (TD && PtrBase == RHS && GEPLHS->isInBounds()) {<br>
+ if (DL && PtrBase == RHS && GEPLHS->isInBounds()) {<br>
// ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).<br>
// This transformation (ignoring the base and scales) is valid because we<br>
// know pointers can't overflow since the gep is inbounds. See if we can<br>
@@ -648,7 +648,7 @@ Instruction *InstCombiner::FoldGEPICmp(G<br>
// If we're comparing GEPs with two base pointers that only differ in type<br>
// and both GEPs have only constant indices or just one use, then fold<br>
// the compare with the adjusted indices.<br>
- if (TD && GEPLHS->isInBounds() && GEPRHS->isInBounds() &&<br>
+ if (DL && GEPLHS->isInBounds() && GEPRHS->isInBounds() &&<br>
(GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&<br>
(GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&<br>
PtrBase->stripPointerCasts() ==<br>
@@ -719,7 +719,7 @@ Instruction *InstCombiner::FoldGEPICmp(G<br>
<br>
// Only lower this if the icmp is the only user of the GEP or if we expect<br>
// the result to fold to a constant!<br>
- if (TD &&<br>
+ if (DL &&<br>
GEPsInBounds &&<br>
(isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&<br>
(isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {<br>
@@ -1792,8 +1792,8 @@ Instruction *InstCombiner::visitICmpInst<br>
<br>
// Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the<br>
// integer type is the same size as the pointer type.<br>
- if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&<br>
- TD->getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {<br>
+ if (DL && LHSCI->getOpcode() == Instruction::PtrToInt &&<br>
+ DL->getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {<br>
Value *RHSOp = 0;<br>
if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {<br>
RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);<br>
@@ -2104,7 +2104,7 @@ Instruction *InstCombiner::visitICmpInst<br>
Changed = true;<br>
}<br>
<br>
- if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, TD))<br>
+ if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
// comparing -val or val with non-zero is the same as just comparing val<br>
@@ -2172,8 +2172,8 @@ Instruction *InstCombiner::visitICmpInst<br>
unsigned BitWidth = 0;<br>
if (Ty->isIntOrIntVectorTy())<br>
BitWidth = Ty->getScalarSizeInBits();<br>
- else if (TD) // Pointers require TD info to get their size.<br>
- BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());<br>
+ else if (DL) // Pointers require DL info to get their size.<br>
+ BitWidth = DL->getTypeSizeInBits(Ty->getScalarType());<br>
<br>
bool isSignBit = false;<br>
<br>
@@ -2532,8 +2532,8 @@ Instruction *InstCombiner::visitICmpInst<br>
}<br>
case Instruction::IntToPtr:<br>
// icmp pred inttoptr(X), null -> icmp pred X, 0<br>
- if (RHSC->isNullValue() && TD &&<br>
- TD->getIntPtrType(RHSC->getType()) ==<br>
+ if (RHSC->isNullValue() && DL &&<br>
+ DL->getIntPtrType(RHSC->getType()) ==<br>
LHSI->getOperand(0)->getType())<br>
return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),<br>
Constant::getNullValue(LHSI->getOperand(0)->getType()));<br>
@@ -3229,7 +3229,7 @@ Instruction *InstCombiner::visitFCmpInst<br>
<br>
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);<br>
<br>
- if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, TD))<br>
+ if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
// Simplify 'fcmp pred X, X'<br>
<br>
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp Thu Feb 20 18:06:31 2014<br>
@@ -157,8 +157,8 @@ isOnlyCopiedFromConstantGlobal(AllocaIns<br>
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {<br>
// Ensure that the alloca array size argument has type intptr_t, so that<br>
// any casting is exposed early.<br>
- if (TD) {<br>
- Type *IntPtrTy = TD->getIntPtrType(AI.getType());<br>
+ if (DL) {<br>
+ Type *IntPtrTy = DL->getIntPtrType(AI.getType());<br>
if (AI.getArraySize()->getType() != IntPtrTy) {<br>
Value *V = Builder->CreateIntCast(AI.getArraySize(),<br>
IntPtrTy, false);<br>
@@ -184,8 +184,8 @@ Instruction *InstCombiner::visitAllocaIn<br>
// Now that I is pointing to the first non-allocation-inst in the block,<br>
// insert our getelementptr instruction...<br>
//<br>
- Type *IdxTy = TD<br>
- ? TD->getIntPtrType(AI.getType())<br>
+ Type *IdxTy = DL<br>
+ ? DL->getIntPtrType(AI.getType())<br>
: Type::getInt64Ty(AI.getContext());<br>
Value *NullIdx = Constant::getNullValue(IdxTy);<br>
Value *Idx[2] = { NullIdx, NullIdx };<br>
@@ -201,15 +201,15 @@ Instruction *InstCombiner::visitAllocaIn<br>
}<br>
}<br>
<br>
- if (TD && AI.getAllocatedType()->isSized()) {<br>
+ if (DL && AI.getAllocatedType()->isSized()) {<br>
// If the alignment is 0 (unspecified), assign it the preferred alignment.<br>
if (AI.getAlignment() == 0)<br>
- AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));<br>
+ AI.setAlignment(DL->getPrefTypeAlignment(AI.getAllocatedType()));<br>
<br>
// Move all alloca's of zero byte objects to the entry block and merge them<br>
// together. Note that we only do this for alloca's, because malloc should<br>
// allocate and return a unique pointer, even for a zero byte allocation.<br>
- if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0) {<br>
+ if (DL->getTypeAllocSize(AI.getAllocatedType()) == 0) {<br>
// For a zero sized alloca there is no point in doing an array allocation.<br>
// This is helpful if the array size is a complicated expression not used<br>
// elsewhere.<br>
@@ -227,7 +227,7 @@ Instruction *InstCombiner::visitAllocaIn<br>
// dominance as the array size was forced to a constant earlier already.<br>
AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);<br>
if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||<br>
- TD->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {<br>
+ DL->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {<br>
AI.moveBefore(FirstInst);<br>
return &AI;<br>
}<br>
@@ -236,7 +236,7 @@ Instruction *InstCombiner::visitAllocaIn<br>
// assign it the preferred alignment.<br>
if (EntryAI->getAlignment() == 0)<br>
EntryAI->setAlignment(<br>
- TD->getPrefTypeAlignment(EntryAI->getAllocatedType()));<br>
+ DL->getPrefTypeAlignment(EntryAI->getAllocatedType()));<br>
// Replace this zero-sized alloca with the one at the start of the entry<br>
// block after ensuring that the address will be aligned enough for both<br>
// types.<br>
@@ -260,7 +260,7 @@ Instruction *InstCombiner::visitAllocaIn<br>
SmallVector<Instruction *, 4> ToDelete;<br>
if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {<br>
unsigned SourceAlign = getOrEnforceKnownAlignment(Copy->getSource(),<br>
- AI.getAlignment(), TD);<br>
+ AI.getAlignment(), DL);<br>
if (AI.getAlignment() <= SourceAlign) {<br>
DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');<br>
DEBUG(dbgs() << " memcpy = " << *Copy << '\n');<br>
@@ -285,7 +285,7 @@ Instruction *InstCombiner::visitAllocaIn<br>
<br>
/// InstCombineLoadCast - Fold 'load (cast P)' -> cast (load P)' when possible.<br>
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,<br>
- const DataLayout *TD) {<br>
+ const DataLayout *DL) {<br>
User *CI = cast<User>(LI.getOperand(0));<br>
Value *CastOp = CI->getOperand(0);<br>
<br>
@@ -307,8 +307,8 @@ static Instruction *InstCombineLoadCast(<br>
if (ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))<br>
if (Constant *CSrc = dyn_cast<Constant>(CastOp))<br>
if (ASrcTy->getNumElements() != 0) {<br>
- Type *IdxTy = TD<br>
- ? TD->getIntPtrType(SrcTy)<br>
+ Type *IdxTy = DL<br>
+ ? DL->getIntPtrType(SrcTy)<br>
: Type::getInt64Ty(SrcTy->getContext());<br>
Value *Idx = Constant::getNullValue(IdxTy);<br>
Value *Idxs[2] = { Idx, Idx };<br>
@@ -346,12 +346,12 @@ Instruction *InstCombiner::visitLoadInst<br>
Value *Op = LI.getOperand(0);<br>
<br>
// Attempt to improve the alignment.<br>
- if (TD) {<br>
+ if (DL) {<br>
unsigned KnownAlign =<br>
- getOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()),TD);<br>
+ getOrEnforceKnownAlignment(Op, DL->getPrefTypeAlignment(LI.getType()),DL);<br>
unsigned LoadAlign = LI.getAlignment();<br>
unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :<br>
- TD->getABITypeAlignment(LI.getType());<br>
+ DL->getABITypeAlignment(LI.getType());<br>
<br>
if (KnownAlign > EffectiveLoadAlign)<br>
LI.setAlignment(KnownAlign);<br>
@@ -361,7 +361,7 @@ Instruction *InstCombiner::visitLoadInst<br>
<br>
// load (cast X) --> cast (load X) iff safe.<br>
if (isa<CastInst>(Op))<br>
- if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))<br>
+ if (Instruction *Res = InstCombineLoadCast(*this, LI, DL))<br>
return Res;<br>
<br>
// None of the following transforms are legal for volatile/atomic loads.<br>
@@ -405,7 +405,7 @@ Instruction *InstCombiner::visitLoadInst<br>
// Instcombine load (constantexpr_cast global) -> cast (load global)<br>
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))<br>
if (CE->isCast())<br>
- if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))<br>
+ if (Instruction *Res = InstCombineLoadCast(*this, LI, DL))<br>
return Res;<br>
<br>
if (Op->hasOneUse()) {<br>
@@ -422,8 +422,8 @@ Instruction *InstCombiner::visitLoadInst<br>
if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {<br>
// load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).<br>
unsigned Align = LI.getAlignment();<br>
- if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&<br>
- isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {<br>
+ if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, DL) &&<br>
+ isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, DL)) {<br>
LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),<br>
SI->getOperand(1)->getName()+".val");<br>
LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),<br>
@@ -572,13 +572,13 @@ Instruction *InstCombiner::visitStoreIns<br>
Value *Ptr = SI.getOperand(1);<br>
<br>
// Attempt to improve the alignment.<br>
- if (TD) {<br>
+ if (DL) {<br>
unsigned KnownAlign =<br>
- getOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()),<br>
- TD);<br>
+ getOrEnforceKnownAlignment(Ptr, DL->getPrefTypeAlignment(Val->getType()),<br>
+ DL);<br>
unsigned StoreAlign = SI.getAlignment();<br>
unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :<br>
- TD->getABITypeAlignment(Val->getType());<br>
+ DL->getABITypeAlignment(Val->getType());<br>
<br>
if (KnownAlign > EffectiveStoreAlign)<br>
SI.setAlignment(KnownAlign);<br>
<br>
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp Thu Feb 20 18:06:31 2014<br>
@@ -118,7 +118,7 @@ Instruction *InstCombiner::visitMul(Bina<br>
bool Changed = SimplifyAssociativeOrCommutative(I);<br>
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);<br>
<br>
- if (Value *V = SimplifyMulInst(Op0, Op1, TD))<br>
+ if (Value *V = SimplifyMulInst(Op0, Op1, DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
if (Value *V = SimplifyUsingDistributiveLaws(I))<br>
@@ -429,7 +429,7 @@ Instruction *InstCombiner::visitFMul(Bin<br>
if (isa<Constant>(Op0))<br>
std::swap(Op0, Op1);<br>
<br>
- if (Value *V = SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(), TD))<br>
+ if (Value *V = SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(), DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
bool AllowReassociate = I.hasUnsafeAlgebra();<br>
@@ -875,7 +875,7 @@ static size_t visitUDivOperand(Value *Op<br>
Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {<br>
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);<br>
<br>
- if (Value *V = SimplifyUDivInst(Op0, Op1, TD))<br>
+ if (Value *V = SimplifyUDivInst(Op0, Op1, DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
// Handle the integer div common cases<br>
@@ -934,7 +934,7 @@ Instruction *InstCombiner::visitUDiv(Bin<br>
Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {<br>
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);<br>
<br>
- if (Value *V = SimplifySDivInst(Op0, Op1, TD))<br>
+ if (Value *V = SimplifySDivInst(Op0, Op1, DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
// Handle the integer div common cases<br>
@@ -1020,7 +1020,7 @@ static Instruction *CvtFDivConstToRecipr<br>
Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {<br>
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);<br>
<br>
- if (Value *V = SimplifyFDivInst(Op0, Op1, TD))<br>
+ if (Value *V = SimplifyFDivInst(Op0, Op1, DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
if (isa<Constant>(Op0))<br>
@@ -1182,7 +1182,7 @@ Instruction *InstCombiner::commonIRemTra<br>
Instruction *InstCombiner::visitURem(BinaryOperator &I) {<br>
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);<br>
<br>
- if (Value *V = SimplifyURemInst(Op0, Op1, TD))<br>
+ if (Value *V = SimplifyURemInst(Op0, Op1, DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
if (Instruction *common = commonIRemTransforms(I))<br>
@@ -1214,7 +1214,7 @@ Instruction *InstCombiner::visitURem(Bin<br>
Instruction *InstCombiner::visitSRem(BinaryOperator &I) {<br>
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);<br>
<br>
- if (Value *V = SimplifySRemInst(Op0, Op1, TD))<br>
+ if (Value *V = SimplifySRemInst(Op0, Op1, DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
// Handle the integer rem common cases<br>
@@ -1285,7 +1285,7 @@ Instruction *InstCombiner::visitSRem(Bin<br>
Instruction *InstCombiner::visitFRem(BinaryOperator &I) {<br>
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);<br>
<br>
- if (Value *V = SimplifyFRemInst(Op0, Op1, TD))<br>
+ if (Value *V = SimplifyFRemInst(Op0, Op1, DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
// Handle cases involving: rem X, (select Cond, Y, Z)<br>
<br>
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombinePHI.cpp Thu Feb 20 18:06:31 2014<br>
@@ -790,7 +790,7 @@ Instruction *InstCombiner::SliceUpIllega<br>
// PHINode simplification<br>
//<br>
Instruction *InstCombiner::visitPHINode(PHINode &PN) {<br>
- if (Value *V = SimplifyInstruction(&PN, TD, TLI))<br>
+ if (Value *V = SimplifyInstruction(&PN, DL, TLI))<br>
return ReplaceInstUsesWith(PN, V);<br>
<br>
// If all PHI operands are the same operation, pull them through the PHI,<br>
@@ -893,8 +893,8 @@ Instruction *InstCombiner::visitPHINode(<br>
// it is only used by trunc or trunc(lshr) operations. If so, we split the<br>
// PHI into the various pieces being extracted. This sort of thing is<br>
// introduced when SROA promotes an aggregate to a single large integer type.<br>
- if (PN.getType()->isIntegerTy() && TD &&<br>
- !TD->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))<br>
+ if (PN.getType()->isIntegerTy() && DL &&<br>
+ !DL->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))<br>
if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))<br>
return Res;<br>
<br>
<br>
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp Thu Feb 20 18:06:31 2014<br>
@@ -554,18 +554,18 @@ Instruction *InstCombiner::visitSelectIn<br>
// arms of the select. See if substituting this value into the arm and<br>
// simplifying the result yields the same value as the other arm.<br>
if (Pred == ICmpInst::ICMP_EQ) {<br>
- if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, TD, TLI) == TrueVal ||<br>
- SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, TD, TLI) == TrueVal)<br>
+ if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, DL, TLI) == TrueVal ||<br>
+ SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, DL, TLI) == TrueVal)<br>
return ReplaceInstUsesWith(SI, FalseVal);<br>
- if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, TD, TLI) == FalseVal ||<br>
- SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, TD, TLI) == FalseVal)<br>
+ if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, DL, TLI) == FalseVal ||<br>
+ SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, DL, TLI) == FalseVal)<br>
return ReplaceInstUsesWith(SI, FalseVal);<br>
} else if (Pred == ICmpInst::ICMP_NE) {<br>
- if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, TD, TLI) == FalseVal ||<br>
- SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, TD, TLI) == FalseVal)<br>
+ if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, DL, TLI) == FalseVal ||<br>
+ SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, DL, TLI) == FalseVal)<br>
return ReplaceInstUsesWith(SI, TrueVal);<br>
- if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, TD, TLI) == TrueVal ||<br>
- SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, TD, TLI) == TrueVal)<br>
+ if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, DL, TLI) == TrueVal ||<br>
+ SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, DL, TLI) == TrueVal)<br>
return ReplaceInstUsesWith(SI, TrueVal);<br>
}<br>
<br>
@@ -734,7 +734,7 @@ Instruction *InstCombiner::visitSelectIn<br>
Value *TrueVal = SI.getTrueValue();<br>
Value *FalseVal = SI.getFalseValue();<br>
<br>
- if (Value *V = SimplifySelectInst(CondVal, TrueVal, FalseVal, TD))<br>
+ if (Value *V = SimplifySelectInst(CondVal, TrueVal, FalseVal, DL))<br>
return ReplaceInstUsesWith(SI, V);<br>
<br>
if (SI.getType()->isIntegerTy(1)) {<br>
<br>
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp Thu Feb 20 18:06:31 2014<br>
@@ -677,7 +677,7 @@ Instruction *InstCombiner::FoldShiftByCo<br>
Instruction *InstCombiner::visitShl(BinaryOperator &I) {<br>
if (Value *V = SimplifyShlInst(I.getOperand(0), I.getOperand(1),<br>
I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),<br>
- TD))<br>
+ DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
if (Instruction *V = commonShiftTransforms(I))<br>
@@ -714,7 +714,7 @@ Instruction *InstCombiner::visitShl(Bina<br>
<br>
Instruction *InstCombiner::visitLShr(BinaryOperator &I) {<br>
if (Value *V = SimplifyLShrInst(I.getOperand(0), I.getOperand(1),<br>
- I.isExact(), TD))<br>
+ I.isExact(), DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
if (Instruction *R = commonShiftTransforms(I))<br>
@@ -754,7 +754,7 @@ Instruction *InstCombiner::visitLShr(Bin<br>
<br>
Instruction *InstCombiner::visitAShr(BinaryOperator &I) {<br>
if (Value *V = SimplifyAShrInst(I.getOperand(0), I.getOperand(1),<br>
- I.isExact(), TD))<br>
+ I.isExact(), DL))<br>
return ReplaceInstUsesWith(I, V);<br>
<br>
if (Instruction *R = commonShiftTransforms(I))<br>
<br>
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp Thu Feb 20 18:06:31 2014<br>
@@ -105,9 +105,9 @@ Value *InstCombiner::SimplifyDemandedUse<br>
assert(Depth <= 6 && "Limit Search Depth");<br>
uint32_t BitWidth = DemandedMask.getBitWidth();<br>
Type *VTy = V->getType();<br>
- assert((TD || !VTy->isPointerTy()) &&<br>
+ assert((DL || !VTy->isPointerTy()) &&<br>
"SimplifyDemandedBits needs to know bit widths!");<br>
- assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&<br>
+ assert((!DL || DL->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&<br>
(!VTy->isIntOrIntVectorTy() ||<br>
VTy->getScalarSizeInBits() == BitWidth) &&<br>
KnownZero.getBitWidth() == BitWidth &&<br>
<br>
Modified: llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp Thu Feb 20 18:06:31 2014<br>
@@ -103,13 +103,13 @@ Value *InstCombiner::EmitGEPOffset(User<br>
bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {<br>
assert(From->isIntegerTy() && To->isIntegerTy());<br>
<br>
- // If we don't have TD, we don't know if the source/dest are legal.<br>
- if (!TD) return false;<br>
+ // If we don't have DL, we don't know if the source/dest are legal.<br>
+ if (!DL) return false;<br>
<br>
unsigned FromWidth = From->getPrimitiveSizeInBits();<br>
unsigned ToWidth = To->getPrimitiveSizeInBits();<br>
- bool FromLegal = TD->isLegalInteger(FromWidth);<br>
- bool ToLegal = TD->isLegalInteger(ToWidth);<br>
+ bool FromLegal = DL->isLegalInteger(FromWidth);<br>
+ bool ToLegal = DL->isLegalInteger(ToWidth);<br>
<br>
// If this is a legal integer from type, and the result would be an illegal<br>
// type, don't do the transformation.<br>
@@ -221,7 +221,7 @@ bool InstCombiner::SimplifyAssociativeOr<br>
Value *C = I.getOperand(1);<br>
<br>
// Does "B op C" simplify?<br>
- if (Value *V = SimplifyBinOp(Opcode, B, C, TD)) {<br>
+ if (Value *V = SimplifyBinOp(Opcode, B, C, DL)) {<br>
// It simplifies to V. Form "A op V".<br>
I.setOperand(0, A);<br>
I.setOperand(1, V);<br>
@@ -250,7 +250,7 @@ bool InstCombiner::SimplifyAssociativeOr<br>
Value *C = Op1->getOperand(1);<br>
<br>
// Does "A op B" simplify?<br>
- if (Value *V = SimplifyBinOp(Opcode, A, B, TD)) {<br>
+ if (Value *V = SimplifyBinOp(Opcode, A, B, DL)) {<br>
// It simplifies to V. Form "V op C".<br>
I.setOperand(0, V);<br>
I.setOperand(1, C);<br>
@@ -272,7 +272,7 @@ bool InstCombiner::SimplifyAssociativeOr<br>
Value *C = I.getOperand(1);<br>
<br>
// Does "C op A" simplify?<br>
- if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {<br>
+ if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {<br>
// It simplifies to V. Form "V op B".<br>
I.setOperand(0, V);<br>
I.setOperand(1, B);<br>
@@ -292,7 +292,7 @@ bool InstCombiner::SimplifyAssociativeOr<br>
Value *C = Op1->getOperand(1);<br>
<br>
// Does "C op A" simplify?<br>
- if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {<br>
+ if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {<br>
// It simplifies to V. Form "B op V".<br>
I.setOperand(0, B);<br>
I.setOperand(1, V);<br>
@@ -425,7 +425,7 @@ Value *InstCombiner::SimplifyUsingDistri<br>
std::swap(C, D);<br>
// Consider forming "A op' (B op D)".<br>
// If "B op D" simplifies then it can be formed with no cost.<br>
- Value *V = SimplifyBinOp(TopLevelOpcode, B, D, TD);<br>
+ Value *V = SimplifyBinOp(TopLevelOpcode, B, D, DL);<br>
// If "B op D" doesn't simplify then only go on if both of the existing<br>
// operations "A op' B" and "C op' D" will be zapped as no longer used.<br>
if (!V && Op0->hasOneUse() && Op1->hasOneUse())<br>
@@ -447,7 +447,7 @@ Value *InstCombiner::SimplifyUsingDistri<br>
std::swap(C, D);<br>
// Consider forming "(A op C) op' B".<br>
// If "A op C" simplifies then it can be formed with no cost.<br>
- Value *V = SimplifyBinOp(TopLevelOpcode, A, C, TD);<br>
+ Value *V = SimplifyBinOp(TopLevelOpcode, A, C, DL);<br>
// If "A op C" doesn't simplify then only go on if both of the existing<br>
// operations "A op' B" and "C op' D" will be zapped as no longer used.<br>
if (!V && Op0->hasOneUse() && Op1->hasOneUse())<br>
@@ -469,8 +469,8 @@ Value *InstCombiner::SimplifyUsingDistri<br>
Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'<br>
<br>
// Do "A op C" and "B op C" both simplify?<br>
- if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, TD))<br>
- if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, TD)) {<br>
+ if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, DL))<br>
+ if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, DL)) {<br>
// They do! Return "L op' R".<br>
++NumExpand;<br>
// If "L op' R" equals "A op' B" then "L op' R" is just the LHS.<br>
@@ -478,7 +478,7 @@ Value *InstCombiner::SimplifyUsingDistri<br>
(Instruction::isCommutative(InnerOpcode) && L == B && R == A))<br>
return Op0;<br>
// Otherwise return "L op' R" if it simplifies.<br>
- if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))<br>
+ if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))<br>
return V;<br>
// Otherwise, create a new instruction.<br>
C = Builder->CreateBinOp(InnerOpcode, L, R);<br>
@@ -494,8 +494,8 @@ Value *InstCombiner::SimplifyUsingDistri<br>
Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'<br>
<br>
// Do "A op B" and "A op C" both simplify?<br>
- if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, TD))<br>
- if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, TD)) {<br>
+ if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, DL))<br>
+ if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, DL)) {<br>
// They do! Return "L op' R".<br>
++NumExpand;<br>
// If "L op' R" equals "B op' C" then "L op' R" is just the RHS.<br>
@@ -503,7 +503,7 @@ Value *InstCombiner::SimplifyUsingDistri<br>
(Instruction::isCommutative(InnerOpcode) && L == C && R == B))<br>
return Op1;<br>
// Otherwise return "L op' R" if it simplifies.<br>
- if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))<br>
+ if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))<br>
return V;<br>
// Otherwise, create a new instruction.<br>
A = Builder->CreateBinOp(InnerOpcode, L, R);<br>
@@ -777,7 +777,7 @@ Type *InstCombiner::FindElementAtOffset(<br>
SmallVectorImpl<Value*> &NewIndices) {<br>
assert(PtrTy->isPtrOrPtrVectorTy());<br>
<br>
- if (!TD)<br>
+ if (!DL)<br>
return 0;<br>
<br>
Type *Ty = PtrTy->getPointerElementType();<br>
@@ -787,9 +787,9 @@ Type *InstCombiner::FindElementAtOffset(<br>
// Start with the index over the outer type. Note that the type size<br>
// might be zero (even if the offset isn't zero) if the indexed type<br>
// is something like [0 x {int, int}]<br>
- Type *IntPtrTy = TD->getIntPtrType(PtrTy);<br>
+ Type *IntPtrTy = DL->getIntPtrType(PtrTy);<br>
int64_t FirstIdx = 0;<br>
- if (int64_t TySize = TD->getTypeAllocSize(Ty)) {<br>
+ if (int64_t TySize = DL->getTypeAllocSize(Ty)) {<br>
FirstIdx = Offset/TySize;<br>
Offset -= FirstIdx*TySize;<br>
<br>
@@ -807,11 +807,11 @@ Type *InstCombiner::FindElementAtOffset(<br>
// Index into the types. If we fail, set OrigBase to null.<br>
while (Offset) {<br>
// Indexing into tail padding between struct/array elements.<br>
- if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))<br>
+ if (uint64_t(Offset*8) >= DL->getTypeSizeInBits(Ty))<br>
return 0;<br>
<br>
if (StructType *STy = dyn_cast<StructType>(Ty)) {<br>
- const StructLayout *SL = TD->getStructLayout(STy);<br>
+ const StructLayout *SL = DL->getStructLayout(STy);<br>
assert(Offset < (int64_t)SL->getSizeInBytes() &&<br>
"Offset must stay within the indexed type");<br>
<br>
@@ -822,7 +822,7 @@ Type *InstCombiner::FindElementAtOffset(<br>
Offset -= SL->getElementOffset(Elt);<br>
Ty = STy->getElementType(Elt);<br>
} else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {<br>
- uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());<br>
+ uint64_t EltSize = DL->getTypeAllocSize(AT->getElementType());<br>
assert(EltSize && "Cannot index into a zero-sized array");<br>
NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));<br>
Offset %= EltSize;<br>
@@ -1087,16 +1087,16 @@ Value *InstCombiner::Descale(Value *Val,<br>
Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {<br>
SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());<br>
<br>
- if (Value *V = SimplifyGEPInst(Ops, TD))<br>
+ if (Value *V = SimplifyGEPInst(Ops, DL))<br>
return ReplaceInstUsesWith(GEP, V);<br>
<br>
Value *PtrOp = GEP.getOperand(0);<br>
<br>
// Eliminate unneeded casts for indices, and replace indices which displace<br>
// by multiples of a zero size type with zero.<br>
- if (TD) {<br>
+ if (DL) {<br>
bool MadeChange = false;<br>
- Type *IntPtrTy = TD->getIntPtrType(GEP.getPointerOperandType());<br>
+ Type *IntPtrTy = DL->getIntPtrType(GEP.getPointerOperandType());<br>
<br>
gep_type_iterator GTI = gep_type_begin(GEP);<br>
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();<br>
@@ -1108,7 +1108,7 @@ Instruction *InstCombiner::visitGetEleme<br>
// If the element type has zero size then any index over it is equivalent<br>
// to an index of zero, so replace it with zero if it is not zero already.<br>
if (SeqTy->getElementType()->isSized() &&<br>
- TD->getTypeAllocSize(SeqTy->getElementType()) == 0)<br>
+ DL->getTypeAllocSize(SeqTy->getElementType()) == 0)<br>
if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {<br>
*I = Constant::getNullValue(IntPtrTy);<br>
MadeChange = true;<br>
@@ -1199,12 +1199,12 @@ Instruction *InstCombiner::visitGetEleme<br>
// Canonicalize (gep i8* X, -(ptrtoint Y)) to (sub (ptrtoint X), (ptrtoint Y))<br>
// The GEP pattern is emitted by the SCEV expander for certain kinds of<br>
// pointer arithmetic.<br>
- if (TD && GEP.getNumIndices() == 1 &&<br>
+ if (DL && GEP.getNumIndices() == 1 &&<br>
match(GEP.getOperand(1), m_Neg(m_PtrToInt(m_Value())))) {<br>
unsigned AS = GEP.getPointerAddressSpace();<br>
if (GEP.getType() == Builder->getInt8PtrTy(AS) &&<br>
GEP.getOperand(1)->getType()->getScalarSizeInBits() ==<br>
- TD->getPointerSizeInBits(AS)) {<br>
+ DL->getPointerSizeInBits(AS)) {<br>
Operator *Index = cast<Operator>(GEP.getOperand(1));<br>
Value *PtrToInt = Builder->CreatePtrToInt(PtrOp, Index->getType());<br>
Value *NewSub = Builder->CreateSub(PtrToInt, Index->getOperand(1));<br>
@@ -1266,10 +1266,10 @@ Instruction *InstCombiner::visitGetEleme<br>
// into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast<br>
Type *SrcElTy = StrippedPtrTy->getElementType();<br>
Type *ResElTy = PtrOp->getType()->getPointerElementType();<br>
- if (TD && SrcElTy->isArrayTy() &&<br>
- TD->getTypeAllocSize(SrcElTy->getArrayElementType()) ==<br>
- TD->getTypeAllocSize(ResElTy)) {<br>
- Type *IdxType = TD->getIntPtrType(GEP.getType());<br>
+ if (DL && SrcElTy->isArrayTy() &&<br>
+ DL->getTypeAllocSize(SrcElTy->getArrayElementType()) ==<br>
+ DL->getTypeAllocSize(ResElTy)) {<br>
+ Type *IdxType = DL->getIntPtrType(GEP.getType());<br>
Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };<br>
Value *NewGEP = GEP.isInBounds() ?<br>
Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :<br>
@@ -1285,11 +1285,11 @@ Instruction *InstCombiner::visitGetEleme<br>
// %V = mul i64 %N, 4<br>
// %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V<br>
// into: %t1 = getelementptr i32* %arr, i32 %N; bitcast<br>
- if (TD && ResElTy->isSized() && SrcElTy->isSized()) {<br>
+ if (DL && ResElTy->isSized() && SrcElTy->isSized()) {<br>
// Check that changing the type amounts to dividing the index by a scale<br>
// factor.<br>
- uint64_t ResSize = TD->getTypeAllocSize(ResElTy);<br>
- uint64_t SrcSize = TD->getTypeAllocSize(SrcElTy);<br>
+ uint64_t ResSize = DL->getTypeAllocSize(ResElTy);<br>
+ uint64_t SrcSize = DL->getTypeAllocSize(SrcElTy);<br>
if (ResSize && SrcSize % ResSize == 0) {<br>
Value *Idx = GEP.getOperand(1);<br>
unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();<br>
@@ -1297,7 +1297,7 @@ Instruction *InstCombiner::visitGetEleme<br>
<br>
// Earlier transforms ensure that the index has type IntPtrType, which<br>
// considerably simplifies the logic by eliminating implicit casts.<br>
- assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&<br>
+ assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&<br>
"Index not cast to pointer width?");<br>
<br>
bool NSW;<br>
@@ -1321,13 +1321,13 @@ Instruction *InstCombiner::visitGetEleme<br>
// getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp<br>
// (where tmp = 8*tmp2) into:<br>
// getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast<br>
- if (TD && ResElTy->isSized() && SrcElTy->isSized() &&<br>
+ if (DL && ResElTy->isSized() && SrcElTy->isSized() &&<br>
SrcElTy->isArrayTy()) {<br>
// Check that changing to the array element type amounts to dividing the<br>
// index by a scale factor.<br>
- uint64_t ResSize = TD->getTypeAllocSize(ResElTy);<br>
+ uint64_t ResSize = DL->getTypeAllocSize(ResElTy);<br>
uint64_t ArrayEltSize<br>
- = TD->getTypeAllocSize(SrcElTy->getArrayElementType());<br>
+ = DL->getTypeAllocSize(SrcElTy->getArrayElementType());<br>
if (ResSize && ArrayEltSize % ResSize == 0) {<br>
Value *Idx = GEP.getOperand(1);<br>
unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();<br>
@@ -1335,7 +1335,7 @@ Instruction *InstCombiner::visitGetEleme<br>
<br>
// Earlier transforms ensure that the index has type IntPtrType, which<br>
// considerably simplifies the logic by eliminating implicit casts.<br>
- assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&<br>
+ assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&<br>
"Index not cast to pointer width?");<br>
<br>
bool NSW;<br>
@@ -1344,7 +1344,7 @@ Instruction *InstCombiner::visitGetEleme<br>
// If the multiplication NewIdx * Scale may overflow then the new<br>
// GEP may not be "inbounds".<br>
Value *Off[2] = {<br>
- Constant::getNullValue(TD->getIntPtrType(GEP.getType())),<br>
+ Constant::getNullValue(DL->getIntPtrType(GEP.getType())),<br>
NewIdx<br>
};<br>
<br>
@@ -1361,7 +1361,7 @@ Instruction *InstCombiner::visitGetEleme<br>
}<br>
}<br>
<br>
- if (!TD)<br>
+ if (!DL)<br>
return 0;<br>
<br>
/// See if we can simplify:<br>
@@ -1372,10 +1372,10 @@ Instruction *InstCombiner::visitGetEleme<br>
if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {<br>
Value *Operand = BCI->getOperand(0);<br>
PointerType *OpType = cast<PointerType>(Operand->getType());<br>
- unsigned OffsetBits = TD->getPointerTypeSizeInBits(OpType);<br>
+ unsigned OffsetBits = DL->getPointerTypeSizeInBits(OpType);<br>
APInt Offset(OffsetBits, 0);<br>
if (!isa<BitCastInst>(Operand) &&<br>
- GEP.accumulateConstantOffset(*TD, Offset) &&<br>
+ GEP.accumulateConstantOffset(*DL, Offset) &&<br>
StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {<br>
<br>
// If this GEP instruction doesn't move the pointer, just replace the GEP<br>
@@ -2231,7 +2231,7 @@ static bool TryToSinkInstruction(Instruc<br>
static bool AddReachableCodeToWorklist(BasicBlock *BB,<br>
SmallPtrSet<BasicBlock*, 64> &Visited,<br>
InstCombiner &IC,<br>
- const DataLayout *TD,<br>
+ const DataLayout *DL,<br>
const TargetLibraryInfo *TLI) {<br>
bool MadeIRChange = false;<br>
SmallVector<BasicBlock*, 256> Worklist;<br>
@@ -2259,7 +2259,7 @@ static bool AddReachableCodeToWorklist(B<br>
<br>
// ConstantProp instruction if trivially constant.<br>
if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))<br>
- if (Constant *C = ConstantFoldInstruction(Inst, TD, TLI)) {<br>
+ if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) {<br>
DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: "<br>
<< *Inst << '\n');<br>
Inst->replaceAllUsesWith(C);<br>
@@ -2268,7 +2268,7 @@ static bool AddReachableCodeToWorklist(B<br>
continue;<br>
}<br>
<br>
- if (TD) {<br>
+ if (DL) {<br>
// See if we can constant fold its operands.<br>
for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();<br>
i != e; ++i) {<br>
@@ -2277,7 +2277,7 @@ static bool AddReachableCodeToWorklist(B<br>
<br>
Constant*& FoldRes = FoldedConstants[CE];<br>
if (!FoldRes)<br>
- FoldRes = ConstantFoldConstantExpression(CE, TD, TLI);<br>
+ FoldRes = ConstantFoldConstantExpression(CE, DL, TLI);<br>
if (!FoldRes)<br>
FoldRes = CE;<br>
<br>
@@ -2344,7 +2344,7 @@ bool InstCombiner::DoOneIteration(Functi<br>
// the reachable instructions. Ignore blocks that are not reachable. Keep<br>
// track of which blocks we visit.<br>
SmallPtrSet<BasicBlock*, 64> Visited;<br>
- MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD,<br>
+ MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, DL,<br>
TLI);<br>
<br>
// Do a quick scan over the function. If we find any blocks that are<br>
@@ -2390,7 +2390,7 @@ bool InstCombiner::DoOneIteration(Functi<br>
<br>
// Instruction isn't dead, see if we can constant propagate it.<br>
if (!I->use_empty() && isa<Constant>(I->getOperand(0)))<br>
- if (Constant *C = ConstantFoldInstruction(I, TD, TLI)) {<br>
+ if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {<br>
DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');<br>
<br>
// Add operands to the worklist.<br>
@@ -2499,10 +2499,10 @@ namespace {<br>
class InstCombinerLibCallSimplifier : public LibCallSimplifier {<br>
InstCombiner *IC;<br>
public:<br>
- InstCombinerLibCallSimplifier(const DataLayout *TD,<br>
+ InstCombinerLibCallSimplifier(const DataLayout *DL,<br>
const TargetLibraryInfo *TLI,<br>
InstCombiner *IC)<br>
- : LibCallSimplifier(TD, TLI, UnsafeFPShrink) {<br>
+ : LibCallSimplifier(DL, TLI, UnsafeFPShrink) {<br>
this->IC = IC;<br>
}<br>
<br>
@@ -2518,7 +2518,7 @@ bool InstCombiner::runOnFunction(Functio<br>
if (skipOptnoneFunction(F))<br>
return false;<br>
<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
TLI = &getAnalysis<TargetLibraryInfo>();<br>
// Minimizing size?<br>
MinimizeSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,<br>
@@ -2527,11 +2527,11 @@ bool InstCombiner::runOnFunction(Functio<br>
/// Builder - This is an IRBuilder that automatically inserts new<br>
/// instructions into the worklist when they are created.<br>
IRBuilder<true, TargetFolder, InstCombineIRInserter><br>
- TheBuilder(F.getContext(), TargetFolder(TD),<br>
+ TheBuilder(F.getContext(), TargetFolder(DL),<br>
InstCombineIRInserter(Worklist));<br>
Builder = &TheBuilder;<br>
<br>
- InstCombinerLibCallSimplifier TheSimplifier(TD, TLI, this);<br>
+ InstCombinerLibCallSimplifier TheSimplifier(DL, TLI, this);<br>
Simplifier = &TheSimplifier;<br>
<br>
bool EverMadeChange = false;<br>
<br>
Modified: llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Instrumentation/AddressSanitizer.cpp Thu Feb 20 18:06:31 2014<br>
@@ -336,7 +336,7 @@ struct AddressSanitizer : public Functio<br>
SmallString<64> BlacklistFile;<br>
<br>
LLVMContext *C;<br>
- DataLayout *TD;<br>
+ DataLayout *DL;<br>
int LongSize;<br>
Type *IntptrTy;<br>
ShadowMapping Mapping;<br>
@@ -385,7 +385,7 @@ class AddressSanitizerModule : public Mo<br>
SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals;<br>
Type *IntptrTy;<br>
LLVMContext *C;<br>
- DataLayout *TD;<br>
+ DataLayout *DL;<br>
ShadowMapping Mapping;<br>
Function *AsanPoisonGlobals;<br>
Function *AsanUnpoisonGlobals;<br>
@@ -516,7 +516,7 @@ struct FunctionStackPoisoner : public In<br>
<br>
uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {<br>
Type *Ty = AI->getAllocatedType();<br>
- uint64_t SizeInBytes = ASan.TD->getTypeAllocSize(Ty);<br>
+ uint64_t SizeInBytes = ASan.DL->getTypeAllocSize(Ty);<br>
return SizeInBytes;<br>
}<br>
/// Finds alloca where the value comes from.<br>
@@ -691,7 +691,7 @@ void AddressSanitizer::instrumentMop(Ins<br>
Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();<br>
<br>
assert(OrigTy->isSized());<br>
- uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);<br>
+ uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);<br>
<br>
assert((TypeSize % 8) == 0);<br>
<br>
@@ -912,13 +912,13 @@ void AddressSanitizerModule::initializeC<br>
// redzones and inserts this function into llvm.global_ctors.<br>
bool AddressSanitizerModule::runOnModule(Module &M) {<br>
if (!ClGlobals) return false;<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
- if (!TD)<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
+ if (!DL)<br>
return false;<br>
BL.reset(SpecialCaseList::createOrDie(BlacklistFile));<br>
if (BL->isIn(M)) return false;<br>
C = &(M.getContext());<br>
- int LongSize = TD->getPointerSizeInBits();<br>
+ int LongSize = DL->getPointerSizeInBits();<br>
IntptrTy = Type::getIntNTy(*C, LongSize);<br>
Mapping = getShadowMapping(M, LongSize);<br>
initializeCallbacks(M);<br>
@@ -964,7 +964,7 @@ bool AddressSanitizerModule::runOnModule<br>
GlobalVariable *G = GlobalsToChange[i];<br>
PointerType *PtrTy = cast<PointerType>(G->getType());<br>
Type *Ty = PtrTy->getElementType();<br>
- uint64_t SizeInBytes = TD->getTypeAllocSize(Ty);<br>
+ uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);<br>
uint64_t MinRZ = MinRedzoneSizeForGlobal();<br>
// MinRZ <= RZ <= kMaxGlobalRedzone<br>
// and trying to make RZ to be ~ 1/4 of SizeInBytes.<br>
@@ -1105,15 +1105,15 @@ void AddressSanitizer::emitShadowMapping<br>
// virtual<br>
bool AddressSanitizer::doInitialization(Module &M) {<br>
// Initialize the private fields. No one has accessed them before.<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
<br>
- if (!TD)<br>
+ if (!DL)<br>
return false;<br>
BL.reset(SpecialCaseList::createOrDie(BlacklistFile));<br>
DynamicallyInitializedGlobals.Init(M);<br>
<br>
C = &(M.getContext());<br>
- LongSize = TD->getPointerSizeInBits();<br>
+ LongSize = DL->getPointerSizeInBits();<br>
IntptrTy = Type::getIntNTy(*C, LongSize);<br>
<br>
AsanCtorFunction = Function::Create(<br>
@@ -1378,7 +1378,7 @@ FunctionStackPoisoner::poisonRedZones(co<br>
for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) {<br>
uint64_t Val = 0;<br>
for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {<br>
- if (ASan.TD->isLittleEndian())<br>
+ if (ASan.DL->isLittleEndian())<br>
Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);<br>
else<br>
Val = (Val << 8) | ShadowBytes[i + j];<br>
<br>
Modified: llvm/trunk/lib/Transforms/Instrumentation/BoundsChecking.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/BoundsChecking.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/BoundsChecking.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Instrumentation/BoundsChecking.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Instrumentation/BoundsChecking.cpp Thu Feb 20 18:06:31 2014<br>
@@ -53,7 +53,7 @@ namespace {<br>
}<br>
<br>
private:<br>
- const DataLayout *TD;<br>
+ const DataLayout *DL;<br>
const TargetLibraryInfo *TLI;<br>
ObjectSizeOffsetEvaluator *ObjSizeEval;<br>
BuilderTy *Builder;<br>
@@ -127,7 +127,7 @@ void BoundsChecking::emitBranchToTrap(Va<br>
/// size of memory block that is touched.<br>
/// Returns true if any change was made to the IR, false otherwise.<br>
bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {<br>
- uint64_t NeededSize = TD->getTypeStoreSize(InstVal->getType());<br>
+ uint64_t NeededSize = DL->getTypeStoreSize(InstVal->getType());<br>
DEBUG(dbgs() << "Instrument " << *Ptr << " for " << Twine(NeededSize)<br>
<< " bytes\n");<br>
<br>
@@ -142,7 +142,7 @@ bool BoundsChecking::instrument(Value *P<br>
Value *Offset = SizeOffset.second;<br>
ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size);<br>
<br>
- Type *IntTy = TD->getIntPtrType(Ptr->getType());<br>
+ Type *IntTy = DL->getIntPtrType(Ptr->getType());<br>
Value *NeededSizeVal = ConstantInt::get(IntTy, NeededSize);<br>
<br>
// three checks are required to ensure safety:<br>
@@ -166,13 +166,13 @@ bool BoundsChecking::instrument(Value *P<br>
}<br>
<br>
bool BoundsChecking::runOnFunction(Function &F) {<br>
- TD = &getAnalysis<DataLayout>();<br>
+ DL = &getAnalysis<DataLayout>();<br>
TLI = &getAnalysis<TargetLibraryInfo>();<br>
<br>
TrapBB = 0;<br>
- BuilderTy TheBuilder(F.getContext(), TargetFolder(TD));<br>
+ BuilderTy TheBuilder(F.getContext(), TargetFolder(DL));<br>
Builder = &TheBuilder;<br>
- ObjectSizeOffsetEvaluator TheObjSizeEval(TD, TLI, F.getContext(),<br>
+ ObjectSizeOffsetEvaluator TheObjSizeEval(DL, TLI, F.getContext(),<br>
/*RoundToAlign=*/true);<br>
ObjSizeEval = &TheObjSizeEval;<br>
<br>
<br>
Modified: llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Instrumentation/MemorySanitizer.cpp Thu Feb 20 18:06:31 2014<br>
@@ -207,7 +207,7 @@ class MemorySanitizer : public FunctionP<br>
StringRef BlacklistFile = StringRef())<br>
: FunctionPass(ID),<br>
TrackOrigins(TrackOrigins || ClTrackOrigins),<br>
- TD(0),<br>
+ DL(0),<br>
WarningFn(0),<br>
BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile : BlacklistFile),<br>
WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}<br>
@@ -222,7 +222,7 @@ class MemorySanitizer : public FunctionP<br>
/// \brief Track origins (allocation points) of uninitialized values.<br>
bool TrackOrigins;<br>
<br>
- DataLayout *TD;<br>
+ DataLayout *DL;<br>
LLVMContext *C;<br>
Type *IntptrTy;<br>
Type *OriginTy;<br>
@@ -399,12 +399,12 @@ void MemorySanitizer::initializeCallback<br>
///<br>
/// inserts a call to __msan_init to the module's constructor list.<br>
bool MemorySanitizer::doInitialization(Module &M) {<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
- if (!TD)<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
+ if (!DL)<br>
return false;<br>
BL.reset(SpecialCaseList::createOrDie(BlacklistFile));<br>
C = &(M.getContext());<br>
- unsigned PtrSize = TD->getPointerSizeInBits(/* AddressSpace */0);<br>
+ unsigned PtrSize = DL->getPointerSizeInBits(/* AddressSpace */0);<br>
switch (PtrSize) {<br>
case 64:<br>
ShadowMask = kShadowMask64;<br>
@@ -420,7 +420,7 @@ bool MemorySanitizer::doInitialization(M<br>
}<br>
<br>
IRBuilder<> IRB(*C);<br>
- IntptrTy = IRB.getIntPtrTy(TD);<br>
+ IntptrTy = IRB.getIntPtrTy(DL);<br>
OriginTy = IRB.getInt32Ty();<br>
<br>
ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);<br>
@@ -650,7 +650,7 @@ struct MemorySanitizerVisitor : public I<br>
/// \brief Add MemorySanitizer instrumentation to a function.<br>
bool runOnFunction() {<br>
MS.initializeCallbacks(*F.getParent());<br>
- if (!MS.TD) return false;<br>
+ if (!MS.DL) return false;<br>
<br>
// In the presence of unreachable blocks, we may see Phi nodes with<br>
// incoming nodes from such blocks. Since InstVisitor skips unreachable<br>
@@ -710,7 +710,7 @@ struct MemorySanitizerVisitor : public I<br>
if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))<br>
return IT;<br>
if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {<br>
- uint32_t EltSize = MS.TD->getTypeSizeInBits(VT->getElementType());<br>
+ uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());<br>
return VectorType::get(IntegerType::get(*MS.C, EltSize),<br>
VT->getNumElements());<br>
}<br>
@@ -722,7 +722,7 @@ struct MemorySanitizerVisitor : public I<br>
DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");<br>
return Res;<br>
}<br>
- uint32_t TypeSize = MS.TD->getTypeSizeInBits(OrigTy);<br>
+ uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);<br>
return IntegerType::get(*MS.C, TypeSize);<br>
}<br>
<br>
@@ -889,8 +889,8 @@ struct MemorySanitizerVisitor : public I<br>
continue;<br>
}<br>
unsigned Size = AI->hasByValAttr()<br>
- ? MS.TD->getTypeAllocSize(AI->getType()->getPointerElementType())<br>
- : MS.TD->getTypeAllocSize(AI->getType());<br>
+ ? MS.DL->getTypeAllocSize(AI->getType()->getPointerElementType())<br>
+ : MS.DL->getTypeAllocSize(AI->getType());<br>
if (A == AI) {<br>
Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);<br>
if (AI->hasByValAttr()) {<br>
@@ -900,7 +900,7 @@ struct MemorySanitizerVisitor : public I<br>
unsigned ArgAlign = AI->getParamAlignment();<br>
if (ArgAlign == 0) {<br>
Type *EltType = A->getType()->getPointerElementType();<br>
- ArgAlign = MS.TD->getABITypeAlignment(EltType);<br>
+ ArgAlign = MS.DL->getABITypeAlignment(EltType);<br>
}<br>
unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);<br>
Value *Cpy = EntryIRB.CreateMemCpy(<br>
@@ -1935,13 +1935,13 @@ struct MemorySanitizerVisitor : public I<br>
if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {<br>
assert(A->getType()->isPointerTy() &&<br>
"ByVal argument is not a pointer!");<br>
- Size = MS.TD->getTypeAllocSize(A->getType()->getPointerElementType());<br>
+ Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType());<br>
unsigned Alignment = CS.getParamAlignment(i + 1);<br>
Store = IRB.CreateMemCpy(ArgShadowBase,<br>
getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),<br>
Size, Alignment);<br>
} else {<br>
- Size = MS.TD->getTypeAllocSize(A->getType());<br>
+ Size = MS.DL->getTypeAllocSize(A->getType());<br>
Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,<br>
kShadowTLSAlignment);<br>
}<br>
@@ -2024,7 +2024,7 @@ struct MemorySanitizerVisitor : public I<br>
void visitAllocaInst(AllocaInst &I) {<br>
setShadow(&I, getCleanShadow(&I));<br>
IRBuilder<> IRB(I.getNextNode());<br>
- uint64_t Size = MS.TD->getTypeAllocSize(I.getAllocatedType());<br>
+ uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType());<br>
if (PoisonStack && ClPoisonStackWithCall) {<br>
IRB.CreateCall2(MS.MsanPoisonStackFn,<br>
IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),<br>
@@ -2223,7 +2223,7 @@ struct VarArgAMD64Helper : public VarArg<br>
FpOffset += 16;<br>
break;<br>
case AK_Memory:<br>
- uint64_t ArgSize = MS.TD->getTypeAllocSize(A->getType());<br>
+ uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());<br>
Base = getShadowPtrForVAArgument(A, IRB, OverflowOffset);<br>
OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);<br>
}<br>
<br>
Modified: llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Instrumentation/ThreadSanitizer.cpp Thu Feb 20 18:06:31 2014<br>
@@ -78,7 +78,7 @@ namespace {<br>
struct ThreadSanitizer : public FunctionPass {<br>
ThreadSanitizer(StringRef BlacklistFile = StringRef())<br>
: FunctionPass(ID),<br>
- TD(0),<br>
+ DL(0),<br>
BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile<br>
: BlacklistFile) { }<br>
const char *getPassName() const;<br>
@@ -96,7 +96,7 @@ struct ThreadSanitizer : public Function<br>
bool addrPointsToConstantData(Value *Addr);<br>
int getMemoryAccessFuncIndex(Value *Addr);<br>
<br>
- DataLayout *TD;<br>
+ DataLayout *DL;<br>
Type *IntptrTy;<br>
SmallString<64> BlacklistFile;<br>
OwningPtr<SpecialCaseList> BL;<br>
@@ -224,14 +224,14 @@ void ThreadSanitizer::initializeCallback<br>
}<br>
<br>
bool ThreadSanitizer::doInitialization(Module &M) {<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
- if (!TD)<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
+ if (!DL)<br>
return false;<br>
BL.reset(SpecialCaseList::createOrDie(BlacklistFile));<br>
<br>
// Always insert a call to __tsan_init into the module's CTORs.<br>
IRBuilder<> IRB(M.getContext());<br>
- IntptrTy = IRB.getIntPtrTy(TD);<br>
+ IntptrTy = IRB.getIntPtrTy(DL);<br>
Value *TsanInit = M.getOrInsertFunction("__tsan_init",<br>
IRB.getVoidTy(), NULL);<br>
appendToGlobalCtors(M, cast<Function>(TsanInit), 0);<br>
@@ -320,7 +320,7 @@ static bool isAtomic(Instruction *I) {<br>
}<br>
<br>
bool ThreadSanitizer::runOnFunction(Function &F) {<br>
- if (!TD) return false;<br>
+ if (!DL) return false;<br>
if (BL->isIn(F)) return false;<br>
initializeCallbacks(*F.getParent());<br>
SmallVector<Instruction*, 8> RetVec;<br>
@@ -573,7 +573,7 @@ int ThreadSanitizer::getMemoryAccessFunc<br>
Type *OrigPtrTy = Addr->getType();<br>
Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();<br>
assert(OrigTy->isSized());<br>
- uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);<br>
+ uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);<br>
if (TypeSize != 8 && TypeSize != 16 &&<br>
TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {<br>
NumAccessesWithBadSize++;<br>
<br>
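getMemoryAccessFuncIndex above only accepts accesses of 8, 16, 32, 64 or 128 bits. A self-contained sketch of how such a size screen can map to a callback index (the 8-&gt;0 ... 128-&gt;4 mapping is an assumption for illustration, not necessarily tsan's exact table):<br>
<br>
#include &lt;cstdint&gt;<br>
<br>
// Reject unsupported access sizes, otherwise return log2 of the byte<br>
// count: 8 bits -&gt; 0, 16 -&gt; 1, 32 -&gt; 2, 64 -&gt; 3, 128 -&gt; 4.<br>
static int memoryAccessFuncIndex(uint32_t TypeSize) {<br>
  if (TypeSize != 8 && TypeSize != 16 && TypeSize != 32 &&<br>
      TypeSize != 64 && TypeSize != 128)<br>
    return -1;<br>
  int Idx = 0;<br>
  for (uint32_t S = 8; S != TypeSize; S <<= 1)<br>
    ++Idx;<br>
  return Idx;<br>
}<br>
<br>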
Modified: llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Scalar/EarlyCSE.cpp Thu Feb 20 18:06:31 2014<br>
@@ -262,7 +262,7 @@ namespace {<br>
/// cases.<br>
class EarlyCSE : public FunctionPass {<br>
public:<br>
- const DataLayout *TD;<br>
+ const DataLayout *DL;<br>
const TargetLibraryInfo *TLI;<br>
DominatorTree *DT;<br>
typedef RecyclingAllocator<BumpPtrAllocator,<br>
@@ -432,7 +432,7 @@ bool EarlyCSE::processNode(DomTreeNode *<br>
<br>
// If the instruction can be simplified (e.g. X+0 = X) then replace it with<br>
// its simpler value.<br>
- if (Value *V = SimplifyInstruction(Inst, TD, TLI, DT)) {<br>
+ if (Value *V = SimplifyInstruction(Inst, DL, TLI, DT)) {<br>
DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n');<br>
Inst->replaceAllUsesWith(V);<br>
Inst->eraseFromParent();<br>
@@ -557,7 +557,7 @@ bool EarlyCSE::runOnFunction(Function &F<br>
<br>
std::vector<StackNode *> nodesToProcess;<br>
<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
TLI = &getAnalysis<TargetLibraryInfo>();<br>
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();<br>
<br>
<br>
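EarlyCSE shows the shape most passes in this patch share: DataLayout may be unavailable, so the pass keeps a nullable DL pointer and each consumer either guards it or forwards it. A stub sketch of that convention (stand-in types, not LLVM's):<br>
<br>
struct DataLayout;                  // opaque stand-in<br>
<br>
struct SomePass {<br>
  const DataLayout *DL;             // may legitimately stay null<br>
  SomePass() : DL(0) {}<br>
<br>
  void init(const DataLayout *MaybeDL) {<br>
    DL = MaybeDL;                   // getAnalysisIfAvailable&lt;&gt; analogue<br>
  }<br>
  bool simplify() {<br>
    if (!DL) return false;          // degrade gracefully without layout info<br>
    // ... use *DL for size/alignment queries ...<br>
    return true;<br>
  }<br>
};<br>
<br>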
Modified: llvm/trunk/lib/Transforms/Scalar/GVN.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/GVN.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/GVN.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Scalar/GVN.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Scalar/GVN.cpp Thu Feb 20 18:06:31 2014<br>
@@ -586,7 +586,7 @@ namespace {<br>
bool NoLoads;<br>
MemoryDependenceAnalysis *MD;<br>
DominatorTree *DT;<br>
- const DataLayout *TD;<br>
+ const DataLayout *DL;<br>
const TargetLibraryInfo *TLI;<br>
SetVector<BasicBlock *> DeadBlocks;<br>
<br>
@@ -624,7 +624,7 @@ namespace {<br>
InstrsToErase.push_back(I);<br>
}<br>
<br>
- const DataLayout *getDataLayout() const { return TD; }<br>
+ const DataLayout *getDataLayout() const { return DL; }<br>
DominatorTree &getDominatorTree() const { return *DT; }<br>
AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }<br>
MemoryDependenceAnalysis &getMemDep() const { return *MD; }<br>
@@ -828,7 +828,7 @@ SpeculationFailure:<br>
/// CoerceAvailableValueToLoadType will succeed.<br>
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,<br>
Type *LoadTy,<br>
- const DataLayout &TD) {<br>
+ const DataLayout &DL) {<br>
// If the loaded or stored value is a first class array or struct, don't try<br>
// to transform them. We need to be able to bitcast to integer.<br>
if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||<br>
@@ -837,8 +837,8 @@ static bool CanCoerceMustAliasedValueToL<br>
return false;<br>
<br>
// The store has to be at least as big as the load.<br>
- if (TD.getTypeSizeInBits(StoredVal->getType()) <<br>
- TD.getTypeSizeInBits(LoadTy))<br>
+ if (DL.getTypeSizeInBits(StoredVal->getType()) <<br>
+ DL.getTypeSizeInBits(LoadTy))<br>
return false;<br>
<br>
return true;<br>
@@ -853,15 +853,15 @@ static bool CanCoerceMustAliasedValueToL<br>
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,<br>
Type *LoadedTy,<br>
Instruction *InsertPt,<br>
- const DataLayout &TD) {<br>
- if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))<br>
+ const DataLayout &DL) {<br>
+ if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, DL))<br>
return 0;<br>
<br>
// If this is already the right type, just return it.<br>
Type *StoredValTy = StoredVal->getType();<br>
<br>
- uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);<br>
- uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);<br>
+ uint64_t StoreSize = DL.getTypeSizeInBits(StoredValTy);<br>
+ uint64_t LoadSize = DL.getTypeSizeInBits(LoadedTy);<br>
<br>
// If the store and reload are the same size, we can always reuse it.<br>
if (StoreSize == LoadSize) {<br>
@@ -872,13 +872,13 @@ static Value *CoerceAvailableValueToLoad<br>
<br>
// Convert source pointers to integers, which can be bitcast.<br>
if (StoredValTy->getScalarType()->isPointerTy()) {<br>
- StoredValTy = TD.getIntPtrType(StoredValTy);<br>
+ StoredValTy = DL.getIntPtrType(StoredValTy);<br>
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);<br>
}<br>
<br>
Type *TypeToCastTo = LoadedTy;<br>
if (TypeToCastTo->getScalarType()->isPointerTy())<br>
- TypeToCastTo = TD.getIntPtrType(TypeToCastTo);<br>
+ TypeToCastTo = DL.getIntPtrType(TypeToCastTo);<br>
<br>
if (StoredValTy != TypeToCastTo)<br>
StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);<br>
@@ -897,7 +897,7 @@ static Value *CoerceAvailableValueToLoad<br>
<br>
// Convert source pointers to integers, which can be manipulated.<br>
if (StoredValTy->getScalarType()->isPointerTy()) {<br>
- StoredValTy = TD.getIntPtrType(StoredValTy);<br>
+ StoredValTy = DL.getIntPtrType(StoredValTy);<br>
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);<br>
}<br>
<br>
@@ -909,7 +909,7 @@ static Value *CoerceAvailableValueToLoad<br>
<br>
// If this is a big-endian system, we need to shift the value down to the low<br>
// bits so that a truncate will work.<br>
- if (TD.isBigEndian()) {<br>
+ if (DL.isBigEndian()) {<br>
Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);<br>
StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);<br>
}<br>
@@ -940,15 +940,15 @@ static Value *CoerceAvailableValueToLoad<br>
static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,<br>
Value *WritePtr,<br>
uint64_t WriteSizeInBits,<br>
- const DataLayout &TD) {<br>
+ const DataLayout &DL) {<br>
// If the loaded or stored value is a first class array or struct, don't try<br>
// to transform them. We need to be able to bitcast to integer.<br>
if (LoadTy->isStructTy() || LoadTy->isArrayTy())<br>
return -1;<br>
<br>
int64_t StoreOffset = 0, LoadOffset = 0;<br>
- Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr,StoreOffset,&TD);<br>
- Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &TD);<br>
+ Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr,StoreOffset,&DL);<br>
+ Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &DL);<br>
if (StoreBase != LoadBase)<br>
return -1;<br>
<br>
@@ -970,7 +970,7 @@ static int AnalyzeLoadFromClobberingWrit<br>
// If the load and store don't overlap at all, the store doesn't provide<br>
// anything to the load. In this case, they really don't alias at all, AA<br>
// must have gotten confused.<br>
- uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);<br>
+ uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy);<br>
<br>
if ((WriteSizeInBits & 7) | (LoadSize & 7))<br>
return -1;<br>
@@ -1013,51 +1013,51 @@ static int AnalyzeLoadFromClobberingWrit<br>
/// memdep query of a load that ends up being a clobbering store.<br>
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,<br>
StoreInst *DepSI,<br>
- const DataLayout &TD) {<br>
+ const DataLayout &DL) {<br>
// Cannot handle reading from store of first-class aggregate yet.<br>
if (DepSI->getValueOperand()->getType()->isStructTy() ||<br>
DepSI->getValueOperand()->getType()->isArrayTy())<br>
return -1;<br>
<br>
Value *StorePtr = DepSI->getPointerOperand();<br>
- uint64_t StoreSize =TD.getTypeSizeInBits(DepSI->getValueOperand()->getType());<br>
+ uint64_t StoreSize =DL.getTypeSizeInBits(DepSI->getValueOperand()->getType());<br>
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,<br>
- StorePtr, StoreSize, TD);<br>
+ StorePtr, StoreSize, DL);<br>
}<br>
<br>
/// AnalyzeLoadFromClobberingLoad - This function is called when we have a<br>
/// memdep query of a load that ends up being clobbered by another load. See if<br>
/// the other load can feed into the second load.<br>
static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,<br>
- LoadInst *DepLI, const DataLayout &TD){<br>
+ LoadInst *DepLI, const DataLayout &DL){<br>
// Cannot handle reading from store of first-class aggregate yet.<br>
if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())<br>
return -1;<br>
<br>
Value *DepPtr = DepLI->getPointerOperand();<br>
- uint64_t DepSize = TD.getTypeSizeInBits(DepLI->getType());<br>
- int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, TD);<br>
+ uint64_t DepSize = DL.getTypeSizeInBits(DepLI->getType());<br>
+ int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, DL);<br>
if (R != -1) return R;<br>
<br>
// If we have a load/load clobber and DepLI can be widened to cover this load,<br>
// then we should widen it!<br>
int64_t LoadOffs = 0;<br>
const Value *LoadBase =<br>
- GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &TD);<br>
- unsigned LoadSize = TD.getTypeStoreSize(LoadTy);<br>
+ GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &DL);<br>
+ unsigned LoadSize = DL.getTypeStoreSize(LoadTy);<br>
<br>
unsigned Size = MemoryDependenceAnalysis::<br>
- getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, TD);<br>
+ getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, DL);<br>
if (Size == 0) return -1;<br>
<br>
- return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, TD);<br>
+ return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, DL);<br>
}<br>
<br>
<br>
<br>
static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,<br>
MemIntrinsic *MI,<br>
- const DataLayout &TD) {<br>
+ const DataLayout &DL) {<br>
// If the mem operation is a non-constant size, we can't handle it.<br>
ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());<br>
if (SizeCst == 0) return -1;<br>
@@ -1067,7 +1067,7 @@ static int AnalyzeLoadFromClobberingMemI<br>
// of the memset.<br>
if (MI->getIntrinsicID() == Intrinsic::memset)<br>
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),<br>
- MemSizeInBits, TD);<br>
+ MemSizeInBits, DL);<br>
<br>
// If we have a memcpy/memmove, the only case we can handle is if this is a<br>
// copy from constant memory. In that case, we can read directly from the<br>
@@ -1077,12 +1077,12 @@ static int AnalyzeLoadFromClobberingMemI<br>
Constant *Src = dyn_cast<Constant>(MTI->getSource());<br>
if (Src == 0) return -1;<br>
<br>
- GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &TD));<br>
+ GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &DL));<br>
if (GV == 0 || !GV->isConstant()) return -1;<br>
<br>
// See if the access is within the bounds of the transfer.<br>
int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,<br>
- MI->getDest(), MemSizeInBits, TD);<br>
+ MI->getDest(), MemSizeInBits, DL);<br>
if (Offset == -1)<br>
return Offset;<br>
<br>
@@ -1095,7 +1095,7 @@ static int AnalyzeLoadFromClobberingMemI<br>
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);<br>
Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);<br>
Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));<br>
- if (ConstantFoldLoadFromConstPtr(Src, &TD))<br>
+ if (ConstantFoldLoadFromConstPtr(Src, &DL))<br>
return Offset;<br>
return -1;<br>
}<br>
@@ -1108,11 +1108,11 @@ static int AnalyzeLoadFromClobberingMemI<br>
/// before we give up.<br>
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,<br>
Type *LoadTy,<br>
- Instruction *InsertPt, const DataLayout &TD){<br>
+ Instruction *InsertPt, const DataLayout &DL){<br>
LLVMContext &Ctx = SrcVal->getType()->getContext();<br>
<br>
- uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;<br>
- uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;<br>
+ uint64_t StoreSize = (DL.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;<br>
+ uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy) + 7) / 8;<br>
<br>
IRBuilder<> Builder(InsertPt->getParent(), InsertPt);<br>
<br>
@@ -1120,13 +1120,13 @@ static Value *GetStoreValueForLoad(Value<br>
// to an integer type to start with.<br>
if (SrcVal->getType()->getScalarType()->isPointerTy())<br>
SrcVal = Builder.CreatePtrToInt(SrcVal,<br>
- TD.getIntPtrType(SrcVal->getType()));<br>
+ DL.getIntPtrType(SrcVal->getType()));<br>
if (!SrcVal->getType()->isIntegerTy())<br>
SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8));<br>
<br>
// Shift the bits to the least significant depending on endianness.<br>
unsigned ShiftAmt;<br>
- if (TD.isLittleEndian())<br>
+ if (DL.isLittleEndian())<br>
ShiftAmt = Offset*8;<br>
else<br>
ShiftAmt = (StoreSize-LoadSize-Offset)*8;<br>
@@ -1137,7 +1137,7 @@ static Value *GetStoreValueForLoad(Value<br>
if (LoadSize != StoreSize)<br>
SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8));<br>
<br>
- return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);<br>
+ return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, DL);<br>
}<br>
<br>
/// GetLoadValueForLoad - This function is called when we have a<br>
@@ -1148,11 +1148,11 @@ static Value *GetStoreValueForLoad(Value<br>
static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,<br>
Type *LoadTy, Instruction *InsertPt,<br>
GVN &gvn) {<br>
- const DataLayout &TD = *gvn.getDataLayout();<br>
+ const DataLayout &DL = *gvn.getDataLayout();<br>
// If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to<br>
// widen SrcVal out to a larger load.<br>
- unsigned SrcValSize = TD.getTypeStoreSize(SrcVal->getType());<br>
- unsigned LoadSize = TD.getTypeStoreSize(LoadTy);<br>
+ unsigned SrcValSize = DL.getTypeStoreSize(SrcVal->getType());<br>
+ unsigned LoadSize = DL.getTypeStoreSize(LoadTy);<br>
if (Offset+LoadSize > SrcValSize) {<br>
assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!");<br>
assert(SrcVal->getType()->isIntegerTy() && "Can't widen non-integer load");<br>
@@ -1184,7 +1184,7 @@ static Value *GetLoadValueForLoad(LoadIn<br>
// Replace uses of the original load with the wider load. On a big endian<br>
// system, we need to shift down to get the relevant bits.<br>
Value *RV = NewLoad;<br>
- if (TD.isBigEndian())<br>
+ if (DL.isBigEndian())<br>
RV = Builder.CreateLShr(RV,<br>
NewLoadSize*8-SrcVal->getType()->getPrimitiveSizeInBits());<br>
RV = Builder.CreateTrunc(RV, SrcVal->getType());<br>
@@ -1199,7 +1199,7 @@ static Value *GetLoadValueForLoad(LoadIn<br>
SrcVal = NewLoad;<br>
}<br>
<br>
- return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, TD);<br>
+ return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, DL);<br>
}<br>
<br>
<br>
@@ -1207,9 +1207,9 @@ static Value *GetLoadValueForLoad(LoadIn<br>
/// memdep query of a load that ends up being a clobbering mem intrinsic.<br>
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,<br>
Type *LoadTy, Instruction *InsertPt,<br>
- const DataLayout &TD){<br>
+ const DataLayout &DL){<br>
LLVMContext &Ctx = LoadTy->getContext();<br>
- uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;<br>
+ uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy)/8;<br>
<br>
IRBuilder<> Builder(InsertPt->getParent(), InsertPt);<br>
<br>
@@ -1240,7 +1240,7 @@ static Value *GetMemInstValueForLoad(Mem<br>
++NumBytesSet;<br>
}<br>
<br>
- return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);<br>
+ return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, DL);<br>
}<br>
<br>
// Otherwise, this is a memcpy/memmove from a constant global.<br>
@@ -1256,7 +1256,7 @@ static Value *GetMemInstValueForLoad(Mem<br>
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);<br>
Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);<br>
Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));<br>
- return ConstantFoldLoadFromConstPtr(Src, &TD);<br>
+ return ConstantFoldLoadFromConstPtr(Src, &DL);<br>
}<br>
<br>
<br>
@@ -1322,10 +1322,10 @@ Value *AvailableValueInBlock::Materializ<br>
if (isSimpleValue()) {<br>
Res = getSimpleValue();<br>
if (Res->getType() != LoadTy) {<br>
- const DataLayout *TD = gvn.getDataLayout();<br>
- assert(TD && "Need target data to handle type mismatch case");<br>
+ const DataLayout *DL = gvn.getDataLayout();<br>
+ assert(DL && "Need target data to handle type mismatch case");<br>
Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),<br>
- *TD);<br>
+ *DL);<br>
<br>
DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "<br>
<< *getSimpleValue() << '\n'<br>
@@ -1344,10 +1344,10 @@ Value *AvailableValueInBlock::Materializ<br>
<< *Res << '\n' << "\n\n\n");<br>
}<br>
} else if (isMemIntrinValue()) {<br>
- const DataLayout *TD = gvn.getDataLayout();<br>
- assert(TD && "Need target data to handle type mismatch case");<br>
+ const DataLayout *DL = gvn.getDataLayout();<br>
+ assert(DL && "Need target data to handle type mismatch case");<br>
Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,<br>
- LoadTy, BB->getTerminator(), *TD);<br>
+ LoadTy, BB->getTerminator(), *DL);<br>
DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset<br>
<< " " << *getMemIntrinValue() << '\n'<br>
<< *Res << '\n' << "\n\n\n");<br>
@@ -1400,9 +1400,9 @@ void GVN::AnalyzeLoadAvailability(LoadIn<br>
// read by the load, we can extract the bits we need for the load from the<br>
// stored value.<br>
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {<br>
- if (TD && Address) {<br>
+ if (DL && Address) {<br>
int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,<br>
- DepSI, *TD);<br>
+ DepSI, *DL);<br>
if (Offset != -1) {<br>
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,<br>
DepSI->getValueOperand(),<br>
@@ -1419,10 +1419,10 @@ void GVN::AnalyzeLoadAvailability(LoadIn<br>
if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {<br>
// If this is a clobber and L is the first instruction in its block, then<br>
// we have the first instruction in the entry block.<br>
- if (DepLI != LI && Address && TD) {<br>
+ if (DepLI != LI && Address && DL) {<br>
int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(),<br>
LI->getPointerOperand(),<br>
- DepLI, *TD);<br>
+ DepLI, *DL);<br>
<br>
if (Offset != -1) {<br>
ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB,DepLI,<br>
@@ -1435,9 +1435,9 @@ void GVN::AnalyzeLoadAvailability(LoadIn<br>
// If the clobbering value is a memset/memcpy/memmove, see if we can<br>
// forward a value on from it.<br>
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {<br>
- if (TD && Address) {<br>
+ if (DL && Address) {<br>
int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,<br>
- DepMI, *TD);<br>
+ DepMI, *DL);<br>
if (Offset != -1) {<br>
ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,<br>
Offset));<br>
@@ -1469,8 +1469,8 @@ void GVN::AnalyzeLoadAvailability(LoadIn<br>
if (S->getValueOperand()->getType() != LI->getType()) {<br>
// If the stored value is larger or equal to the loaded value, we can<br>
// reuse it.<br>
- if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),<br>
- LI->getType(), *TD)) {<br>
+ if (DL == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),<br>
+ LI->getType(), *DL)) {<br>
UnavailableBlocks.push_back(DepBB);<br>
continue;<br>
}<br>
@@ -1486,7 +1486,7 @@ void GVN::AnalyzeLoadAvailability(LoadIn<br>
if (LD->getType() != LI->getType()) {<br>
// If the stored value is larger or equal to the loaded value, we can<br>
// reuse it.<br>
- if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){<br>
+ if (DL == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*DL)){<br>
UnavailableBlocks.push_back(DepBB);<br>
continue;<br>
}<br>
@@ -1609,7 +1609,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, A<br>
// If all preds have a single successor, then we know it is safe to insert<br>
// the load on the pred (?!?), so we can insert code to materialize the<br>
// pointer if it is not available.<br>
- PHITransAddr Address(LI->getPointerOperand(), TD);<br>
+ PHITransAddr Address(LI->getPointerOperand(), DL);<br>
Value *LoadPtr = 0;<br>
LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,<br>
*DT, NewInsts);<br>
@@ -1821,7 +1821,7 @@ bool GVN::processLoad(LoadInst *L) {<br>
<br>
// If we have a clobber and target data is around, see if this is a clobber<br>
// that we can fix up through code synthesis.<br>
- if (Dep.isClobber() && TD) {<br>
+ if (Dep.isClobber() && DL) {<br>
// Check to see if we have something like this:<br>
// store i32 123, i32* %P<br>
// %A = bitcast i32* %P to i8*<br>
@@ -1836,10 +1836,10 @@ bool GVN::processLoad(LoadInst *L) {<br>
if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) {<br>
int Offset = AnalyzeLoadFromClobberingStore(L->getType(),<br>
L->getPointerOperand(),<br>
- DepSI, *TD);<br>
+ DepSI, *DL);<br>
if (Offset != -1)<br>
AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,<br>
- L->getType(), L, *TD);<br>
+ L->getType(), L, *DL);<br>
}<br>
<br>
// Check to see if we have something like this:<br>
@@ -1854,7 +1854,7 @@ bool GVN::processLoad(LoadInst *L) {<br>
<br>
int Offset = AnalyzeLoadFromClobberingLoad(L->getType(),<br>
L->getPointerOperand(),<br>
- DepLI, *TD);<br>
+ DepLI, *DL);<br>
if (Offset != -1)<br>
AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L, *this);<br>
}<br>
@@ -1864,9 +1864,9 @@ bool GVN::processLoad(LoadInst *L) {<br>
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {<br>
int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),<br>
L->getPointerOperand(),<br>
- DepMI, *TD);<br>
+ DepMI, *DL);<br>
if (Offset != -1)<br>
- AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *TD);<br>
+ AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *DL);<br>
}<br>
<br>
if (AvailVal) {<br>
@@ -1917,9 +1917,9 @@ bool GVN::processLoad(LoadInst *L) {<br>
// actually have the same type. See if we know how to reuse the stored<br>
// value (depending on its type).<br>
if (StoredVal->getType() != L->getType()) {<br>
- if (TD) {<br>
+ if (DL) {<br>
StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),<br>
- L, *TD);<br>
+ L, *DL);<br>
if (StoredVal == 0)<br>
return false;<br>
<br>
@@ -1946,9 +1946,9 @@ bool GVN::processLoad(LoadInst *L) {<br>
// the same type. See if we know how to reuse the previously loaded value<br>
// (depending on its type).<br>
if (DepLI->getType() != L->getType()) {<br>
- if (TD) {<br>
+ if (DL) {<br>
AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),<br>
- L, *TD);<br>
+ L, *DL);<br>
if (AvailableVal == 0)<br>
return false;<br>
<br>
@@ -2200,7 +2200,7 @@ bool GVN::processInstruction(Instruction<br>
// to value numbering it. Value numbering often exposes redundancies, for<br>
// example if it determines that %y is equal to %x then the instruction<br>
// "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.<br>
- if (Value *V = SimplifyInstruction(I, TD, TLI, DT)) {<br>
+ if (Value *V = SimplifyInstruction(I, DL, TLI, DT)) {<br>
I->replaceAllUsesWith(V);<br>
if (MD && V->getType()->getScalarType()->isPointerTy())<br>
MD->invalidateCachedPointerInfo(V);<br>
@@ -2318,7 +2318,7 @@ bool GVN::runOnFunction(Function& F) {<br>
if (!NoLoads)<br>
MD = &getAnalysis<MemoryDependenceAnalysis>();<br>
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
TLI = &getAnalysis<TargetLibraryInfo>();<br>
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());<br>
VN.setMemDep(MD);<br>
<br>
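The GetStoreValueForLoad hunks above pick a shift so the bytes a load wants end up at the least-significant end of the wider stored value, with the direction depending on endianness. The arithmetic in isolation (sizes in bytes, illustrative only):<br>
<br>
#include &lt;cstdint&gt;<br>
<br>
// The byte offset of the load within the store determines the shift:<br>
// little-endian counts from the low end, big-endian from the high end.<br>
static uint64_t loadShiftAmountInBits(uint64_t StoreSize, uint64_t LoadSize,<br>
                                      uint64_t Offset, bool LittleEndian) {<br>
  return (LittleEndian ? Offset : StoreSize - LoadSize - Offset) * 8;<br>
}<br>
// A 4-byte load at offset 2 of a 16-byte store shifts by 16 bits on a<br>
// little-endian target and by (16 - 4 - 2) * 8 = 80 bits on a big-endian one.<br>
<br>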
Modified: llvm/trunk/lib/Transforms/Scalar/GlobalMerge.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/GlobalMerge.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/GlobalMerge.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Scalar/GlobalMerge.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Scalar/GlobalMerge.cpp Thu Feb 20 18:06:31 2014<br>
@@ -126,15 +126,15 @@ namespace {<br>
}<br>
<br>
struct GlobalCmp {<br>
- const DataLayout *TD;<br>
+ const DataLayout *DL;<br>
<br>
- GlobalCmp(const DataLayout *td) : TD(td) { }<br>
+ GlobalCmp(const DataLayout *DL) : DL(DL) { }<br>
<br>
bool operator()(const GlobalVariable *GV1, const GlobalVariable *GV2) {<br>
Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();<br>
Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();<br>
<br>
- return (TD->getTypeAllocSize(Ty1) < TD->getTypeAllocSize(Ty2));<br>
+ return (DL->getTypeAllocSize(Ty1) < DL->getTypeAllocSize(Ty2));<br>
}<br>
};<br>
};<br>
@@ -148,7 +148,7 @@ INITIALIZE_PASS(GlobalMerge, "global-mer<br>
bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,<br>
Module &M, bool isConst, unsigned AddrSpace) const {<br>
const TargetLowering *TLI = TM->getTargetLowering();<br>
- const DataLayout *TD = TLI->getDataLayout();<br>
+ const DataLayout *DL = TLI->getDataLayout();<br>
<br>
// FIXME: Infer the maximum possible offset depending on the actual users<br>
// (these max offsets are different for the users inside Thumb or ARM<br>
@@ -156,7 +156,7 @@ bool GlobalMerge::doMerge(SmallVectorImp<br>
unsigned MaxOffset = TLI->getMaximalGlobalOffset();<br>
<br>
// FIXME: Find better heuristics<br>
- std::stable_sort(Globals.begin(), Globals.end(), GlobalCmp(TD));<br>
+ std::stable_sort(Globals.begin(), Globals.end(), GlobalCmp(DL));<br>
<br>
Type *Int32Ty = Type::getInt32Ty(M.getContext());<br>
<br>
@@ -167,7 +167,7 @@ bool GlobalMerge::doMerge(SmallVectorImp<br>
std::vector<Constant*> Inits;<br>
for (j = i; j != e; ++j) {<br>
Type *Ty = Globals[j]->getType()->getElementType();<br>
- MergedSize += TD->getTypeAllocSize(Ty);<br>
+ MergedSize += DL->getTypeAllocSize(Ty);<br>
if (MergedSize > MaxOffset) {<br>
break;<br>
}<br>
@@ -242,7 +242,7 @@ bool GlobalMerge::doInitialization(Modul<br>
DenseMap<unsigned, SmallVector<GlobalVariable*, 16> > Globals, ConstGlobals,<br>
BSSGlobals;<br>
const TargetLowering *TLI = TM->getTargetLowering();<br>
- const DataLayout *TD = TLI->getDataLayout();<br>
+ const DataLayout *DL = TLI->getDataLayout();<br>
unsigned MaxOffset = TLI->getMaximalGlobalOffset();<br>
bool Changed = false;<br>
setMustKeepGlobalVariables(M);<br>
@@ -260,9 +260,9 @@ bool GlobalMerge::doInitialization(Modul<br>
unsigned AddressSpace = PT->getAddressSpace();<br>
<br>
// Ignore fancy-aligned globals for now.<br>
- unsigned Alignment = TD->getPreferredAlignment(I);<br>
+ unsigned Alignment = DL->getPreferredAlignment(I);<br>
Type *Ty = I->getType()->getElementType();<br>
- if (Alignment > TD->getABITypeAlignment(Ty))<br>
+ if (Alignment > DL->getABITypeAlignment(Ty))<br>
continue;<br>
<br>
// Ignore all 'special' globals.<br>
@@ -274,7 +274,7 @@ bool GlobalMerge::doInitialization(Modul<br>
if (isMustKeepGlobalVariable(I))<br>
continue;<br>
<br>
- if (TD->getTypeAllocSize(Ty) < MaxOffset) {<br>
+ if (DL->getTypeAllocSize(Ty) < MaxOffset) {<br>
if (TargetLoweringObjectFile::getKindForGlobal(I, TLI->getTargetMachine())<br>
.isBSSLocal())<br>
BSSGlobals[AddressSpace].push_back(I);<br>
<br>
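GlobalMerge's heuristic above is: stable-sort the candidate globals by allocation size, then greedily take a prefix whose running size stays within MaxOffset. Reduced to plain integers (a sketch of the heuristic, not the pass):<br>
<br>
#include &lt;algorithm&gt;<br>
#include &lt;cstdint&gt;<br>
#include &lt;vector&gt;<br>
<br>
// Returns how many of the (sorted) sizes fit in one merged block.<br>
static size_t mergeablePrefix(std::vector&lt;uint64_t&gt; &Sizes,<br>
                              uint64_t MaxOffset) {<br>
  std::stable_sort(Sizes.begin(), Sizes.end());  // GlobalCmp analogue<br>
  uint64_t MergedSize = 0;<br>
  size_t j = 0;<br>
  for (; j != Sizes.size(); ++j) {<br>
    MergedSize += Sizes[j];<br>
    if (MergedSize > MaxOffset)<br>
      break;                                     // Sizes[j] would overflow<br>
  }<br>
  return j;                                      // first j elements fit<br>
}<br>
<br>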
Modified: llvm/trunk/lib/Transforms/Scalar/IndVarSimplify.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/IndVarSimplify.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/IndVarSimplify.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Scalar/IndVarSimplify.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Scalar/IndVarSimplify.cpp Thu Feb 20 18:06:31 2014<br>
@@ -71,7 +71,7 @@ namespace {<br>
LoopInfo *LI;<br>
ScalarEvolution *SE;<br>
DominatorTree *DT;<br>
- DataLayout *TD;<br>
+ DataLayout *DL;<br>
TargetLibraryInfo *TLI;<br>
<br>
SmallVector<WeakVH, 16> DeadInsts;<br>
@@ -79,7 +79,7 @@ namespace {<br>
public:<br>
<br>
static char ID; // Pass identification, replacement for typeid<br>
- IndVarSimplify() : LoopPass(ID), LI(0), SE(0), DT(0), TD(0),<br>
+ IndVarSimplify() : LoopPass(ID), LI(0), SE(0), DT(0), DL(0),<br>
Changed(false) {<br>
initializeIndVarSimplifyPass(*PassRegistry::getPassRegistry());<br>
}<br>
@@ -659,14 +659,14 @@ namespace {<br>
/// extended by this sign or zero extend operation. This is used to determine<br>
/// the final width of the IV before actually widening it.<br>
static void visitIVCast(CastInst *Cast, WideIVInfo &WI, ScalarEvolution *SE,<br>
- const DataLayout *TD) {<br>
+ const DataLayout *DL) {<br>
bool IsSigned = Cast->getOpcode() == Instruction::SExt;<br>
if (!IsSigned && Cast->getOpcode() != Instruction::ZExt)<br>
return;<br>
<br>
Type *Ty = Cast->getType();<br>
uint64_t Width = SE->getTypeSizeInBits(Ty);<br>
- if (TD && !TD->isLegalInteger(Width))<br>
+ if (DL && !DL->isLegalInteger(Width))<br>
return;<br>
<br>
if (!WI.WidestNativeType) {<br>
@@ -1122,15 +1122,15 @@ PHINode *WidenIV::CreateWideIV(SCEVExpan<br>
namespace {<br>
class IndVarSimplifyVisitor : public IVVisitor {<br>
ScalarEvolution *SE;<br>
- const DataLayout *TD;<br>
+ const DataLayout *DL;<br>
PHINode *IVPhi;<br>
<br>
public:<br>
WideIVInfo WI;<br>
<br>
IndVarSimplifyVisitor(PHINode *IV, ScalarEvolution *SCEV,<br>
- const DataLayout *TData, const DominatorTree *DTree):<br>
- SE(SCEV), TD(TData), IVPhi(IV) {<br>
+ const DataLayout *DL, const DominatorTree *DTree):<br>
+ SE(SCEV), DL(DL), IVPhi(IV) {<br>
DT = DTree;<br>
WI.NarrowIV = IVPhi;<br>
if (ReduceLiveIVs)<br>
@@ -1138,7 +1138,7 @@ namespace {<br>
}<br>
<br>
// Implement the interface used by simplifyUsersOfIV.<br>
- virtual void visitCast(CastInst *Cast) { visitIVCast(Cast, WI, SE, TD); }<br>
+ virtual void visitCast(CastInst *Cast) { visitIVCast(Cast, WI, SE, DL); }<br>
};<br>
}<br>
<br>
@@ -1172,7 +1172,7 @@ void IndVarSimplify::SimplifyAndExtend(L<br>
PHINode *CurrIV = LoopPhis.pop_back_val();<br>
<br>
// Information about sign/zero extensions of CurrIV.<br>
- IndVarSimplifyVisitor Visitor(CurrIV, SE, TD, DT);<br>
+ IndVarSimplifyVisitor Visitor(CurrIV, SE, DL, DT);<br>
<br>
Changed |= simplifyUsersOfIV(CurrIV, SE, &LPM, DeadInsts, &Visitor);<br>
<br>
@@ -1444,7 +1444,7 @@ static bool AlmostDeadIV(PHINode *Phi, B<br>
/// could at least handle constant BECounts.<br>
static PHINode *<br>
FindLoopCounter(Loop *L, const SCEV *BECount,<br>
- ScalarEvolution *SE, DominatorTree *DT, const DataLayout *TD) {<br>
+ ScalarEvolution *SE, DominatorTree *DT, const DataLayout *DL) {<br>
uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());<br>
<br>
Value *Cond =<br>
@@ -1473,7 +1473,7 @@ FindLoopCounter(Loop *L, const SCEV *BEC<br>
// AR may be wider than BECount. With eq/ne tests overflow is immaterial.<br>
// AR may not be a narrower type, or we may never exit.<br>
uint64_t PhiWidth = SE->getTypeSizeInBits(AR->getType());<br>
- if (PhiWidth < BCWidth || (TD && !TD->isLegalInteger(PhiWidth)))<br>
+ if (PhiWidth < BCWidth || (DL && !DL->isLegalInteger(PhiWidth)))<br>
continue;<br>
<br>
const SCEV *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));<br>
@@ -1818,7 +1818,7 @@ bool IndVarSimplify::runOnLoop(Loop *L,<br>
LI = &getAnalysis<LoopInfo>();<br>
SE = &getAnalysis<ScalarEvolution>();<br>
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
TLI = getAnalysisIfAvailable<TargetLibraryInfo>();<br>
<br>
DeadInsts.clear();<br>
@@ -1860,7 +1860,7 @@ bool IndVarSimplify::runOnLoop(Loop *L,<br>
// If we have a trip count expression, rewrite the loop's exit condition<br>
// using it. We can currently only handle loops with a single exit.<br>
if (canExpandBackedgeTakenCount(L, SE) && needsLFTR(L, DT)) {<br>
- PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, TD);<br>
+ PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, DL);<br>
if (IndVar) {<br>
// Check preconditions for proper SCEVExpander operation. SCEV does not<br>
// express SCEVExpander's dependencies, such as LoopSimplify. Instead any<br>
<br>
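Both visitIVCast and FindLoopCounter above gate on DL-&gt;isLegalInteger(Width): widening an induction variable only pays off when the target has a native integer of that width. A sketch of the check with a made-up width table (real targets derive this from the datalayout string):<br>
<br>
#include &lt;cstdint&gt;<br>
<br>
// Hypothetical legal-integer query; the widths below are an example.<br>
static bool isLegalIntegerWidth(uint64_t Width) {<br>
  static const uint64_t Native[] = {8, 16, 32, 64};<br>
  for (unsigned i = 0; i < sizeof(Native) / sizeof(Native[0]); ++i)<br>
    if (Native[i] == Width)<br>
      return true;<br>
  return false;<br>
}<br>
// visitIVCast-style use: if (DL && !isLegalIntegerWidth(Width)) return;<br>
<br>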
Modified: llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Scalar/JumpThreading.cpp Thu Feb 20 18:06:31 2014<br>
@@ -76,7 +76,7 @@ namespace {<br>
/// revectored to the false side of the second if.<br>
///<br>
class JumpThreading : public FunctionPass {<br>
- DataLayout *TD;<br>
+ DataLayout *DL;<br>
TargetLibraryInfo *TLI;<br>
LazyValueInfo *LVI;<br>
#ifdef NDEBUG<br>
@@ -152,7 +152,7 @@ bool JumpThreading::runOnFunction(Functi<br>
return false;<br>
<br>
DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
TLI = &getAnalysis<TargetLibraryInfo>();<br>
LVI = &getAnalysis<LazyValueInfo>();<br>
<br>
@@ -493,7 +493,7 @@ ComputeValueKnownInPredecessors(Value *V<br>
Value *LHS = PN->getIncomingValue(i);<br>
Value *RHS = Cmp->getOperand(1)->DoPHITranslation(BB, PredBB);<br>
<br>
- Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, TD);<br>
+ Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, DL);<br>
if (Res == 0) {<br>
if (!isa<Constant>(RHS))<br>
continue;<br>
@@ -695,7 +695,7 @@ bool JumpThreading::ProcessBlock(BasicBl<br>
// Run constant folding to see if we can reduce the condition to a simple<br>
// constant.<br>
if (Instruction *I = dyn_cast<Instruction>(Condition)) {<br>
- Value *SimpleVal = ConstantFoldInstruction(I, TD, TLI);<br>
+ Value *SimpleVal = ConstantFoldInstruction(I, DL, TLI);<br>
if (SimpleVal) {<br>
I->replaceAllUsesWith(SimpleVal);<br>
I->eraseFromParent();<br>
@@ -1478,7 +1478,7 @@ bool JumpThreading::ThreadEdge(BasicBloc<br>
// At this point, the IR is fully up to date and consistent. Do a quick scan<br>
// over the new instructions and zap any that are constants or dead. This<br>
// frequently happens because of phi translation.<br>
- SimplifyInstructionsInBlock(NewBB, TD, TLI);<br>
+ SimplifyInstructionsInBlock(NewBB, DL, TLI);<br>
<br>
// Threaded an edge!<br>
++NumThreads;<br>
@@ -1560,7 +1560,7 @@ bool JumpThreading::DuplicateCondBranchO<br>
// If this instruction can be simplified after the operands are updated,<br>
// just use the simplified value instead. This frequently happens due to<br>
// phi translation.<br>
- if (Value *IV = SimplifyInstruction(New, TD)) {<br>
+ if (Value *IV = SimplifyInstruction(New, DL)) {<br>
delete New;<br>
ValueMapping[BI] = IV;<br>
} else {<br>
<br>
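The last JumpThreading hunk above shows the clone-then-simplify pattern: each duplicated instruction is first simplified against its remapped operands, and only the clones that don't fold survive in the value map. A toy version over a hypothetical mini-IR:<br>
<br>
#include &lt;string&gt;<br>
<br>
struct Inst { std::string Op, A, B; };  // made-up three-field instruction<br>
<br>
// Returns true and sets Out when the clone folds to an existing value.<br>
static bool trySimplify(const Inst &I, std::string &Out) {<br>
  if (I.Op == "add" && I.B == "0") { Out = I.A; return true; }  // x+0 -&gt; x<br>
  if (I.Op == "mul" && I.B == "1") { Out = I.A; return true; }  // x*1 -&gt; x<br>
  return false;  // caller keeps the clone, as the pass keeps New<br>
}<br>
<br>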
Modified: llvm/trunk/lib/Transforms/Scalar/LICM.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LICM.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LICM.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Scalar/LICM.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Scalar/LICM.cpp Thu Feb 20 18:06:31 2014<br>
@@ -108,7 +108,7 @@ namespace {<br>
LoopInfo *LI; // Current LoopInfo<br>
DominatorTree *DT; // Dominator Tree for the current Loop.<br>
<br>
- DataLayout *TD; // DataLayout for constant folding.<br>
+ DataLayout *DL; // DataLayout for constant folding.<br>
TargetLibraryInfo *TLI; // TargetLibraryInfo for constant folding.<br>
<br>
// State that is updated as we process loops.<br>
@@ -221,7 +221,7 @@ bool LICM::runOnLoop(Loop *L, LPPassMana<br>
AA = &getAnalysis<AliasAnalysis>();<br>
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();<br>
<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
TLI = &getAnalysis<TargetLibraryInfo>();<br>
<br>
assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");<br>
@@ -394,7 +394,7 @@ void LICM::HoistRegion(DomTreeNode *N) {<br>
// Try constant folding this instruction. If all the operands are<br>
// constants, it is technically hoistable, but it would be better to just<br>
// fold it.<br>
- if (Constant *C = ConstantFoldInstruction(&I, TD, TLI)) {<br>
+ if (Constant *C = ConstantFoldInstruction(&I, DL, TLI)) {<br>
DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C << '\n');<br>
CurAST->copyValue(&I, C);<br>
CurAST->deleteValue(&I);<br>
<br>
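The HoistRegion hunk above prefers folding to hoisting: an instruction whose operands are all constants is technically loop-invariant, but replacing it with its folded constant is strictly better. As a toy illustration over hypothetical mini-expressions:<br>
<br>
#include &lt;string&gt;<br>
<br>
struct Expr { std::string Op; long LHS, RHS; };  // operands already constant<br>
<br>
// Fold instead of hoist: return true and set Out when the op folds.<br>
static bool constantFold(const Expr &E, long &Out) {<br>
  if (E.Op == "add") { Out = E.LHS + E.RHS; return true; }<br>
  if (E.Op == "mul") { Out = E.LHS * E.RHS; return true; }<br>
  return false;  // unknown op: fall back to the hoisting path<br>
}<br>
<br>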
Modified: llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp Thu Feb 20 18:06:31 2014<br>
@@ -132,7 +132,7 @@ namespace {<br>
<br>
class LoopIdiomRecognize : public LoopPass {<br>
Loop *CurLoop;<br>
- const DataLayout *TD;<br>
+ const DataLayout *DL;<br>
DominatorTree *DT;<br>
ScalarEvolution *SE;<br>
TargetLibraryInfo *TLI;<br>
@@ -141,7 +141,7 @@ namespace {<br>
static char ID;<br>
explicit LoopIdiomRecognize() : LoopPass(ID) {<br>
initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());<br>
- TD = 0; DT = 0; SE = 0; TLI = 0; TTI = 0;<br>
+ DL = 0; DT = 0; SE = 0; TLI = 0; TTI = 0;<br>
}<br>
<br>
bool runOnLoop(Loop *L, LPPassManager &LPM);<br>
@@ -182,7 +182,7 @@ namespace {<br>
}<br>
<br>
const DataLayout *getDataLayout() {<br>
- return TD ? TD : TD=getAnalysisIfAvailable<DataLayout>();<br>
+ return DL ? DL : DL=getAnalysisIfAvailable<DataLayout>();<br>
}<br>
<br>
DominatorTree *getDominatorTree() {<br>
@@ -782,7 +782,7 @@ bool LoopIdiomRecognize::processLoopStor<br>
Value *StorePtr = SI->getPointerOperand();<br>
<br>
// Reject stores that are so large that they overflow an unsigned.<br>
- uint64_t SizeInBits = TD->getTypeSizeInBits(StoredVal->getType());<br>
+ uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());<br>
if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)<br>
return false;<br>
<br>
@@ -910,7 +910,7 @@ static bool mayLoopAccessLocation(Value<br>
///<br>
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these<br>
/// just replicate their input array and then pass on to memset_pattern16.<br>
-static Constant *getMemSetPatternValue(Value *V, const DataLayout &TD) {<br>
+static Constant *getMemSetPatternValue(Value *V, const DataLayout &DL) {<br>
// If the value isn't a constant, we can't promote it to being in a constant<br>
// array. We could theoretically do a store to an alloca or something, but<br>
// that doesn't seem worthwhile.<br>
@@ -918,12 +918,12 @@ static Constant *getMemSetPatternValue(V<br>
if (C == 0) return 0;<br>
<br>
// Only handle simple values that are a power of two bytes in size.<br>
- uint64_t Size = TD.getTypeSizeInBits(V->getType());<br>
+ uint64_t Size = DL.getTypeSizeInBits(V->getType());<br>
if (Size == 0 || (Size & 7) || (Size & (Size-1)))<br>
return 0;<br>
<br>
// Don't care enough about darwin/ppc to implement this.<br>
- if (TD.isBigEndian())<br>
+ if (DL.isBigEndian())<br>
return 0;<br>
<br>
// Convert to size in bytes.<br>
@@ -970,7 +970,7 @@ processLoopStridedStore(Value *DestPtr,<br>
PatternValue = 0;<br>
} else if (DestAS == 0 &&<br>
TLI->has(LibFunc::memset_pattern16) &&<br>
- (PatternValue = getMemSetPatternValue(StoredVal, *TD))) {<br>
+ (PatternValue = getMemSetPatternValue(StoredVal, *DL))) {<br>
// Don't create memset_pattern16s with address spaces.<br>
// It looks like we can use PatternValue!<br>
SplatValue = 0;<br>
@@ -1011,7 +1011,7 @@ processLoopStridedStore(Value *DestPtr,<br>
<br>
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to<br>
// pointer size if it isn't already.<br>
- Type *IntPtr = Builder.getIntPtrTy(TD, DestAS);<br>
+ Type *IntPtr = Builder.getIntPtrTy(DL, DestAS);<br>
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);<br>
<br>
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),<br>
@@ -1125,7 +1125,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI<br>
<br>
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to<br>
// pointer size if it isn't already.<br>
- Type *IntPtrTy = Builder.getIntPtrTy(TD, SI->getPointerAddressSpace());<br>
+ Type *IntPtrTy = Builder.getIntPtrTy(DL, SI->getPointerAddressSpace());<br>
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);<br>
<br>
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtrTy, 1),<br>
<br>
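getMemSetPatternValue above screens candidates by size before anything else: the stored value must be a non-zero, whole-byte, power-of-two number of bits, and the target little-endian. That screen in isolation (a sketch, not the pass):<br>
<br>
#include &lt;cstdint&gt;<br>
<br>
// True when SizeInBits can seed a memset_pattern16-style pattern:<br>
// (Size & 7) rejects non-byte sizes, (Size & (Size - 1)) rejects<br>
// non-powers-of-two, and big-endian targets are intentionally skipped.<br>
static bool sizeOkForMemsetPattern(uint64_t SizeInBits, bool BigEndian) {<br>
  if (SizeInBits == 0 || (SizeInBits & 7) || (SizeInBits & (SizeInBits - 1)))<br>
    return false;<br>
  return !BigEndian;<br>
}<br>
<br>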
Modified: llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp Thu Feb 20 18:06:31 2014<br>
@@ -195,9 +195,9 @@ class MemsetRanges {<br>
/// because each element is relatively large and expensive to copy.<br>
std::list<MemsetRange> Ranges;<br>
typedef std::list<MemsetRange>::iterator range_iterator;<br>
- const DataLayout &TD;<br>
+ const DataLayout &DL;<br>
public:<br>
- MemsetRanges(const DataLayout &td) : TD(td) {}<br>
+ MemsetRanges(const DataLayout &DL) : DL(DL) {}<br>
<br>
typedef std::list<MemsetRange>::const_iterator const_iterator;<br>
const_iterator begin() const { return Ranges.begin(); }<br>
@@ -212,7 +212,7 @@ public:<br>
}<br>
<br>
void addStore(int64_t OffsetFromFirst, StoreInst *SI) {<br>
- int64_t StoreSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());<br>
+ int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());<br>
<br>
addRange(OffsetFromFirst, StoreSize,<br>
SI->getPointerOperand(), SI->getAlignment(), SI);<br>
@@ -305,14 +305,14 @@ namespace {<br>
class MemCpyOpt : public FunctionPass {<br>
MemoryDependenceAnalysis *MD;<br>
TargetLibraryInfo *TLI;<br>
- const DataLayout *TD;<br>
+ const DataLayout *DL;<br>
public:<br>
static char ID; // Pass identification, replacement for typeid<br>
MemCpyOpt() : FunctionPass(ID) {<br>
initializeMemCpyOptPass(*PassRegistry::getPassRegistry());<br>
MD = 0;<br>
TLI = 0;<br>
- TD = 0;<br>
+ DL = 0;<br>
}<br>
<br>
bool runOnFunction(Function &F);<br>
@@ -366,13 +366,13 @@ INITIALIZE_PASS_END(MemCpyOpt, "memcpyop<br>
/// attempts to merge them together into a memcpy/memset.<br>
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,<br>
Value *StartPtr, Value *ByteVal) {<br>
- if (TD == 0) return 0;<br>
+ if (DL == 0) return 0;<br>
<br>
// Okay, so we now have a single store that can be splatable. Scan to find<br>
// all subsequent stores of the same value to offset from the same pointer.<br>
// Join these together into ranges, so we can decide whether contiguous blocks<br>
// are stored.<br>
- MemsetRanges Ranges(*TD);<br>
+ MemsetRanges Ranges(*DL);<br>
<br>
BasicBlock::iterator BI = StartInst;<br>
for (++BI; !isa<TerminatorInst>(BI); ++BI) {<br>
@@ -396,7 +396,7 @@ Instruction *MemCpyOpt::tryMergingIntoMe<br>
// Check to see if this store is to a constant offset from the start ptr.<br>
int64_t Offset;<br>
if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),<br>
- Offset, *TD))<br>
+ Offset, *DL))<br>
break;<br>
<br>
Ranges.addStore(Offset, NextStore);<br>
@@ -409,7 +409,7 @@ Instruction *MemCpyOpt::tryMergingIntoMe<br>
<br>
// Check to see if this store is to a constant offset from the start ptr.<br>
int64_t Offset;<br>
- if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))<br>
+ if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *DL))<br>
break;<br>
<br>
Ranges.addMemSet(Offset, MSI);<br>
@@ -441,7 +441,7 @@ Instruction *MemCpyOpt::tryMergingIntoMe<br>
if (Range.TheStores.size() == 1) continue;<br>
<br>
// If it is profitable to lower this range to memset, do so now.<br>
- if (!Range.isProfitableToUseMemset(*TD))<br>
+ if (!Range.isProfitableToUseMemset(*DL))<br>
continue;<br>
<br>
// Otherwise, we do want to transform this! Create a new memset.<br>
@@ -453,7 +453,7 @@ Instruction *MemCpyOpt::tryMergingIntoMe<br>
if (Alignment == 0) {<br>
Type *EltType =<br>
cast<PointerType>(StartPtr->getType())->getElementType();<br>
- Alignment = TD->getABITypeAlignment(EltType);<br>
+ Alignment = DL->getABITypeAlignment(EltType);<br>
}<br>
<br>
AMemSet =<br>
@@ -484,7 +484,7 @@ Instruction *MemCpyOpt::tryMergingIntoMe<br>
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {<br>
if (!SI->isSimple()) return false;<br>
<br>
- if (TD == 0) return false;<br>
+ if (DL == 0) return false;<br>
<br>
// Detect cases where we're performing call slot forwarding, but<br>
// happen to be using a load-store pair to implement it, rather than<br>
@@ -514,15 +514,15 @@ bool MemCpyOpt::processStore(StoreInst *<br>
if (C) {<br>
unsigned storeAlign = SI->getAlignment();<br>
if (!storeAlign)<br>
- storeAlign = TD->getABITypeAlignment(SI->getOperand(0)->getType());<br>
+ storeAlign = DL->getABITypeAlignment(SI->getOperand(0)->getType());<br>
unsigned loadAlign = LI->getAlignment();<br>
if (!loadAlign)<br>
- loadAlign = TD->getABITypeAlignment(LI->getType());<br>
+ loadAlign = DL->getABITypeAlignment(LI->getType());<br>
<br>
bool changed = performCallSlotOptzn(LI,<br>
SI->getPointerOperand()->stripPointerCasts(),<br>
LI->getPointerOperand()->stripPointerCasts(),<br>
- TD->getTypeStoreSize(SI->getOperand(0)->getType()),<br>
+ DL->getTypeStoreSize(SI->getOperand(0)->getType()),<br>
std::min(storeAlign, loadAlign), C);<br>
if (changed) {<br>
MD->removeInstruction(SI);<br>
@@ -596,13 +596,13 @@ bool MemCpyOpt::performCallSlotOptzn(Ins<br>
return false;<br>
<br>
// Check that all of src is copied to dest.<br>
- if (TD == 0) return false;<br>
+ if (DL == 0) return false;<br>
<br>
ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());<br>
if (!srcArraySize)<br>
return false;<br>
<br>
- uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *<br>
+ uint64_t srcSize = DL->getTypeAllocSize(srcAlloca->getAllocatedType()) *<br>
srcArraySize->getZExtValue();<br>
<br>
if (cpyLen < srcSize)<br>
@@ -617,7 +617,7 @@ bool MemCpyOpt::performCallSlotOptzn(Ins<br>
if (!destArraySize)<br>
return false;<br>
<br>
- uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *<br>
+ uint64_t destSize = DL->getTypeAllocSize(A->getAllocatedType()) *<br>
destArraySize->getZExtValue();<br>
<br>
if (destSize < srcSize)<br>
@@ -636,7 +636,7 @@ bool MemCpyOpt::performCallSlotOptzn(Ins<br>
return false;<br>
}<br>
<br>
- uint64_t destSize = TD->getTypeAllocSize(StructTy);<br>
+ uint64_t destSize = DL->getTypeAllocSize(StructTy);<br>
if (destSize < srcSize)<br>
return false;<br>
} else {<br>
@@ -646,7 +646,7 @@ bool MemCpyOpt::performCallSlotOptzn(Ins<br>
// Check that dest points to memory that is at least as aligned as src.<br>
unsigned srcAlign = srcAlloca->getAlignment();<br>
if (!srcAlign)<br>
- srcAlign = TD->getABITypeAlignment(srcAlloca->getAllocatedType());<br>
+ srcAlign = DL->getABITypeAlignment(srcAlloca->getAllocatedType());<br>
bool isDestSufficientlyAligned = srcAlign <= cpyAlign;<br>
// If dest is not aligned enough and we can't increase its alignment then<br>
// bail out.<br>
@@ -912,12 +912,12 @@ bool MemCpyOpt::processMemMove(MemMoveIn<br>
<br>
/// processByValArgument - This is called on every byval argument in call sites.<br>
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {<br>
- if (TD == 0) return false;<br>
+ if (DL == 0) return false;<br>
<br>
// Find out what feeds this byval argument.<br>
Value *ByValArg = CS.getArgument(ArgNo);<br>
Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();<br>
- uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);<br>
+ uint64_t ByValSize = DL->getTypeAllocSize(ByValTy);<br>
MemDepResult DepInfo =<br>
MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),<br>
true, CS.getInstruction(),<br>
@@ -946,7 +946,7 @@ bool MemCpyOpt::processByValArgument(Cal<br>
// If it is greater than the memcpy, then we check to see if we can force the<br>
// source of the memcpy to the alignment we need. If we fail, we bail out.<br>
if (MDep->getAlignment() < ByValAlign &&<br>
- getOrEnforceKnownAlignment(MDep->getSource(),ByValAlign, TD) < ByValAlign)<br>
+ getOrEnforceKnownAlignment(MDep->getSource(),ByValAlign, DL) < ByValAlign)<br>
return false;<br>
<br>
// Verify that the copied-from memory doesn't change in between the memcpy and<br>
@@ -1025,7 +1025,7 @@ bool MemCpyOpt::runOnFunction(Function &<br>
<br>
bool MadeChange = false;<br>
MD = &getAnalysis<MemoryDependenceAnalysis>();<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
TLI = &getAnalysis<TargetLibraryInfo>();<br>
<br>
// If we don't have at least memset and memcpy, there is little point of doing<br>
<br>
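MemsetRanges above is an interval collector: stores at constant offsets from one base pointer become byte ranges, and contiguous ranges coalesce so a run of small stores can become one memset. A minimal version of that merging (not the real data structure):<br>
<br>
#include &lt;cstdint&gt;<br>
#include &lt;map&gt;<br>
<br>
class ByteRanges {<br>
  std::map&lt;int64_t, int64_t&gt; Ranges;  // start -&gt; end, half-open<br>
public:<br>
  void addRange(int64_t Start, int64_t Size) {<br>
    int64_t End = Start + Size;<br>
    std::map&lt;int64_t, int64_t&gt;::iterator It = Ranges.upper_bound(Start);<br>
    if (It != Ranges.begin() && (--It)->second < Start)<br>
      ++It;                            // predecessor doesn't touch us<br>
    while (It != Ranges.end() && It->first <= End) {<br>
      Start = Start < It->first ? Start : It->first;   // absorb overlap<br>
      End = End > It->second ? End : It->second;<br>
      Ranges.erase(It++);<br>
    }<br>
    Ranges[Start] = End;<br>
  }<br>
  size_t size() const { return Ranges.size(); }  // distinct ranges so far<br>
};<br>
<br>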
Modified: llvm/trunk/lib/Transforms/Scalar/SCCP.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/SCCP.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/SCCP.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Scalar/SCCP.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Scalar/SCCP.cpp Thu Feb 20 18:06:31 2014<br>
@@ -153,7 +153,7 @@ namespace {<br>
/// Constant Propagation.<br>
///<br>
class SCCPSolver : public InstVisitor<SCCPSolver> {<br>
- const DataLayout *TD;<br>
+ const DataLayout *DL;<br>
const TargetLibraryInfo *TLI;<br>
SmallPtrSet<BasicBlock*, 8> BBExecutable; // The BBs that are executable.<br>
DenseMap<Value*, LatticeVal> ValueState; // The state each value is in.<br>
@@ -205,8 +205,8 @@ class SCCPSolver : public InstVisitor<SC<br>
typedef std::pair<BasicBlock*, BasicBlock*> Edge;<br>
DenseSet<Edge> KnownFeasibleEdges;<br>
public:<br>
- SCCPSolver(const DataLayout *td, const TargetLibraryInfo *tli)<br>
- : TD(td), TLI(tli) {}<br>
+ SCCPSolver(const DataLayout *DL, const TargetLibraryInfo *tli)<br>
+ : DL(DL), TLI(tli) {}<br>
<br>
/// MarkBlockExecutable - This method can be used by clients to mark all of<br>
/// the blocks that are known to be intrinsically live in the processed unit.<br>
@@ -1067,7 +1067,7 @@ void SCCPSolver::visitLoadInst(LoadInst<br>
}<br>
<br>
// Transform load from a constant into a constant if possible.<br>
- if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, TD))<br>
+ if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, DL))<br>
return markConstant(IV, &I, C);<br>
<br>
// Otherwise we cannot say for certain what value this load will produce.<br>
@@ -1557,9 +1557,9 @@ bool SCCP::runOnFunction(Function &F) {<br>
return false;<br>
<br>
DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");<br>
- const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();<br>
+ const DataLayout *DL = getAnalysisIfAvailable<DataLayout>();<br>
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();<br>
- SCCPSolver Solver(TD, TLI);<br>
+ SCCPSolver Solver(DL, TLI);<br>
<br>
// Mark the first block of the function as being executable.<br>
Solver.MarkBlockExecutable(F.begin());<br>
@@ -1686,9 +1686,9 @@ static bool AddressIsTaken(const GlobalV<br>
}<br>
<br>
bool IPSCCP::runOnModule(Module &M) {<br>
- const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();<br>
+ const DataLayout *DL = getAnalysisIfAvailable<DataLayout>();<br>
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();<br>
- SCCPSolver Solver(TD, TLI);<br>
+ SCCPSolver Solver(DL, TLI);<br>
<br>
// AddressTakenFunctions - This set keeps track of the address-taken functions<br>
// that are in the input. As IPSCCP runs through and simplifies code,<br>
<br>
Modified: llvm/trunk/lib/Transforms/Scalar/ScalarReplAggregates.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/ScalarReplAggregates.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/ScalarReplAggregates.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Scalar/ScalarReplAggregates.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Scalar/ScalarReplAggregates.cpp Thu Feb 20 18:06:31 2014<br>
@@ -87,7 +87,7 @@ namespace {<br>
<br>
private:<br>
bool HasDomTree;<br>
- DataLayout *TD;<br>
+ DataLayout *DL;<br>
<br>
/// DeadInsts - Keep track of instructions we have made dead, so that<br>
/// we can remove them after we are done working.<br>
@@ -258,7 +258,7 @@ namespace {<br>
class ConvertToScalarInfo {<br>
/// AllocaSize - The size of the alloca being considered in bytes.<br>
unsigned AllocaSize;<br>
- const DataLayout &TD;<br>
+ const DataLayout &DL;<br>
unsigned ScalarLoadThreshold;<br>
<br>
/// IsNotTrivial - This is set to true if there is some access to the object<br>
@@ -301,9 +301,9 @@ class ConvertToScalarInfo {<br>
bool HadDynamicAccess;<br>
<br>
public:<br>
- explicit ConvertToScalarInfo(unsigned Size, const DataLayout &td,<br>
+ explicit ConvertToScalarInfo(unsigned Size, const DataLayout &DL,<br>
unsigned SLT)<br>
- : AllocaSize(Size), TD(td), ScalarLoadThreshold(SLT), IsNotTrivial(false),<br>
+ : AllocaSize(Size), DL(DL), ScalarLoadThreshold(SLT), IsNotTrivial(false),<br>
ScalarKind(Unknown), VectorTy(0), HadNonMemTransferAccess(false),<br>
HadDynamicAccess(false) { }<br>
<br>
@@ -364,7 +364,7 @@ AllocaInst *ConvertToScalarInfo::TryConv<br>
return 0;<br>
<br>
if ((ScalarKind == ImplicitVector || ScalarKind == Integer) &&<br>
- !HadNonMemTransferAccess && !TD.fitsInLegalInteger(BitWidth))<br>
+ !HadNonMemTransferAccess && !DL.fitsInLegalInteger(BitWidth))<br>
return 0;<br>
// Dynamic accesses on integers aren't yet supported. They need us to shift<br>
// by a dynamic amount which could be difficult to work out as we might not<br>
@@ -520,7 +520,7 @@ bool ConvertToScalarInfo::CanConvertToSc<br>
HadDynamicAccess = true;<br>
} else<br>
GEPNonConstantIdx = NonConstantIdx;<br>
- uint64_t GEPOffset = TD.getIndexedOffset(PtrTy,<br>
+ uint64_t GEPOffset = DL.getIndexedOffset(PtrTy,<br>
Indices);<br>
// See if all uses can be converted.<br>
if (!CanConvertToScalar(GEP, Offset+GEPOffset, GEPNonConstantIdx))<br>
@@ -615,7 +615,7 @@ void ConvertToScalarInfo::ConvertUsesToS<br>
GEPNonConstantIdx = Indices.pop_back_val();<br>
} else<br>
GEPNonConstantIdx = NonConstantIdx;<br>
- uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(),<br>
+ uint64_t GEPOffset = DL.getIndexedOffset(GEP->getPointerOperandType(),<br>
Indices);<br>
ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8, GEPNonConstantIdx);<br>
GEP->eraseFromParent();<br>
@@ -692,9 +692,9 @@ void ConvertToScalarInfo::ConvertUsesToS<br>
// If the source and destination are both to the same alloca, then this is<br>
// a noop copy-to-self, just delete it. Otherwise, emit a load and store<br>
// as appropriate.<br>
- AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, &TD, 0));<br>
+ AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, &DL, 0));<br>
<br>
- if (GetUnderlyingObject(MTI->getSource(), &TD, 0) != OrigAI) {<br>
+ if (GetUnderlyingObject(MTI->getSource(), &DL, 0) != OrigAI) {<br>
// Dest must be OrigAI, change this to be a load from the original<br>
// pointer (bitcasted), then a store to our new alloca.<br>
assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");<br>
@@ -710,7 +710,7 @@ void ConvertToScalarInfo::ConvertUsesToS<br>
LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");<br>
SrcVal->setAlignment(MTI->getAlignment());<br>
Builder.CreateStore(SrcVal, NewAI);<br>
- } else if (GetUnderlyingObject(MTI->getDest(), &TD, 0) != OrigAI) {<br>
+ } else if (GetUnderlyingObject(MTI->getDest(), &DL, 0) != OrigAI) {<br>
// Src must be OrigAI, change this to be a load from NewAI then a store<br>
// through the original dest pointer (bitcasted).<br>
assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");<br>
@@ -770,15 +770,15 @@ ConvertScalar_ExtractValue(Value *FromVa<br>
// If the result alloca is a vector type, this is either an element<br>
// access or a bitcast to another vector type of the same size.<br>
if (VectorType *VTy = dyn_cast<VectorType>(FromType)) {<br>
- unsigned FromTypeSize = TD.getTypeAllocSize(FromType);<br>
- unsigned ToTypeSize = TD.getTypeAllocSize(ToType);<br>
+ unsigned FromTypeSize = DL.getTypeAllocSize(FromType);<br>
+ unsigned ToTypeSize = DL.getTypeAllocSize(ToType);<br>
if (FromTypeSize == ToTypeSize)<br>
return Builder.CreateBitCast(FromVal, ToType);<br>
<br>
// Otherwise it must be an element access.<br>
unsigned Elt = 0;<br>
if (Offset) {<br>
- unsigned EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType());<br>
+ unsigned EltSize = DL.getTypeAllocSizeInBits(VTy->getElementType());<br>
Elt = Offset/EltSize;<br>
assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");<br>
}<br>
@@ -804,7 +804,7 @@ ConvertScalar_ExtractValue(Value *FromVa<br>
if (StructType *ST = dyn_cast<StructType>(ToType)) {<br>
assert(!NonConstantIdx &&<br>
"Dynamic indexing into struct types not supported");<br>
- const StructLayout &Layout = *TD.getStructLayout(ST);<br>
+ const StructLayout &Layout = *DL.getStructLayout(ST);<br>
Value *Res = UndefValue::get(ST);<br>
for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {<br>
Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),<br>
@@ -818,7 +818,7 @@ ConvertScalar_ExtractValue(Value *FromVa<br>
if (ArrayType *AT = dyn_cast<ArrayType>(ToType)) {<br>
assert(!NonConstantIdx &&<br>
"Dynamic indexing into array types not supported");<br>
- uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());<br>
+ uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType());<br>
Value *Res = UndefValue::get(AT);<br>
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {<br>
Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),<br>
@@ -834,12 +834,12 @@ ConvertScalar_ExtractValue(Value *FromVa<br>
// If this is a big-endian system and the load is narrower than the<br>
// full alloca type, we need to do a shift to get the right bits.<br>
int ShAmt = 0;<br>
- if (TD.isBigEndian()) {<br>
+ if (DL.isBigEndian()) {<br>
// On big-endian machines, the lowest bit is stored at the bit offset<br>
// from the pointer given by getTypeStoreSizeInBits. This matters for<br>
// integers with a bitwidth that is not a multiple of 8.<br>
- ShAmt = TD.getTypeStoreSizeInBits(NTy) -<br>
- TD.getTypeStoreSizeInBits(ToType) - Offset;<br>
+ ShAmt = DL.getTypeStoreSizeInBits(NTy) -<br>
+ DL.getTypeStoreSizeInBits(ToType) - Offset;<br>
} else {<br>
ShAmt = Offset;<br>
}<br>
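
To make the big-endian branch concrete: extracting an i8 at byte offset 0 from an i32 alloca must take the most significant byte on a big-endian target, so the shift comes out as 32 - 8 - 0 = 24, while the little-endian branch uses the offset directly. Worked values for the formula above, assuming whole-byte store sizes:

    // ShAmt = StoreBits(NTy) - StoreBits(ToType) - Offset   (big-endian)
    // NTy = i32, ToType = i8, Offset = 0  ->  32 - 8 - 0  = 24
    // NTy = i32, ToType = i8, Offset = 16 ->  32 - 8 - 16 = 8
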
@@ -855,7 +855,7 @@ ConvertScalar_ExtractValue(Value *FromVa<br>
ConstantInt::get(FromVal->getType(), -ShAmt));<br>
<br>
// Finally, unconditionally truncate the integer to the right width.<br>
- unsigned LIBitWidth = TD.getTypeSizeInBits(ToType);<br>
+ unsigned LIBitWidth = DL.getTypeSizeInBits(ToType);<br>
if (LIBitWidth < NTy->getBitWidth())<br>
FromVal =<br>
Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),<br>
@@ -902,8 +902,8 @@ ConvertScalar_InsertValue(Value *SV, Val<br>
LLVMContext &Context = Old->getContext();<br>
<br>
if (VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {<br>
- uint64_t VecSize = TD.getTypeAllocSizeInBits(VTy);<br>
- uint64_t ValSize = TD.getTypeAllocSizeInBits(SV->getType());<br>
+ uint64_t VecSize = DL.getTypeAllocSizeInBits(VTy);<br>
+ uint64_t ValSize = DL.getTypeAllocSizeInBits(SV->getType());<br>
<br>
// Changing the whole vector with memset or with an access of a different<br>
// vector type?<br>
@@ -914,7 +914,7 @@ ConvertScalar_InsertValue(Value *SV, Val<br>
Type *EltTy = VTy->getElementType();<br>
if (SV->getType() != EltTy)<br>
SV = Builder.CreateBitCast(SV, EltTy);<br>
- uint64_t EltSize = TD.getTypeAllocSizeInBits(EltTy);<br>
+ uint64_t EltSize = DL.getTypeAllocSizeInBits(EltTy);<br>
unsigned Elt = Offset/EltSize;<br>
Value *Idx;<br>
if (NonConstantIdx) {<br>
@@ -933,7 +933,7 @@ ConvertScalar_InsertValue(Value *SV, Val<br>
if (StructType *ST = dyn_cast<StructType>(SV->getType())) {<br>
assert(!NonConstantIdx &&<br>
"Dynamic indexing into struct types not supported");<br>
- const StructLayout &Layout = *TD.getStructLayout(ST);<br>
+ const StructLayout &Layout = *DL.getStructLayout(ST);<br>
for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {<br>
Value *Elt = Builder.CreateExtractValue(SV, i);<br>
Old = ConvertScalar_InsertValue(Elt, Old,<br>
@@ -946,7 +946,7 @@ ConvertScalar_InsertValue(Value *SV, Val<br>
if (ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {<br>
assert(!NonConstantIdx &&<br>
"Dynamic indexing into array types not supported");<br>
- uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());<br>
+ uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType());<br>
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {<br>
Value *Elt = Builder.CreateExtractValue(SV, i);<br>
Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, 0, Builder);<br>
@@ -956,14 +956,14 @@ ConvertScalar_InsertValue(Value *SV, Val<br>
<br>
// If SV is a float, convert it to the appropriate integer type.<br>
// If it is a pointer, do the same.<br>
- unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());<br>
- unsigned DestWidth = TD.getTypeSizeInBits(AllocaType);<br>
- unsigned SrcStoreWidth = TD.getTypeStoreSizeInBits(SV->getType());<br>
- unsigned DestStoreWidth = TD.getTypeStoreSizeInBits(AllocaType);<br>
+ unsigned SrcWidth = DL.getTypeSizeInBits(SV->getType());<br>
+ unsigned DestWidth = DL.getTypeSizeInBits(AllocaType);<br>
+ unsigned SrcStoreWidth = DL.getTypeStoreSizeInBits(SV->getType());<br>
+ unsigned DestStoreWidth = DL.getTypeStoreSizeInBits(AllocaType);<br>
if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy())<br>
SV = Builder.CreateBitCast(SV, IntegerType::get(SV->getContext(),SrcWidth));<br>
else if (SV->getType()->isPointerTy())<br>
- SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getType()));<br>
+ SV = Builder.CreatePtrToInt(SV, DL.getIntPtrType(SV->getType()));<br>
<br>
// Zero extend or truncate the value if needed.<br>
if (SV->getType() != AllocaType) {<br>
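
The three width queries above answer different questions, which is why the zext/trunc that follows can be needed even when two types occupy the same storage: getTypeSizeInBits is the exact width, getTypeStoreSizeInBits rounds up to whole bytes, and getTypeAllocSizeInBits rounds further up to the ABI alignment. A small sketch, assuming a typical x86-64 datalayout string (the exact numbers are target-dependent):

    DataLayout DL("e-i64:64-f80:128-n8:16:32:64-S128"); // assumed layout
    Type *I36 = IntegerType::get(Ctx, 36);              // Ctx: an LLVMContext
    DL.getTypeSizeInBits(I36);      // 36: exact bit width
    DL.getTypeStoreSizeInBits(I36); // 40: rounded up to whole bytes
    DL.getTypeAllocSizeInBits(I36); // 64: rounded up to the i64 alignment
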
@@ -982,7 +982,7 @@ ConvertScalar_InsertValue(Value *SV, Val<br>
// If this is a big-endian system and the store is narrower than the<br>
// full alloca type, we need to do a shift to get the right bits.<br>
int ShAmt = 0;<br>
- if (TD.isBigEndian()) {<br>
+ if (DL.isBigEndian()) {<br>
// On big-endian machines, the lowest bit is stored at the bit offset<br>
// from the pointer given by getTypeStoreSizeInBits. This matters for<br>
// integers with a bitwidth that is not a multiple of 8.<br>
@@ -1023,7 +1023,7 @@ bool SROA::runOnFunction(Function &F) {<br>
if (skipOptnoneFunction(F))<br>
return false;<br>
<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
<br>
bool Changed = performPromotion(F);<br>
<br>
@@ -1031,7 +1031,7 @@ bool SROA::runOnFunction(Function &F) {<br>
// theoretically needs to. It should be refactored in order to support<br>
// target-independent IR. Until this is done, just skip the actual<br>
// scalar-replacement portion of this pass.<br>
- if (!TD) return Changed;<br>
+ if (!DL) return Changed;<br>
<br>
while (1) {<br>
bool LocalChange = performScalarRepl(F);<br>
@@ -1137,7 +1137,7 @@ public:<br>
///<br>
/// We can do this to a select if its only uses are loads and if the operand to<br>
/// the select can be loaded unconditionally.<br>
-static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *TD) {<br>
+static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *DL) {<br>
bool TDerefable = SI->getTrueValue()->isDereferenceablePointer();<br>
bool FDerefable = SI->getFalseValue()->isDereferenceablePointer();<br>
<br>
@@ -1149,10 +1149,10 @@ static bool isSafeSelectToSpeculate(Sele<br>
// Both operands to the select need to be dereferencable, either absolutely<br>
// (e.g. allocas) or at this point because we can see other accesses to it.<br>
if (!TDerefable && !isSafeToLoadUnconditionally(SI->getTrueValue(), LI,<br>
- LI->getAlignment(), TD))<br>
+ LI->getAlignment(), DL))<br>
return false;<br>
if (!FDerefable && !isSafeToLoadUnconditionally(SI->getFalseValue(), LI,<br>
- LI->getAlignment(), TD))<br>
+ LI->getAlignment(), DL))<br>
return false;<br>
}<br>
<br>
@@ -1175,7 +1175,7 @@ static bool isSafeSelectToSpeculate(Sele<br>
///<br>
/// We can do this to a select if its only uses are loads and if the operand to<br>
/// the select can be loaded unconditionally.<br>
-static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *TD) {<br>
+static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *DL) {<br>
// For now, we can only do this promotion if the load is in the same block as<br>
// the PHI, and if there are no stores between the phi and load.<br>
// TODO: Allow recursive phi users.<br>
@@ -1225,7 +1225,7 @@ static bool isSafePHIToSpeculate(PHINode<br>
// If this pointer is always safe to load, or if we can prove that there is<br>
// already a load in the block, then we can move the load to the pred block.<br>
if (InVal->isDereferenceablePointer() ||<br>
- isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign, TD))<br>
+ isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign, DL))<br>
continue;<br>
<br>
return false;<br>
@@ -1239,7 +1239,7 @@ static bool isSafePHIToSpeculate(PHINode<br>
/// direct (non-volatile) loads and stores to it. If the alloca is close but<br>
/// not quite there, this will transform the code to allow promotion. As such,<br>
/// it is a non-pure predicate.<br>
-static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {<br>
+static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *DL) {<br>
SetVector<Instruction*, SmallVector<Instruction*, 4>,<br>
SmallPtrSet<Instruction*, 4> > InstsToRewrite;<br>
<br>
@@ -1268,12 +1268,12 @@ static bool tryToMakeAllocaBePromotable(<br>
<br>
// This is very rare and we just scrambled the use list of AI, start<br>
// over completely.<br>
- return tryToMakeAllocaBePromotable(AI, TD);<br>
+ return tryToMakeAllocaBePromotable(AI, DL);<br>
}<br>
<br>
// If it is safe to turn "load (select c, AI, ptr)" into a select of two<br>
// loads, then we can transform this by rewriting the select.<br>
- if (!isSafeSelectToSpeculate(SI, TD))<br>
+ if (!isSafeSelectToSpeculate(SI, DL))<br>
return false;<br>
<br>
InstsToRewrite.insert(SI);<br>
@@ -1288,7 +1288,7 @@ static bool tryToMakeAllocaBePromotable(<br>
<br>
// If it is safe to turn "load (phi [AI, ptr, ...])" into a PHI of loads<br>
// in the pred blocks, then we can transform this by rewriting the PHI.<br>
- if (!isSafePHIToSpeculate(PN, TD))<br>
+ if (!isSafePHIToSpeculate(PN, DL))<br>
return false;<br>
<br>
InstsToRewrite.insert(PN);<br>
@@ -1423,7 +1423,7 @@ bool SROA::performPromotion(Function &F)<br>
// the entry node<br>
for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)<br>
if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an alloca?<br>
- if (tryToMakeAllocaBePromotable(AI, TD))<br>
+ if (tryToMakeAllocaBePromotable(AI, DL))<br>
Allocas.push_back(AI);<br>
<br>
if (Allocas.empty()) break;<br>
@@ -1499,7 +1499,7 @@ bool SROA::performScalarRepl(Function &F<br>
// transform the allocation instruction if it is an array allocation<br>
// (allocations OF arrays are ok though), and an allocation of a scalar<br>
// value cannot be decomposed at all.<br>
- uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());<br>
+ uint64_t AllocaSize = DL->getTypeAllocSize(AI->getAllocatedType());<br>
<br>
// Do not promote [0 x %struct].<br>
if (AllocaSize == 0) continue;<br>
@@ -1523,7 +1523,7 @@ bool SROA::performScalarRepl(Function &F<br>
// that we can't just check based on the type: the alloca may be of an i32<br>
// but that has pointer arithmetic to set byte 3 of it or something.<br>
if (AllocaInst *NewAI = ConvertToScalarInfo(<br>
- (unsigned)AllocaSize, *TD, ScalarLoadThreshold).TryConvert(AI)) {<br>
+ (unsigned)AllocaSize, *DL, ScalarLoadThreshold).TryConvert(AI)) {<br>
NewAI->takeName(AI);<br>
AI->eraseFromParent();<br>
++NumConverted;<br>
@@ -1625,7 +1625,7 @@ void SROA::isSafeForScalarRepl(Instructi<br>
if (!LI->isSimple())<br>
return MarkUnsafe(Info, User);<br>
Type *LIType = LI->getType();<br>
- isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),<br>
+ isSafeMemAccess(Offset, DL->getTypeAllocSize(LIType),<br>
LIType, false, Info, LI, true /*AllowWholeAccess*/);<br>
Info.hasALoadOrStore = true;<br>
<br>
@@ -1635,7 +1635,7 @@ void SROA::isSafeForScalarRepl(Instructi<br>
return MarkUnsafe(Info, User);<br>
<br>
Type *SIType = SI->getOperand(0)->getType();<br>
- isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),<br>
+ isSafeMemAccess(Offset, DL->getTypeAllocSize(SIType),<br>
SIType, true, Info, SI, true /*AllowWholeAccess*/);<br>
Info.hasALoadOrStore = true;<br>
} else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) {<br>
@@ -1684,7 +1684,7 @@ void SROA::isSafePHISelectUseForScalarRe<br>
if (!LI->isSimple())<br>
return MarkUnsafe(Info, User);<br>
Type *LIType = LI->getType();<br>
- isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),<br>
+ isSafeMemAccess(Offset, DL->getTypeAllocSize(LIType),<br>
LIType, false, Info, LI, false /*AllowWholeAccess*/);<br>
Info.hasALoadOrStore = true;<br>
<br>
@@ -1694,7 +1694,7 @@ void SROA::isSafePHISelectUseForScalarRe<br>
return MarkUnsafe(Info, User);<br>
<br>
Type *SIType = SI->getOperand(0)->getType();<br>
- isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),<br>
+ isSafeMemAccess(Offset, DL->getTypeAllocSize(SIType),<br>
SIType, true, Info, SI, false /*AllowWholeAccess*/);<br>
Info.hasALoadOrStore = true;<br>
} else if (isa<PHINode>(User) || isa<SelectInst>(User)) {<br>
@@ -1739,7 +1739,7 @@ void SROA::isSafeGEP(GetElementPtrInst *<br>
// constant part of the offset.<br>
if (NonConstant)<br>
Indices.pop_back();<br>
- Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(), Indices);<br>
+ Offset += DL->getIndexedOffset(GEPI->getPointerOperandType(), Indices);<br>
if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset,<br>
NonConstantIdxSize))<br>
MarkUnsafe(Info, GEPI);<br>
@@ -1798,7 +1798,7 @@ void SROA::isSafeMemAccess(uint64_t Offs<br>
bool AllowWholeAccess) {<br>
// Check if this is a load/store of the entire alloca.<br>
if (Offset == 0 && AllowWholeAccess &&<br>
- MemSize == TD->getTypeAllocSize(Info.AI->getAllocatedType())) {<br>
+ MemSize == DL->getTypeAllocSize(Info.AI->getAllocatedType())) {<br>
// This can be safe for MemIntrinsics (where MemOpType is 0) and integer<br>
// loads/stores (which are essentially the same as the MemIntrinsics with<br>
// regard to copying padding between elements). But, if an alloca is<br>
@@ -1835,20 +1835,20 @@ bool SROA::TypeHasComponent(Type *T, uin<br>
Type *EltTy;<br>
uint64_t EltSize;<br>
if (StructType *ST = dyn_cast<StructType>(T)) {<br>
- const StructLayout *Layout = TD->getStructLayout(ST);<br>
+ const StructLayout *Layout = DL->getStructLayout(ST);<br>
unsigned EltIdx = Layout->getElementContainingOffset(Offset);<br>
EltTy = ST->getContainedType(EltIdx);<br>
- EltSize = TD->getTypeAllocSize(EltTy);<br>
+ EltSize = DL->getTypeAllocSize(EltTy);<br>
Offset -= Layout->getElementOffset(EltIdx);<br>
} else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {<br>
EltTy = AT->getElementType();<br>
- EltSize = TD->getTypeAllocSize(EltTy);<br>
+ EltSize = DL->getTypeAllocSize(EltTy);<br>
if (Offset >= AT->getNumElements() * EltSize)<br>
return false;<br>
Offset %= EltSize;<br>
} else if (VectorType *VT = dyn_cast<VectorType>(T)) {<br>
EltTy = VT->getElementType();<br>
- EltSize = TD->getTypeAllocSize(EltTy);<br>
+ EltSize = DL->getTypeAllocSize(EltTy);<br>
if (Offset >= VT->getNumElements() * EltSize)<br>
return false;<br>
Offset %= EltSize;<br>
@@ -1887,7 +1887,7 @@ void SROA::RewriteForScalarRepl(Instruct<br>
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());<br>
uint64_t MemSize = Length->getZExtValue();<br>
if (Offset == 0 &&<br>
- MemSize == TD->getTypeAllocSize(AI->getAllocatedType()))<br>
+ MemSize == DL->getTypeAllocSize(AI->getAllocatedType()))<br>
RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts);<br>
// Otherwise the intrinsic can only touch a single element and the<br>
// address operand will be updated, so nothing else needs to be done.<br>
@@ -1923,8 +1923,8 @@ void SROA::RewriteForScalarRepl(Instruct<br>
LI->replaceAllUsesWith(Insert);<br>
DeadInsts.push_back(LI);<br>
} else if (LIType->isIntegerTy() &&<br>
- TD->getTypeAllocSize(LIType) ==<br>
- TD->getTypeAllocSize(AI->getAllocatedType())) {<br>
+ DL->getTypeAllocSize(LIType) ==<br>
+ DL->getTypeAllocSize(AI->getAllocatedType())) {<br>
// If this is a load of the entire alloca to an integer, rewrite it.<br>
RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);<br>
}<br>
@@ -1950,8 +1950,8 @@ void SROA::RewriteForScalarRepl(Instruct<br>
}<br>
DeadInsts.push_back(SI);<br>
} else if (SIType->isIntegerTy() &&<br>
- TD->getTypeAllocSize(SIType) ==<br>
- TD->getTypeAllocSize(AI->getAllocatedType())) {<br>
+ DL->getTypeAllocSize(SIType) ==<br>
+ DL->getTypeAllocSize(AI->getAllocatedType())) {<br>
// If this is a store of the entire alloca from an integer, rewrite it.<br>
RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);<br>
}<br>
@@ -2013,7 +2013,7 @@ uint64_t SROA::FindElementAndOffset(Type<br>
Type *&IdxTy) {<br>
uint64_t Idx = 0;<br>
if (StructType *ST = dyn_cast<StructType>(T)) {<br>
- const StructLayout *Layout = TD->getStructLayout(ST);<br>
+ const StructLayout *Layout = DL->getStructLayout(ST);<br>
Idx = Layout->getElementContainingOffset(Offset);<br>
T = ST->getContainedType(Idx);<br>
Offset -= Layout->getElementOffset(Idx);<br>
@@ -2021,7 +2021,7 @@ uint64_t SROA::FindElementAndOffset(Type<br>
return Idx;<br>
} else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {<br>
T = AT->getElementType();<br>
- uint64_t EltSize = TD->getTypeAllocSize(T);<br>
+ uint64_t EltSize = DL->getTypeAllocSize(T);<br>
Idx = Offset / EltSize;<br>
Offset -= Idx * EltSize;<br>
IdxTy = Type::getInt64Ty(T->getContext());<br>
@@ -2029,7 +2029,7 @@ uint64_t SROA::FindElementAndOffset(Type<br>
}<br>
VectorType *VT = cast<VectorType>(T);<br>
T = VT->getElementType();<br>
- uint64_t EltSize = TD->getTypeAllocSize(T);<br>
+ uint64_t EltSize = DL->getTypeAllocSize(T);<br>
Idx = Offset / EltSize;<br>
Offset -= Idx * EltSize;<br>
IdxTy = Type::getInt64Ty(T->getContext());<br>
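
FindElementAndOffset turns a flat byte offset into an element index plus a remainder relative to that element, with StructLayout doing the work in the struct case and plain division in the array and vector cases. A sketch of the struct case, with made-up values in the comments:

    // For STy = { i8, i32, i16 } under a common layout the field
    // offsets are 0, 4 and 8 (the i32 forces three bytes of padding).
    const StructLayout *SL = DL->getStructLayout(STy);
    unsigned Idx = SL->getElementContainingOffset(5); // 1, the i32 field
    uint64_t Rem = 5 - SL->getElementOffset(Idx);     // 1 byte into it
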
@@ -2050,7 +2050,7 @@ void SROA::RewriteGEP(GetElementPtrInst<br>
Value* NonConstantIdx = 0;<br>
if (!GEPI->hasAllConstantIndices())<br>
NonConstantIdx = Indices.pop_back_val();<br>
- Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(), Indices);<br>
+ Offset += DL->getIndexedOffset(GEPI->getPointerOperandType(), Indices);<br>
<br>
RewriteForScalarRepl(GEPI, AI, Offset, NewElts);<br>
<br>
@@ -2121,7 +2121,7 @@ void SROA::RewriteLifetimeIntrinsic(Intr<br>
V = Builder.CreateGEP(V, Builder.getInt64(NewOffset));<br>
<br>
IdxTy = NewElts[Idx]->getAllocatedType();<br>
- uint64_t EltSize = TD->getTypeAllocSize(IdxTy) - NewOffset;<br>
+ uint64_t EltSize = DL->getTypeAllocSize(IdxTy) - NewOffset;<br>
if (EltSize > Size) {<br>
EltSize = Size;<br>
Size = 0;<br>
@@ -2137,7 +2137,7 @@ void SROA::RewriteLifetimeIntrinsic(Intr<br>
<br>
for (; Idx != NewElts.size() && Size; ++Idx) {<br>
IdxTy = NewElts[Idx]->getAllocatedType();<br>
- uint64_t EltSize = TD->getTypeAllocSize(IdxTy);<br>
+ uint64_t EltSize = DL->getTypeAllocSize(IdxTy);<br>
if (EltSize > Size) {<br>
EltSize = Size;<br>
Size = 0;<br>
@@ -2229,10 +2229,10 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIn<br>
PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());<br>
Type *OtherTy = OtherPtrTy->getElementType();<br>
if (StructType *ST = dyn_cast<StructType>(OtherTy)) {<br>
- EltOffset = TD->getStructLayout(ST)->getElementOffset(i);<br>
+ EltOffset = DL->getStructLayout(ST)->getElementOffset(i);<br>
} else {<br>
Type *EltTy = cast<SequentialType>(OtherTy)->getElementType();<br>
- EltOffset = TD->getTypeAllocSize(EltTy)*i;<br>
+ EltOffset = DL->getTypeAllocSize(EltTy)*i;<br>
}<br>
<br>
// The alignment of the other pointer is the guaranteed alignment of the<br>
@@ -2273,7 +2273,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIn<br>
Type *ValTy = EltTy->getScalarType();<br>
<br>
// Construct an integer with the right value.<br>
- unsigned EltSize = TD->getTypeSizeInBits(ValTy);<br>
+ unsigned EltSize = DL->getTypeSizeInBits(ValTy);<br>
APInt OneVal(EltSize, CI->getZExtValue());<br>
APInt TotalVal(OneVal);<br>
// Set each byte.<br>
@@ -2303,7 +2303,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIn<br>
// this element.<br>
}<br>
<br>
- unsigned EltSize = TD->getTypeAllocSize(EltTy);<br>
+ unsigned EltSize = DL->getTypeAllocSize(EltTy);<br>
if (!EltSize)<br>
continue;<br>
<br>
@@ -2337,12 +2337,12 @@ SROA::RewriteStoreUserOfWholeAlloca(Stor<br>
// and store the element value to the individual alloca.<br>
Value *SrcVal = SI->getOperand(0);<br>
Type *AllocaEltTy = AI->getAllocatedType();<br>
- uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);<br>
+ uint64_t AllocaSizeBits = DL->getTypeAllocSizeInBits(AllocaEltTy);<br>
<br>
IRBuilder<> Builder(SI);<br>
<br>
// Handle tail padding by extending the operand<br>
- if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)<br>
+ if (DL->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)<br>
SrcVal = Builder.CreateZExt(SrcVal,<br>
IntegerType::get(SI->getContext(), AllocaSizeBits));<br>
<br>
@@ -2352,15 +2352,15 @@ SROA::RewriteStoreUserOfWholeAlloca(Stor<br>
// There are two forms here: AI could be an array or struct. Both cases<br>
// have different ways to compute the element offset.<br>
if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {<br>
- const StructLayout *Layout = TD->getStructLayout(EltSTy);<br>
+ const StructLayout *Layout = DL->getStructLayout(EltSTy);<br>
<br>
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {<br>
// Get the number of bits to shift SrcVal to get the value.<br>
Type *FieldTy = EltSTy->getElementType(i);<br>
uint64_t Shift = Layout->getElementOffsetInBits(i);<br>
<br>
- if (TD->isBigEndian())<br>
- Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);<br>
+ if (DL->isBigEndian())<br>
+ Shift = AllocaSizeBits-Shift-DL->getTypeAllocSizeInBits(FieldTy);<br>
<br>
Value *EltVal = SrcVal;<br>
if (Shift) {<br>
@@ -2369,7 +2369,7 @@ SROA::RewriteStoreUserOfWholeAlloca(Stor<br>
}<br>
<br>
// Truncate down to an integer of the right size.<br>
- uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);<br>
+ uint64_t FieldSizeBits = DL->getTypeSizeInBits(FieldTy);<br>
<br>
// Ignore zero sized fields like {}, they obviously contain no data.<br>
if (FieldSizeBits == 0) continue;<br>
@@ -2394,12 +2394,12 @@ SROA::RewriteStoreUserOfWholeAlloca(Stor<br>
} else {<br>
ArrayType *ATy = cast<ArrayType>(AllocaEltTy);<br>
Type *ArrayEltTy = ATy->getElementType();<br>
- uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);<br>
- uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);<br>
+ uint64_t ElementOffset = DL->getTypeAllocSizeInBits(ArrayEltTy);<br>
+ uint64_t ElementSizeBits = DL->getTypeSizeInBits(ArrayEltTy);<br>
<br>
uint64_t Shift;<br>
<br>
- if (TD->isBigEndian())<br>
+ if (DL->isBigEndian())<br>
Shift = AllocaSizeBits-ElementOffset;<br>
else<br>
Shift = 0;<br>
@@ -2433,7 +2433,7 @@ SROA::RewriteStoreUserOfWholeAlloca(Stor<br>
}<br>
new StoreInst(EltVal, DestField, SI);<br>
<br>
- if (TD->isBigEndian())<br>
+ if (DL->isBigEndian())<br>
Shift -= ElementOffset;<br>
else<br>
Shift += ElementOffset;<br>
@@ -2451,7 +2451,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadI<br>
// Extract each element out of the NewElts according to its structure offset<br>
// and form the result value.<br>
Type *AllocaEltTy = AI->getAllocatedType();<br>
- uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);<br>
+ uint64_t AllocaSizeBits = DL->getTypeAllocSizeInBits(AllocaEltTy);<br>
<br>
DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI<br>
<< '\n');<br>
@@ -2461,10 +2461,10 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadI<br>
const StructLayout *Layout = 0;<br>
uint64_t ArrayEltBitOffset = 0;<br>
if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {<br>
- Layout = TD->getStructLayout(EltSTy);<br>
+ Layout = DL->getStructLayout(EltSTy);<br>
} else {<br>
Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();<br>
- ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);<br>
+ ArrayEltBitOffset = DL->getTypeAllocSizeInBits(ArrayEltTy);<br>
}<br>
<br>
Value *ResultVal =<br>
@@ -2476,7 +2476,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadI<br>
Value *SrcField = NewElts[i];<br>
Type *FieldTy =<br>
cast<PointerType>(SrcField->getType())->getElementType();<br>
- uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);<br>
+ uint64_t FieldSizeBits = DL->getTypeSizeInBits(FieldTy);<br>
<br>
// Ignore zero sized fields like {}, they obviously contain no data.<br>
if (FieldSizeBits == 0) continue;<br>
@@ -2507,7 +2507,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadI<br>
else // Array case.<br>
Shift = i*ArrayEltBitOffset;<br>
<br>
- if (TD->isBigEndian())<br>
+ if (DL->isBigEndian())<br>
Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();<br>
<br>
if (Shift) {<br>
@@ -2524,7 +2524,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadI<br>
}<br>
<br>
// Handle tail padding by truncating the result<br>
- if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)<br>
+ if (DL->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)<br>
ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);<br>
<br>
LI->replaceAllUsesWith(ResultVal);<br>
@@ -2534,15 +2534,15 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadI<br>
/// HasPadding - Return true if the specified type has any structure or<br>
/// alignment padding in between the elements that would be split apart<br>
/// by SROA; return false otherwise.<br>
-static bool HasPadding(Type *Ty, const DataLayout &TD) {<br>
+static bool HasPadding(Type *Ty, const DataLayout &DL) {<br>
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {<br>
Ty = ATy->getElementType();<br>
- return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);<br>
+ return DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty);<br>
}<br>
<br>
// SROA currently handles only Arrays and Structs.<br>
StructType *STy = cast<StructType>(Ty);<br>
- const StructLayout *SL = TD.getStructLayout(STy);<br>
+ const StructLayout *SL = DL.getStructLayout(STy);<br>
unsigned PrevFieldBitOffset = 0;<br>
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {<br>
unsigned FieldBitOffset = SL->getElementOffsetInBits(i);<br>
@@ -2551,7 +2551,7 @@ static bool HasPadding(Type *Ty, const D<br>
// previous one.<br>
if (i) {<br>
unsigned PrevFieldEnd =<br>
- PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));<br>
+ PrevFieldBitOffset+DL.getTypeSizeInBits(STy->getElementType(i-1));<br>
if (PrevFieldEnd < FieldBitOffset)<br>
return true;<br>
}<br>
@@ -2560,7 +2560,7 @@ static bool HasPadding(Type *Ty, const D<br>
// Check for tail padding.<br>
if (unsigned EltCount = STy->getNumElements()) {<br>
unsigned PrevFieldEnd = PrevFieldBitOffset +<br>
- TD.getTypeSizeInBits(STy->getElementType(EltCount-1));<br>
+ DL.getTypeSizeInBits(STy->getElementType(EltCount-1));<br>
if (PrevFieldEnd < SL->getSizeInBits())<br>
return true;<br>
}<br>
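
For a concrete trigger of each branch: { i8, i32 } has its first field end at bit 8 but its second start at bit 32, so the interior check fires; and an array of i1 trips the array check because getTypeSizeInBits(i1) is 1 while getTypeAllocSizeInBits(i1) is 8. Illustrative calls, not from the committed code:

    HasPadding(StructTy, DL); // StructTy = { i8, i32 }: true, 3 pad bytes
    HasPadding(ArrayTy, DL);  // ArrayTy = [8 x i1]: true, 7 pad bits/elt
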
@@ -2587,7 +2587,7 @@ bool SROA::isSafeAllocaToScalarRepl(Allo<br>
// types, but may actually be used. In these cases, we refuse to promote the<br>
// struct.<br>
if (Info.isMemCpySrc && Info.isMemCpyDst &&<br>
- HasPadding(AI->getAllocatedType(), *TD))<br>
+ HasPadding(AI->getAllocatedType(), *DL))<br>
return false;<br>
<br>
// If the alloca never has an access to just *part* of it, but is accessed<br>
<br>
Modified: llvm/trunk/lib/Transforms/Utils/CloneFunction.cpp<br>
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/CloneFunction.cpp?rev=201827&r1=201826&r2=201827&view=diff
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Utils/CloneFunction.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Utils/CloneFunction.cpp Thu Feb 20 18:06:31 2014<br>
@@ -205,17 +205,17 @@ namespace {<br>
bool ModuleLevelChanges;<br>
const char *NameSuffix;<br>
ClonedCodeInfo *CodeInfo;<br>
- const DataLayout *TD;<br>
+ const DataLayout *DL;<br>
public:<br>
PruningFunctionCloner(Function *newFunc, const Function *oldFunc,<br>
ValueToValueMapTy &valueMap,<br>
bool moduleLevelChanges,<br>
const char *nameSuffix,<br>
ClonedCodeInfo *codeInfo,<br>
- const DataLayout *td)<br>
+ const DataLayout *DL)<br>
: NewFunc(newFunc), OldFunc(oldFunc),<br>
VMap(valueMap), ModuleLevelChanges(moduleLevelChanges),<br>
- NameSuffix(nameSuffix), CodeInfo(codeInfo), TD(td) {<br>
+ NameSuffix(nameSuffix), CodeInfo(codeInfo), DL(DL) {<br>
}<br>
<br>
/// CloneBlock - The specified block is found to be reachable, clone it and<br>
@@ -272,7 +272,7 @@ void PruningFunctionCloner::CloneBlock(c<br>
// If we can simplify this instruction to some other value, simply add<br>
// a mapping to that value rather than inserting a new instruction into<br>
// the basic block.<br>
- if (Value *V = SimplifyInstruction(NewInst, TD)) {<br>
+ if (Value *V = SimplifyInstruction(NewInst, DL)) {<br>
// On the off-chance that this simplifies to an instruction in the old<br>
// function, map it back into the new function.<br>
if (Value *MappedV = VMap.lookup(V))<br>
@@ -368,7 +368,7 @@ void llvm::CloneAndPruneFunctionInto(Fun<br>
SmallVectorImpl<ReturnInst*> &Returns,<br>
const char *NameSuffix,<br>
ClonedCodeInfo *CodeInfo,<br>
- const DataLayout *TD,<br>
+ const DataLayout *DL,<br>
Instruction *TheCall) {<br>
assert(NameSuffix && "NameSuffix cannot be null!");<br>
<br>
@@ -379,7 +379,7 @@ void llvm::CloneAndPruneFunctionInto(Fun<br>
#endif<br>
<br>
PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges,<br>
- NameSuffix, CodeInfo, TD);<br>
+ NameSuffix, CodeInfo, DL);<br>
<br>
// Clone the entry block, and anything recursively reachable from it.<br>
std::vector<const BasicBlock*> CloneWorklist;<br>
@@ -509,7 +509,7 @@ void llvm::CloneAndPruneFunctionInto(Fun<br>
// node).<br>
for (unsigned Idx = 0, Size = PHIToResolve.size(); Idx != Size; ++Idx)<br>
if (PHINode *PN = dyn_cast<PHINode>(VMap[PHIToResolve[Idx]]))<br>
- recursivelySimplifyInstruction(PN, TD);<br>
+ recursivelySimplifyInstruction(PN, DL);<br>
<br>
// Now that the inlined function body has been fully constructed, go through<br>
// and zap unconditional fall-through branches. This happens all the time when<br>
<br>
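The cloner passes DL straight into SimplifyInstruction, which takes the DataLayout as an optional argument: given a null pointer it simply performs fewer, more conservative folds rather than failing. A rough sketch of the call shape (foldDuringClone is a hypothetical helper):

    static Value *foldDuringClone(Instruction *I, const DataLayout *DL) {
      // DL may be null; SimplifyInstruction then skips layout-driven folds.
      if (Value *V = SimplifyInstruction(I, DL))
        return V; // caller maps I to V instead of inserting I
      return 0;
    }
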
Modified: llvm/trunk/lib/Transforms/Utils/SimplifyCFG.cpp<br>
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/SimplifyCFG.cpp?rev=201827&r1=201826&r2=201827&view=diff
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Utils/SimplifyCFG.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Utils/SimplifyCFG.cpp Thu Feb 20 18:06:31 2014<br>
@@ -90,7 +90,7 @@ namespace {<br>
<br>
class SimplifyCFGOpt {<br>
const TargetTransformInfo &TTI;<br>
- const DataLayout *const TD;<br>
+ const DataLayout *const DL;<br>
Value *isValueEqualityComparison(TerminatorInst *TI);<br>
BasicBlock *GetValueEqualityComparisonCases(TerminatorInst *TI,<br>
std::vector<ValueEqualityComparisonCase> &Cases);<br>
@@ -109,8 +109,8 @@ class SimplifyCFGOpt {<br>
bool SimplifyCondBranch(BranchInst *BI, IRBuilder <>&Builder);<br>
<br>
public:<br>
- SimplifyCFGOpt(const TargetTransformInfo &TTI, const DataLayout *TD)<br>
- : TTI(TTI), TD(TD) {}<br>
+ SimplifyCFGOpt(const TargetTransformInfo &TTI, const DataLayout *DL)<br>
+ : TTI(TTI), DL(DL) {}<br>
bool run(BasicBlock *BB);<br>
};<br>
}<br>
@@ -306,15 +306,15 @@ static bool DominatesMergePoint(Value *V<br>
<br>
/// GetConstantInt - Extract ConstantInt from value, looking through IntToPtr<br>
/// and PointerNullValue. Return NULL if value is not a constant int.<br>
-static ConstantInt *GetConstantInt(Value *V, const DataLayout *TD) {<br>
+static ConstantInt *GetConstantInt(Value *V, const DataLayout *DL) {<br>
// Normal constant int.<br>
ConstantInt *CI = dyn_cast<ConstantInt>(V);<br>
- if (CI || !TD || !isa<Constant>(V) || !V->getType()->isPointerTy())<br>
+ if (CI || !DL || !isa<Constant>(V) || !V->getType()->isPointerTy())<br>
return CI;<br>
<br>
// This is some kind of pointer constant. Turn it into a pointer-sized<br>
// ConstantInt if possible.<br>
- IntegerType *PtrTy = cast<IntegerType>(TD->getIntPtrType(V->getType()));<br>
+ IntegerType *PtrTy = cast<IntegerType>(DL->getIntPtrType(V->getType()));<br>
<br>
// Null pointer means 0, see SelectionDAGBuilder::getValue(const Value*).<br>
if (isa<ConstantPointerNull>(V))<br>
@@ -340,13 +340,13 @@ static ConstantInt *GetConstantInt(Value<br>
/// Values vector.<br>
static Value *<br>
GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,<br>
- const DataLayout *TD, bool isEQ, unsigned &UsedICmps) {<br>
+ const DataLayout *DL, bool isEQ, unsigned &UsedICmps) {<br>
Instruction *I = dyn_cast<Instruction>(V);<br>
if (I == 0) return 0;<br>
<br>
// If this is an icmp against a constant, handle this as one of the cases.<br>
if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) {<br>
- if (ConstantInt *C = GetConstantInt(I->getOperand(1), TD)) {<br>
+ if (ConstantInt *C = GetConstantInt(I->getOperand(1), DL)) {<br>
Value *RHSVal;<br>
ConstantInt *RHSC;<br>
<br>
@@ -405,11 +405,11 @@ GatherConstantCompares(Value *V, std::ve<br>
<br>
unsigned NumValsBeforeLHS = Vals.size();<br>
unsigned UsedICmpsBeforeLHS = UsedICmps;<br>
- if (Value *LHS = GatherConstantCompares(I->getOperand(0), Vals, Extra, TD,<br>
+ if (Value *LHS = GatherConstantCompares(I->getOperand(0), Vals, Extra, DL,<br>
isEQ, UsedICmps)) {<br>
unsigned NumVals = Vals.size();<br>
unsigned UsedICmpsBeforeRHS = UsedICmps;<br>
- if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, TD,<br>
+ if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, DL,<br>
isEQ, UsedICmps)) {<br>
if (LHS == RHS)<br>
return LHS;<br>
@@ -434,7 +434,7 @@ GatherConstantCompares(Value *V, std::ve<br>
if (Extra == 0 || Extra == I->getOperand(0)) {<br>
Value *OldExtra = Extra;<br>
Extra = I->getOperand(0);<br>
- if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, TD,<br>
+ if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, DL,<br>
isEQ, UsedICmps))<br>
return RHS;<br>
assert(Vals.size() == NumValsBeforeLHS);<br>
@@ -472,14 +472,14 @@ Value *SimplifyCFGOpt::isValueEqualityCo<br>
} else if (BranchInst *BI = dyn_cast<BranchInst>(TI))<br>
if (BI->isConditional() && BI->getCondition()->hasOneUse())<br>
if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition()))<br>
- if (ICI->isEquality() && GetConstantInt(ICI->getOperand(1), TD))<br>
+ if (ICI->isEquality() && GetConstantInt(ICI->getOperand(1), DL))<br>
CV = ICI->getOperand(0);<br>
<br>
// Unwrap any lossless ptrtoint cast.<br>
- if (TD && CV) {<br>
+ if (DL && CV) {<br>
if (PtrToIntInst *PTII = dyn_cast<PtrToIntInst>(CV)) {<br>
Value *Ptr = PTII->getPointerOperand();<br>
- if (PTII->getType() == TD->getIntPtrType(Ptr->getType()))<br>
+ if (PTII->getType() == DL->getIntPtrType(Ptr->getType()))<br>
CV = Ptr;<br>
}<br>
}<br>
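
The unwrap above is only sound because the cast is checked to be lossless: a ptrtoint to exactly getIntPtrType keeps every bit of the pointer, while a narrower result truncates and a wider one zero-extends, and neither can be undone by switching on the pointer instead. The check in isolation:

    // With 64-bit pointers, getIntPtrType(Ptr->getType()) is i64, so
    // 'ptrtoint i8* %p to i64' may be unwrapped but '... to i32' may not.
    bool Lossless = PTII->getType() == DL->getIntPtrType(Ptr->getType());
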
@@ -504,7 +504,7 @@ GetValueEqualityComparisonCases(Terminat<br>
ICmpInst *ICI = cast<ICmpInst>(BI->getCondition());<br>
BasicBlock *Succ = BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_NE);<br>
Cases.push_back(ValueEqualityComparisonCase(GetConstantInt(ICI->getOperand(1),<br>
- TD),<br>
+ DL),<br>
Succ));<br>
return BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_EQ);<br>
}<br>
@@ -930,8 +930,8 @@ bool SimplifyCFGOpt::FoldValueComparison<br>
Builder.SetInsertPoint(PTI);<br>
// Convert pointer to int before we switch.<br>
if (CV->getType()->isPointerTy()) {<br>
- assert(TD && "Cannot switch on pointer without DataLayout");<br>
- CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getType()),<br>
+ assert(DL && "Cannot switch on pointer without DataLayout");<br>
+ CV = Builder.CreatePtrToInt(CV, DL->getIntPtrType(CV->getType()),<br>
"magicptr");<br>
}<br>
<br>
@@ -1606,7 +1606,7 @@ static bool BlockIsSimpleEnoughToThreadT<br>
/// that is defined in the same block as the branch and if any PHI entries are<br>
/// constants, thread edges corresponding to that entry to be branches to their<br>
/// ultimate destination.<br>
-static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) {<br>
+static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *DL) {<br>
BasicBlock *BB = BI->getParent();<br>
PHINode *PN = dyn_cast<PHINode>(BI->getCondition());<br>
// NOTE: we currently cannot transform this case if the PHI node is used<br>
@@ -1675,7 +1675,7 @@ static bool FoldCondBranchOnPHI(BranchIn<br>
}<br>
<br>
// Check for trivial simplification.<br>
- if (Value *V = SimplifyInstruction(N, TD)) {<br>
+ if (Value *V = SimplifyInstruction(N, DL)) {<br>
TranslateMap[BBI] = V;<br>
delete N; // Instruction folded away, don't need actual inst<br>
} else {<br>
@@ -1696,7 +1696,7 @@ static bool FoldCondBranchOnPHI(BranchIn<br>
}<br>
<br>
// Recurse, simplifying any other constants.<br>
- return FoldCondBranchOnPHI(BI, TD) | true;<br>
+ return FoldCondBranchOnPHI(BI, DL) | true;<br>
}<br>
<br>
return false;<br>
@@ -1704,7 +1704,7 @@ static bool FoldCondBranchOnPHI(BranchIn<br>
<br>
/// FoldTwoEntryPHINode - Given a BB that starts with the specified two-entry<br>
/// PHI node, see if we can eliminate it.<br>
-static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *TD) {<br>
+static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) {<br>
// Ok, this is a two entry PHI node. Check to see if this is a simple "if<br>
// statement", which has a very simple dominance structure. Basically, we<br>
// are trying to find the condition that is being branched on, which<br>
@@ -1738,7 +1738,7 @@ static bool FoldTwoEntryPHINode(PHINode<br>
<br>
for (BasicBlock::iterator II = BB->begin(); isa<PHINode>(II);) {<br>
PHINode *PN = cast<PHINode>(II++);<br>
- if (Value *V = SimplifyInstruction(PN, TD)) {<br>
+ if (Value *V = SimplifyInstruction(PN, DL)) {<br>
PN->replaceAllUsesWith(V);<br>
PN->eraseFromParent();<br>
continue;<br>
@@ -2634,7 +2634,7 @@ static bool SimplifyIndirectBrOnSelect(I<br>
/// the PHI, merging the third icmp into the switch.<br>
static bool TryToSimplifyUncondBranchWithICmpInIt(<br>
ICmpInst *ICI, IRBuilder<> &Builder, const TargetTransformInfo &TTI,<br>
- const DataLayout *TD) {<br>
+ const DataLayout *DL) {<br>
BasicBlock *BB = ICI->getParent();<br>
<br>
// If the block has any PHIs in it or the icmp has multiple uses, it is too<br>
@@ -2662,12 +2662,12 @@ static bool TryToSimplifyUncondBranchWit<br>
assert(VVal && "Should have a unique destination value");<br>
ICI->setOperand(0, VVal);<br>
<br>
- if (Value *V = SimplifyInstruction(ICI, TD)) {<br>
+ if (Value *V = SimplifyInstruction(ICI, DL)) {<br>
ICI->replaceAllUsesWith(V);<br>
ICI->eraseFromParent();<br>
}<br>
// BB is now empty, so it is likely to simplify away.<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
}<br>
<br>
// Ok, the block is reachable from the default dest. If the constant we're<br>
@@ -2683,7 +2683,7 @@ static bool TryToSimplifyUncondBranchWit<br>
ICI->replaceAllUsesWith(V);<br>
ICI->eraseFromParent();<br>
// BB is now empty, so it is likely to simplify away.<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
}<br>
<br>
// The use of the icmp has to be in the 'end' block, by the only PHI node in<br>
@@ -2739,7 +2739,7 @@ static bool TryToSimplifyUncondBranchWit<br>
/// SimplifyBranchOnICmpChain - The specified branch is a conditional branch.<br>
/// Check to see if it is branching on an or/and chain of icmp instructions, and<br>
/// fold it into a switch instruction if so.<br>
-static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD,<br>
+static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *DL,<br>
IRBuilder<> &Builder) {<br>
Instruction *Cond = dyn_cast<Instruction>(BI->getCondition());<br>
if (Cond == 0) return false;<br>
@@ -2755,10 +2755,10 @@ static bool SimplifyBranchOnICmpChain(Br<br>
unsigned UsedICmps = 0;<br>
<br>
if (Cond->getOpcode() == Instruction::Or) {<br>
- CompVal = GatherConstantCompares(Cond, Values, ExtraCase, TD, true,<br>
+ CompVal = GatherConstantCompares(Cond, Values, ExtraCase, DL, true,<br>
UsedICmps);<br>
} else if (Cond->getOpcode() == Instruction::And) {<br>
- CompVal = GatherConstantCompares(Cond, Values, ExtraCase, TD, false,<br>
+ CompVal = GatherConstantCompares(Cond, Values, ExtraCase, DL, false,<br>
UsedICmps);<br>
TrueWhenEqual = false;<br>
}<br>
@@ -2820,9 +2820,9 @@ static bool SimplifyBranchOnICmpChain(Br<br>
Builder.SetInsertPoint(BI);<br>
// Convert pointer to int before we switch.<br>
if (CompVal->getType()->isPointerTy()) {<br>
- assert(TD && "Cannot switch on pointer without DataLayout");<br>
+ assert(DL && "Cannot switch on pointer without DataLayout");<br>
CompVal = Builder.CreatePtrToInt(CompVal,<br>
- TD->getIntPtrType(CompVal->getType()),<br>
+ DL->getIntPtrType(CompVal->getType()),<br>
"magicptr");<br>
}<br>
<br>
@@ -3453,7 +3453,7 @@ namespace {<br>
ConstantInt *Offset,<br>
const SmallVectorImpl<std::pair<ConstantInt*, Constant*> >& Values,<br>
Constant *DefaultValue,<br>
- const DataLayout *TD);<br>
+ const DataLayout *DL);<br>
<br>
/// BuildLookup - Build instructions with Builder to retrieve the value at<br>
/// the position given by Index in the lookup table.<br>
@@ -3461,7 +3461,7 @@ namespace {<br>
<br>
/// WouldFitInRegister - Return true if a table with TableSize elements of<br>
/// type ElementType would fit in a target-legal register.<br>
- static bool WouldFitInRegister(const DataLayout *TD,<br>
+ static bool WouldFitInRegister(const DataLayout *DL,<br>
uint64_t TableSize,<br>
const Type *ElementType);<br>
<br>
@@ -3500,7 +3500,7 @@ SwitchLookupTable::SwitchLookupTable(Mod<br>
ConstantInt *Offset,<br>
const SmallVectorImpl<std::pair<ConstantInt*, Constant*> >& Values,<br>
Constant *DefaultValue,<br>
- const DataLayout *TD)<br>
+ const DataLayout *DL)<br>
: SingleValue(0), BitMap(0), BitMapElementTy(0), Array(0) {<br>
assert(Values.size() && "Can't build lookup table without values!");<br>
assert(TableSize >= Values.size() && "Can't fit values in table!");<br>
@@ -3546,7 +3546,7 @@ SwitchLookupTable::SwitchLookupTable(Mod<br>
}<br>
<br>
// If the type is integer and the table fits in a register, build a bitmap.<br>
- if (WouldFitInRegister(TD, TableSize, ValueType)) {<br>
+ if (WouldFitInRegister(DL, TableSize, ValueType)) {<br>
IntegerType *IT = cast<IntegerType>(ValueType);<br>
APInt TableInt(TableSize * IT->getBitWidth(), 0);<br>
for (uint64_t I = TableSize; I > 0; --I) {<br>
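
The bitmap form stores the entire table in one integer: entry I lives in bits [I*W, (I+1)*W), and a lookup lowers to a shift plus a truncate with no global array at all. A rough sketch of the extraction side, not the exact BuildLookup code, assuming eight i8 entries packed into an i64 TableInt64 and an i32 Index:

    Value *ShAmt = Builder.CreateZExt(
        Builder.CreateMul(Index, Builder.getInt32(8)), Builder.getInt64Ty());
    Value *Res = Builder.CreateTrunc(Builder.CreateLShr(TableInt64, ShAmt),
                                     Builder.getInt8Ty()); // entry at Index
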
@@ -3611,10 +3611,10 @@ Value *SwitchLookupTable::BuildLookup(Va<br>
llvm_unreachable("Unknown lookup table kind!");<br>
}<br>
<br>
-bool SwitchLookupTable::WouldFitInRegister(const DataLayout *TD,<br>
+bool SwitchLookupTable::WouldFitInRegister(const DataLayout *DL,<br>
uint64_t TableSize,<br>
const Type *ElementType) {<br>
- if (!TD)<br>
+ if (!DL)<br>
return false;<br>
const IntegerType *IT = dyn_cast<IntegerType>(ElementType);<br>
if (!IT)<br>
@@ -3625,7 +3625,7 @@ bool SwitchLookupTable::WouldFitInRegist<br>
// Avoid overflow, fitsInLegalInteger uses unsigned int for the width.<br>
if (TableSize >= UINT_MAX/IT->getBitWidth())<br>
return false;<br>
- return TD->fitsInLegalInteger(TableSize * IT->getBitWidth());<br>
+ return DL->fitsInLegalInteger(TableSize * IT->getBitWidth());<br>
}<br>
<br>
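Note the ordering inside WouldFitInRegister: the product TableSize * BitWidth feeds fitsInLegalInteger, which takes an unsigned, so the UINT_MAX/BitWidth comparison has to reject oversized tables before the multiply can wrap. The guard as a standalone idiom:

    // Check 'a >= MAX / b' before forming 'a * b' in a narrower type.
    if (TableSize >= UINT_MAX / ElemBits) // product would overflow unsigned
      return false;
    return DL->fitsInLegalInteger(TableSize * ElemBits);
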
/// ShouldBuildLookupTable - Determine whether a lookup table should be built<br>
@@ -3634,7 +3634,7 @@ bool SwitchLookupTable::WouldFitInRegist<br>
static bool ShouldBuildLookupTable(SwitchInst *SI,<br>
uint64_t TableSize,<br>
const TargetTransformInfo &TTI,<br>
- const DataLayout *TD,<br>
+ const DataLayout *DL,<br>
const SmallDenseMap<PHINode*, Type*>& ResultTypes) {<br>
if (SI->getNumCases() > TableSize || TableSize >= UINT64_MAX / 10)<br>
return false; // TableSize overflowed, or mul below might overflow.<br>
@@ -3650,7 +3650,7 @@ static bool ShouldBuildLookupTable(Switc<br>
<br>
// Saturate this flag to false.<br>
AllTablesFitInRegister = AllTablesFitInRegister &&<br>
- SwitchLookupTable::WouldFitInRegister(TD, TableSize, Ty);<br>
+ SwitchLookupTable::WouldFitInRegister(DL, TableSize, Ty);<br>
<br>
// If both flags saturate, we're done. NOTE: This *only* works with<br>
// saturating flags, and all flags have to saturate first due to the<br>
@@ -3679,7 +3679,7 @@ static bool ShouldBuildLookupTable(Switc<br>
static bool SwitchToLookupTable(SwitchInst *SI,<br>
IRBuilder<> &Builder,<br>
const TargetTransformInfo &TTI,<br>
- const DataLayout* TD) {<br>
+ const DataLayout* DL) {<br>
assert(SI->getNumCases() > 1 && "Degenerate switch?");<br>
<br>
// Only build lookup table when we have a target that supports it.<br>
@@ -3723,7 +3723,7 @@ static bool SwitchToLookupTable(SwitchIn<br>
typedef SmallVector<std::pair<PHINode*, Constant*>, 4> ResultsTy;<br>
ResultsTy Results;<br>
if (!GetCaseResults(SI, CaseVal, CI.getCaseSuccessor(), &CommonDest,<br>
- Results, TD))<br>
+ Results, DL))<br>
return false;<br>
<br>
// Append the result from this case to the list for each phi.<br>
@@ -3748,7 +3748,7 @@ static bool SwitchToLookupTable(SwitchIn<br>
// If the table has holes, we need a constant result for the default case.<br>
SmallVector<std::pair<PHINode*, Constant*>, 4> DefaultResultsList;<br>
if (TableHasHoles && !GetCaseResults(SI, 0, SI->getDefaultDest(), &CommonDest,<br>
- DefaultResultsList, TD))<br>
+ DefaultResultsList, DL))<br>
return false;<br>
<br>
for (size_t I = 0, E = DefaultResultsList.size(); I != E; ++I) {<br>
@@ -3757,7 +3757,7 @@ static bool SwitchToLookupTable(SwitchIn<br>
DefaultResults[PHI] = Result;<br>
}<br>
<br>
- if (!ShouldBuildLookupTable(SI, TableSize, TTI, TD, ResultTypes))<br>
+ if (!ShouldBuildLookupTable(SI, TableSize, TTI, DL, ResultTypes))<br>
return false;<br>
<br>
// Create the BB that does the lookups.<br>
@@ -3801,7 +3801,7 @@ static bool SwitchToLookupTable(SwitchIn<br>
PHINode *PHI = PHIs[I];<br>
<br>
SwitchLookupTable Table(Mod, TableSize, MinCaseVal, ResultLists[PHI],<br>
- DefaultResults[PHI], TD);<br>
+ DefaultResults[PHI], DL);<br>
<br>
Value *Result = Table.BuildLookup(TableIndex, Builder);<br>
<br>
@@ -3842,12 +3842,12 @@ bool SimplifyCFGOpt::SimplifySwitch(Swit<br>
// see if that predecessor totally determines the outcome of this switch.<br>
if (BasicBlock *OnlyPred = BB->getSinglePredecessor())<br>
if (SimplifyEqualityComparisonWithOnlyPredecessor(SI, OnlyPred, Builder))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
<br>
Value *Cond = SI->getCondition();<br>
if (SelectInst *Select = dyn_cast<SelectInst>(Cond))<br>
if (SimplifySwitchOnSelect(SI, Select))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
<br>
// If the block only contains the switch, see if we can fold the block<br>
// away into any preds.<br>
@@ -3857,22 +3857,22 @@ bool SimplifyCFGOpt::SimplifySwitch(Swit<br>
++BBI;<br>
if (SI == &*BBI)<br>
if (FoldValueComparisonIntoPredecessors(SI, Builder))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
}<br>
<br>
// Try to transform the switch into an icmp and a branch.<br>
if (TurnSwitchRangeIntoICmp(SI, Builder))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
<br>
// Remove unreachable cases.<br>
if (EliminateDeadSwitchCases(SI))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
<br>
if (ForwardSwitchConditionToPHI(SI))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
<br>
- if (SwitchToLookupTable(SI, Builder, TTI, TD))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ if (SwitchToLookupTable(SI, Builder, TTI, DL))<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
<br>
return false;<br>
}<br>
@@ -3909,7 +3909,7 @@ bool SimplifyCFGOpt::SimplifyIndirectBr(<br>
<br>
if (SelectInst *SI = dyn_cast<SelectInst>(IBI->getAddress())) {<br>
if (SimplifyIndirectBrOnSelect(IBI, SI))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
}<br>
return Changed;<br>
}<br>
@@ -3933,7 +3933,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranc<br>
for (++I; isa<DbgInfoIntrinsic>(I); ++I)<br>
;<br>
if (I->isTerminator() &&<br>
- TryToSimplifyUncondBranchWithICmpInIt(ICI, Builder, TTI, TD))<br>
+ TryToSimplifyUncondBranchWithICmpInIt(ICI, Builder, TTI, DL))<br>
return true;<br>
}<br>
<br>
@@ -3942,7 +3942,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranc<br>
// predecessor and use logical operations to update the incoming value<br>
// for PHI nodes in common successor.<br>
if (FoldBranchToCommonDest(BI))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
return false;<br>
}<br>
<br>
@@ -3957,7 +3957,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(<br>
// switch.<br>
if (BasicBlock *OnlyPred = BB->getSinglePredecessor())<br>
if (SimplifyEqualityComparisonWithOnlyPredecessor(BI, OnlyPred, Builder))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
<br>
// This block must be empty, except for the setcond inst, if it exists.<br>
// Ignore dbg intrinsics.<br>
@@ -3967,26 +3967,26 @@ bool SimplifyCFGOpt::SimplifyCondBranch(<br>
++I;<br>
if (&*I == BI) {<br>
if (FoldValueComparisonIntoPredecessors(BI, Builder))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
} else if (&*I == cast<Instruction>(BI->getCondition())){<br>
++I;<br>
// Ignore dbg intrinsics.<br>
while (isa<DbgInfoIntrinsic>(I))<br>
++I;<br>
if (&*I == BI && FoldValueComparisonIntoPredecessors(BI, Builder))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
}<br>
}<br>
<br>
// Try to turn "br (X == 0 | X == 1), T, F" into a switch instruction.<br>
- if (SimplifyBranchOnICmpChain(BI, TD, Builder))<br>
+ if (SimplifyBranchOnICmpChain(BI, DL, Builder))<br>
return true;<br>
<br>
// If this basic block is ONLY a compare and a branch, and if a predecessor<br>
// branches to us and one of our successors, fold the comparison into the<br>
// predecessor and use logical operations to pick the right destination.<br>
if (FoldBranchToCommonDest(BI))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
<br>
// We have a conditional branch to two blocks that are only reachable<br>
// from BI. We know that the condbr dominates the two blocks, so see if<br>
@@ -3995,7 +3995,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(<br>
if (BI->getSuccessor(0)->getSinglePredecessor() != 0) {<br>
if (BI->getSuccessor(1)->getSinglePredecessor() != 0) {<br>
if (HoistThenElseCodeToIf(BI))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
} else {<br>
// If Successor #1 has multiple preds, we may be able to conditionally<br>
// execute Successor #0 if it branches to successor #1.<br>
@@ -4003,7 +4003,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(<br>
if (Succ0TI->getNumSuccessors() == 1 &&<br>
Succ0TI->getSuccessor(0) == BI->getSuccessor(1))<br>
if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0)))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
}<br>
} else if (BI->getSuccessor(1)->getSinglePredecessor() != 0) {<br>
// If Successor #0 has multiple preds, we may be able to conditionally<br>
@@ -4012,22 +4012,22 @@ bool SimplifyCFGOpt::SimplifyCondBranch(<br>
if (Succ1TI->getNumSuccessors() == 1 &&<br>
Succ1TI->getSuccessor(0) == BI->getSuccessor(0))<br>
if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1)))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
}<br>
<br>
// If this is a branch on a phi node in the current block, thread control<br>
// through this block if any PHI node entries are constants.<br>
if (PHINode *PN = dyn_cast<PHINode>(BI->getCondition()))<br>
if (PN->getParent() == BI->getParent())<br>
- if (FoldCondBranchOnPHI(BI, TD))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ if (FoldCondBranchOnPHI(BI, DL))<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
<br>
// Scan predecessor blocks for conditional branches.<br>
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)<br>
if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator()))<br>
if (PBI != BI && PBI->isConditional())<br>
if (SimplifyCondBranchToCondBranch(PBI, BI))<br>
- return SimplifyCFG(BB, TTI, TD) | true;<br>
+ return SimplifyCFG(BB, TTI, DL) | true;<br>
<br>
return false;<br>
}<br>
@@ -4139,7 +4139,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB)<br>
// eliminate it, do so now.<br>
if (PHINode *PN = dyn_cast<PHINode>(BB->begin()))<br>
if (PN->getNumIncomingValues() == 2)<br>
- Changed |= FoldTwoEntryPHINode(PN, TD);<br>
+ Changed |= FoldTwoEntryPHINode(PN, DL);<br>
<br>
Builder.SetInsertPoint(BB->getTerminator());<br>
if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {<br>
@@ -4171,6 +4171,6 @@ bool SimplifyCFGOpt::run(BasicBlock *BB)<br>
/// of the CFG. It returns true if a modification was made.<br>
///<br>
bool llvm::SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,<br>
- const DataLayout *TD) {<br>
- return SimplifyCFGOpt(TTI, TD).run(BB);<br>
+ const DataLayout *DL) {<br>
+ return SimplifyCFGOpt(TTI, DL).run(BB);<br>
}<br>
<br>
Modified: llvm/trunk/lib/Transforms/Utils/SimplifyIndVar.cpp<br>
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/SimplifyIndVar.cpp?rev=201827&r1=201826&r2=201827&view=diff
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Utils/SimplifyIndVar.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Utils/SimplifyIndVar.cpp Thu Feb 20 18:06:31 2014<br>
@@ -48,7 +48,7 @@ namespace {<br>
Loop *L;<br>
LoopInfo *LI;<br>
ScalarEvolution *SE;<br>
- const DataLayout *TD; // May be NULL<br>
+ const DataLayout *DL; // May be NULL<br>
<br>
SmallVectorImpl<WeakVH> &DeadInsts;<br>
<br>
@@ -60,7 +60,7 @@ namespace {<br>
L(Loop),<br>
LI(LPM->getAnalysisIfAvailable<LoopInfo>()),<br>
SE(SE),<br>
- TD(LPM->getAnalysisIfAvailable<DataLayout>()),<br>
+ DL(LPM->getAnalysisIfAvailable<DataLayout>()),<br>
DeadInsts(Dead),<br>
Changed(false) {<br>
assert(LI && "IV simplification requires LoopInfo");<br>
<br>
Modified: llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp Thu Feb 20 18:06:31 2014<br>
@@ -43,7 +43,7 @@ namespace {<br>
class LibCallOptimization {<br>
protected:<br>
Function *Caller;<br>
- const DataLayout *TD;<br>
+ const DataLayout *DL;<br>
const TargetLibraryInfo *TLI;<br>
const LibCallSimplifier *LCS;<br>
LLVMContext* Context;<br>
@@ -63,11 +63,11 @@ public:<br>
/// change the calling convention.<br>
virtual bool ignoreCallingConv() { return false; }<br>
<br>
- Value *optimizeCall(CallInst *CI, const DataLayout *TD,<br>
+ Value *optimizeCall(CallInst *CI, const DataLayout *DL,<br>
const TargetLibraryInfo *TLI,<br>
const LibCallSimplifier *LCS, IRBuilder<> &B) {<br>
Caller = CI->getParent()->getParent();<br>
- this->TD = TD;<br>
+ this->DL = DL;<br>
this->TLI = TLI;<br>
this->LCS = LCS;<br>
if (CI->getCalledFunction())<br>
@@ -184,8 +184,8 @@ struct MemCpyChkOpt : public InstFortifi<br>
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||<br>
!FT->getParamType(0)->isPointerTy() ||<br>
!FT->getParamType(1)->isPointerTy() ||<br>
- FT->getParamType(2) != TD->getIntPtrType(Context) ||<br>
- FT->getParamType(3) != TD->getIntPtrType(Context))<br>
+ FT->getParamType(2) != DL->getIntPtrType(Context) ||<br>
+ FT->getParamType(3) != DL->getIntPtrType(Context))<br>
return 0;<br>
<br>
if (isFoldable(3, 2, false)) {<br>
@@ -207,8 +207,8 @@ struct MemMoveChkOpt : public InstFortif<br>
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||<br>
!FT->getParamType(0)->isPointerTy() ||<br>
!FT->getParamType(1)->isPointerTy() ||<br>
- FT->getParamType(2) != TD->getIntPtrType(Context) ||<br>
- FT->getParamType(3) != TD->getIntPtrType(Context))<br>
+ FT->getParamType(2) != DL->getIntPtrType(Context) ||<br>
+ FT->getParamType(3) != DL->getIntPtrType(Context))<br>
return 0;<br>
<br>
if (isFoldable(3, 2, false)) {<br>
@@ -230,8 +230,8 @@ struct MemSetChkOpt : public InstFortifi<br>
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||<br>
!FT->getParamType(0)->isPointerTy() ||<br>
!FT->getParamType(1)->isIntegerTy() ||<br>
- FT->getParamType(2) != TD->getIntPtrType(Context) ||<br>
- FT->getParamType(3) != TD->getIntPtrType(Context))<br>
+ FT->getParamType(2) != DL->getIntPtrType(Context) ||<br>
+ FT->getParamType(3) != DL->getIntPtrType(Context))<br>
return 0;<br>
<br>
if (isFoldable(3, 2, false)) {<br>
@@ -256,7 +256,7 @@ struct StrCpyChkOpt : public InstFortifi<br>
FT->getReturnType() != FT->getParamType(0) ||<br>
FT->getParamType(0) != FT->getParamType(1) ||<br>
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||<br>
- FT->getParamType(2) != TD->getIntPtrType(Context))<br>
+ FT->getParamType(2) != DL->getIntPtrType(Context))<br>
return 0;<br>
<br>
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);<br>
@@ -269,7 +269,7 @@ struct StrCpyChkOpt : public InstFortifi<br>
// TODO: It might be nice to get a maximum length out of the possible<br>
// string lengths for varying.<br>
if (isFoldable(2, 1, true)) {<br>
- Value *Ret = EmitStrCpy(Dst, Src, B, TD, TLI, Name.substr(2, 6));<br>
+ Value *Ret = EmitStrCpy(Dst, Src, B, DL, TLI, Name.substr(2, 6));<br>
return Ret;<br>
} else {<br>
// Maybe we can still fold __strcpy_chk to __memcpy_chk.<br>
@@ -277,12 +277,12 @@ struct StrCpyChkOpt : public InstFortifi<br>
if (Len == 0) return 0;<br>
<br>
// This optimization requires DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
Value *Ret =<br>
EmitMemCpyChk(Dst, Src,<br>
- ConstantInt::get(TD->getIntPtrType(Context), Len),<br>
- CI->getArgOperand(2), B, TD, TLI);<br>
+ ConstantInt::get(DL->getIntPtrType(Context), Len),<br>
+ CI->getArgOperand(2), B, DL, TLI);<br>
return Ret;<br>
}<br>
return 0;<br>
@@ -301,12 +301,12 @@ struct StpCpyChkOpt : public InstFortifi<br>
FT->getReturnType() != FT->getParamType(0) ||<br>
FT->getParamType(0) != FT->getParamType(1) ||<br>
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||<br>
- FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)))<br>
+ FT->getParamType(2) != DL->getIntPtrType(FT->getParamType(0)))<br>
return 0;<br>
<br>
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);<br>
if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)<br>
- Value *StrLen = EmitStrLen(Src, B, TD, TLI);<br>
+ Value *StrLen = EmitStrLen(Src, B, DL, TLI);<br>
return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0;<br>
}<br>
<br>
@@ -316,7 +316,7 @@ struct StpCpyChkOpt : public InstFortifi<br>
// TODO: It might be nice to get a maximum length out of the possible<br>
// string lengths for varying.<br>
if (isFoldable(2, 1, true)) {<br>
- Value *Ret = EmitStrCpy(Dst, Src, B, TD, TLI, Name.substr(2, 6));<br>
+ Value *Ret = EmitStrCpy(Dst, Src, B, DL, TLI, Name.substr(2, 6));<br>
return Ret;<br>
} else {<br>
// Maybe we can still fold __stpcpy_chk to __memcpy_chk.<br>
@@ -324,14 +324,14 @@ struct StpCpyChkOpt : public InstFortifi<br>
if (Len == 0) return 0;<br>
<br>
// This optimization requires DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
Type *PT = FT->getParamType(0);<br>
- Value *LenV = ConstantInt::get(TD->getIntPtrType(PT), Len);<br>
+ Value *LenV = ConstantInt::get(DL->getIntPtrType(PT), Len);<br>
Value *DstEnd = B.CreateGEP(Dst,<br>
- ConstantInt::get(TD->getIntPtrType(PT),<br>
+ ConstantInt::get(DL->getIntPtrType(PT),<br>
Len - 1));<br>
- if (!EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B, TD, TLI))<br>
+ if (!EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B, DL, TLI))<br>
return 0;<br>
return DstEnd;<br>
}<br>
@@ -351,12 +351,12 @@ struct StrNCpyChkOpt : public InstFortif<br>
FT->getParamType(0) != FT->getParamType(1) ||<br>
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||<br>
!FT->getParamType(2)->isIntegerTy() ||<br>
- FT->getParamType(3) != TD->getIntPtrType(Context))<br>
+ FT->getParamType(3) != DL->getIntPtrType(Context))<br>
return 0;<br>
<br>
if (isFoldable(3, 2, false)) {<br>
Value *Ret = EmitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1),<br>
- CI->getArgOperand(2), B, TD, TLI,<br>
+ CI->getArgOperand(2), B, DL, TLI,<br>
Name.substr(2, 7));<br>
return Ret;<br>
}<br>
@@ -392,7 +392,7 @@ struct StrCatOpt : public LibCallOptimiz<br>
return Dst;<br>
<br>
// These optimizations require DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
return emitStrLenMemCpy(Src, Dst, Len, B);<br>
}<br>
@@ -401,7 +401,7 @@ struct StrCatOpt : public LibCallOptimiz<br>
IRBuilder<> &B) {<br>
// We need to find the end of the destination string. That's where the<br>
// memory is to be moved to. We just generate a call to strlen.<br>
- Value *DstLen = EmitStrLen(Dst, B, TD, TLI);<br>
+ Value *DstLen = EmitStrLen(Dst, B, DL, TLI);<br>
if (!DstLen)<br>
return 0;<br>
<br>
@@ -413,7 +413,7 @@ struct StrCatOpt : public LibCallOptimiz<br>
// We have enough information to now generate the memcpy call to do the<br>
// concatenation for us. Make a memcpy to copy the nul byte with align = 1.<br>
B.CreateMemCpy(CpyDst, Src,<br>
- ConstantInt::get(TD->getIntPtrType(*Context), Len + 1), 1);<br>
+ ConstantInt::get(DL->getIntPtrType(*Context), Len + 1), 1);<br>
return Dst;<br>
}<br>
};<br>
@@ -451,7 +451,7 @@ struct StrNCatOpt : public StrCatOpt {<br>
if (SrcLen == 0 || Len == 0) return Dst;<br>
<br>
// These optimizations require DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
// We don't optimize this case<br>
if (Len < SrcLen) return 0;<br>
@@ -479,23 +479,23 @@ struct StrChrOpt : public LibCallOptimiz<br>
ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));<br>
if (CharC == 0) {<br>
// These optimizations require DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
uint64_t Len = GetStringLength(SrcStr);<br>
if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr needs i32.<br>
return 0;<br>
<br>
return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul.<br>
- ConstantInt::get(TD->getIntPtrType(*Context), Len),<br>
- B, TD, TLI);<br>
+ ConstantInt::get(DL->getIntPtrType(*Context), Len),<br>
+ B, DL, TLI);<br>
}<br>
<br>
// Otherwise, the character is a constant, see if the first argument is<br>
// a string literal. If so, we can constant fold.<br>
StringRef Str;<br>
if (!getConstantStringInfo(SrcStr, Str)) {<br>
- if (TD && CharC->isZero()) // strchr(p, 0) -> p + strlen(p)<br>
- return B.CreateGEP(SrcStr, EmitStrLen(SrcStr, B, TD, TLI), "strchr");<br>
+ if (DL && CharC->isZero()) // strchr(p, 0) -> p + strlen(p)<br>
+ return B.CreateGEP(SrcStr, EmitStrLen(SrcStr, B, DL, TLI), "strchr");<br>
return 0;<br>
}<br>
<br>
@@ -531,8 +531,8 @@ struct StrRChrOpt : public LibCallOptimi<br>
StringRef Str;<br>
if (!getConstantStringInfo(SrcStr, Str)) {<br>
// strrchr(s, 0) -> strchr(s, 0)<br>
- if (TD && CharC->isZero())<br>
- return EmitStrChr(SrcStr, '\0', B, TD, TLI);<br>
+ if (DL && CharC->isZero())<br>
+ return EmitStrChr(SrcStr, '\0', B, DL, TLI);<br>
return 0;<br>
}<br>
<br>
@@ -581,11 +581,11 @@ struct StrCmpOpt : public LibCallOptimiz<br>
uint64_t Len2 = GetStringLength(Str2P);<br>
if (Len1 && Len2) {<br>
// These optimizations require DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
return EmitMemCmp(Str1P, Str2P,<br>
- ConstantInt::get(TD->getIntPtrType(*Context),<br>
- std::min(Len1, Len2)), B, TD, TLI);<br>
+ ConstantInt::get(DL->getIntPtrType(*Context),<br>
+ std::min(Len1, Len2)), B, DL, TLI);<br>
}<br>
<br>
return 0;<br>
@@ -617,8 +617,8 @@ struct StrNCmpOpt : public LibCallOptimi<br>
if (Length == 0) // strncmp(x,y,0) -> 0<br>
return ConstantInt::get(CI->getType(), 0);<br>
<br>
- if (TD && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)<br>
- return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, TD, TLI);<br>
+ if (DL && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)<br>
+ return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, DL, TLI);<br>
<br>
StringRef Str1, Str2;<br>
bool HasStr1 = getConstantStringInfo(Str1P, Str1);<br>
@@ -657,7 +657,7 @@ struct StrCpyOpt : public LibCallOptimiz<br>
return Src;<br>
<br>
// These optimizations require DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
// See if we can get the length of the input string.<br>
uint64_t Len = GetStringLength(Src);<br>
@@ -666,7 +666,7 @@ struct StrCpyOpt : public LibCallOptimiz<br>
// We have enough information to now generate the memcpy call to do the<br>
// copy for us. Make a memcpy to copy the nul byte with align = 1.<br>
B.CreateMemCpy(Dst, Src,<br>
- ConstantInt::get(TD->getIntPtrType(*Context), Len), 1);<br>
+ ConstantInt::get(DL->getIntPtrType(*Context), Len), 1);<br>
return Dst;<br>
}<br>
};<br>
@@ -682,11 +682,11 @@ struct StpCpyOpt: public LibCallOptimiza<br>
return 0;<br>
<br>
// These optimizations require DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);<br>
if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)<br>
- Value *StrLen = EmitStrLen(Src, B, TD, TLI);<br>
+ Value *StrLen = EmitStrLen(Src, B, DL, TLI);<br>
return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0;<br>
}<br>
<br>
@@ -695,9 +695,9 @@ struct StpCpyOpt: public LibCallOptimiza<br>
if (Len == 0) return 0;<br>
<br>
Type *PT = FT->getParamType(0);<br>
- Value *LenV = ConstantInt::get(TD->getIntPtrType(PT), Len);<br>
+ Value *LenV = ConstantInt::get(DL->getIntPtrType(PT), Len);<br>
Value *DstEnd = B.CreateGEP(Dst,<br>
- ConstantInt::get(TD->getIntPtrType(PT),<br>
+ ConstantInt::get(DL->getIntPtrType(PT),<br>
Len - 1));<br>
<br>
// We have enough information to now generate the memcpy call to do the<br>
@@ -740,7 +740,7 @@ struct StrNCpyOpt : public LibCallOptimi<br>
if (Len == 0) return Dst; // strncpy(x, y, 0) -> x<br>
<br>
// These optimizations require DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
// Let strncpy handle the zero padding<br>
if (Len > SrcLen+1) return 0;<br>
@@ -748,7 +748,7 @@ struct StrNCpyOpt : public LibCallOptimi<br>
Type *PT = FT->getParamType(0);<br>
// strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant]<br>
B.CreateMemCpy(Dst, Src,<br>
- ConstantInt::get(TD->getIntPtrType(PT), Len), 1);<br>
+ ConstantInt::get(DL->getIntPtrType(PT), Len), 1);<br>
<br>
return Dst;<br>
}<br>
@@ -805,8 +805,8 @@ struct StrPBrkOpt : public LibCallOptimi<br>
}<br>
<br>
// strpbrk(s, "a") -> strchr(s, 'a')<br>
- if (TD && HasS2 && S2.size() == 1)<br>
- return EmitStrChr(CI->getArgOperand(0), S2[0], B, TD, TLI);<br>
+ if (DL && HasS2 && S2.size() == 1)<br>
+ return EmitStrChr(CI->getArgOperand(0), S2[0], B, DL, TLI);<br>
<br>
return 0;<br>
}<br>
@@ -885,8 +885,8 @@ struct StrCSpnOpt : public LibCallOptimi<br>
}<br>
<br>
// strcspn(s, "") -> strlen(s)<br>
- if (TD && HasS2 && S2.empty())<br>
- return EmitStrLen(CI->getArgOperand(0), B, TD, TLI);<br>
+ if (DL && HasS2 && S2.empty())<br>
+ return EmitStrLen(CI->getArgOperand(0), B, DL, TLI);<br>
<br>
return 0;<br>
}<br>
@@ -906,12 +906,12 @@ struct StrStrOpt : public LibCallOptimiz<br>
return B.CreateBitCast(CI->getArgOperand(0), CI->getType());<br>
<br>
// fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0<br>
- if (TD && isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {<br>
- Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, TD, TLI);<br>
+ if (DL && isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {<br>
+ Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, DL, TLI);<br>
if (!StrLen)<br>
return 0;<br>
Value *StrNCmp = EmitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1),<br>
- StrLen, B, TD, TLI);<br>
+ StrLen, B, DL, TLI);<br>
if (!StrNCmp)<br>
return 0;<br>
for (Value::use_iterator UI = CI->use_begin(), UE = CI->use_end();<br>
@@ -949,7 +949,7 @@ struct StrStrOpt : public LibCallOptimiz<br>
<br>
// fold strstr(x, "y") -> strchr(x, 'y').<br>
if (HasStr2 && ToFindStr.size() == 1) {<br>
- Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, TD, TLI);<br>
+ Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, DL, TLI);<br>
return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : 0;<br>
}<br>
return 0;<br>
@@ -1011,13 +1011,13 @@ struct MemCmpOpt : public LibCallOptimiz<br>
struct MemCpyOpt : public LibCallOptimization {<br>
virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {<br>
// These optimizations require DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
FunctionType *FT = Callee->getFunctionType();<br>
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||<br>
!FT->getParamType(0)->isPointerTy() ||<br>
!FT->getParamType(1)->isPointerTy() ||<br>
- FT->getParamType(2) != TD->getIntPtrType(*Context))<br>
+ FT->getParamType(2) != DL->getIntPtrType(*Context))<br>
return 0;<br>
<br>
// memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)<br>
@@ -1030,13 +1030,13 @@ struct MemCpyOpt : public LibCallOptimiz<br>
struct MemMoveOpt : public LibCallOptimization {<br>
virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {<br>
// These optimizations require DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
FunctionType *FT = Callee->getFunctionType();<br>
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||<br>
!FT->getParamType(0)->isPointerTy() ||<br>
!FT->getParamType(1)->isPointerTy() ||<br>
- FT->getParamType(2) != TD->getIntPtrType(*Context))<br>
+ FT->getParamType(2) != DL->getIntPtrType(*Context))<br>
return 0;<br>
<br>
// memmove(x, y, n) -> llvm.memmove(x, y, n, 1)<br>
@@ -1049,13 +1049,13 @@ struct MemMoveOpt : public LibCallOptimi<br>
struct MemSetOpt : public LibCallOptimization {<br>
virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {<br>
// These optimizations require DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
FunctionType *FT = Callee->getFunctionType();<br>
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||<br>
!FT->getParamType(0)->isPointerTy() ||<br>
!FT->getParamType(1)->isIntegerTy() ||<br>
- FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)))<br>
+ FT->getParamType(2) != DL->getIntPtrType(FT->getParamType(0)))<br>
return 0;<br>
<br>
// memset(p, v, n) -> llvm.memset(p, v, n, 1)<br>
@@ -1632,7 +1632,7 @@ struct PrintFOpt : public LibCallOptimiz<br>
<br>
// printf("x") -> putchar('x'), even for '%'.<br>
if (FormatStr.size() == 1) {<br>
- Value *Res = EmitPutChar(B.getInt32(FormatStr[0]), B, TD, TLI);<br>
+ Value *Res = EmitPutChar(B.getInt32(FormatStr[0]), B, DL, TLI);<br>
if (CI->use_empty() || !Res) return Res;<br>
return B.CreateIntCast(Res, CI->getType(), true);<br>
}<br>
@@ -1644,7 +1644,7 @@ struct PrintFOpt : public LibCallOptimiz<br>
// pass to be run after this pass, to merge duplicate strings.<br>
FormatStr = FormatStr.drop_back();<br>
Value *GV = B.CreateGlobalString(FormatStr, "str");<br>
- Value *NewCI = EmitPutS(GV, B, TD, TLI);<br>
+ Value *NewCI = EmitPutS(GV, B, DL, TLI);<br>
return (CI->use_empty() || !NewCI) ?<br>
NewCI :<br>
ConstantInt::get(CI->getType(), FormatStr.size()+1);<br>
@@ -1654,7 +1654,7 @@ struct PrintFOpt : public LibCallOptimiz<br>
// printf("%c", chr) --> putchar(chr)<br>
if (FormatStr == "%c" && CI->getNumArgOperands() > 1 &&<br>
CI->getArgOperand(1)->getType()->isIntegerTy()) {<br>
- Value *Res = EmitPutChar(CI->getArgOperand(1), B, TD, TLI);<br>
+ Value *Res = EmitPutChar(CI->getArgOperand(1), B, DL, TLI);<br>
<br>
if (CI->use_empty() || !Res) return Res;<br>
return B.CreateIntCast(Res, CI->getType(), true);<br>
@@ -1663,7 +1663,7 @@ struct PrintFOpt : public LibCallOptimiz<br>
// printf("%s\n", str) --> puts(str)<br>
if (FormatStr == "%s\n" && CI->getNumArgOperands() > 1 &&<br>
CI->getArgOperand(1)->getType()->isPointerTy()) {<br>
- return EmitPutS(CI->getArgOperand(1), B, TD, TLI);<br>
+ return EmitPutS(CI->getArgOperand(1), B, DL, TLI);<br>
}<br>
return 0;<br>
}<br>
@@ -1712,11 +1712,11 @@ struct SPrintFOpt : public LibCallOptimi<br>
return 0; // we found a format specifier, bail out.<br>
<br>
// These optimizations require DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
// sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)<br>
B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),<br>
- ConstantInt::get(TD->getIntPtrType(*Context), // Copy the<br>
+ ConstantInt::get(DL->getIntPtrType(*Context), // Copy the<br>
FormatStr.size() + 1), 1); // nul byte.<br>
return ConstantInt::get(CI->getType(), FormatStr.size());<br>
}<br>
@@ -1742,12 +1742,12 @@ struct SPrintFOpt : public LibCallOptimi<br>
<br>
if (FormatStr[1] == 's') {<br>
// These optimizations require DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
// sprintf(dest, "%s", str) -> llvm.memcpy(dest, str, strlen(str)+1, 1)<br>
if (!CI->getArgOperand(2)->getType()->isPointerTy()) return 0;<br>
<br>
- Value *Len = EmitStrLen(CI->getArgOperand(2), B, TD, TLI);<br>
+ Value *Len = EmitStrLen(CI->getArgOperand(2), B, DL, TLI);<br>
if (!Len)<br>
return 0;<br>
Value *IncLen = B.CreateAdd(Len,<br>
@@ -1812,12 +1812,12 @@ struct FPrintFOpt : public LibCallOptimi<br>
return 0; // We found a format specifier.<br>
<br>
// These optimizations require DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
return EmitFWrite(CI->getArgOperand(1),<br>
- ConstantInt::get(TD->getIntPtrType(*Context),<br>
+ ConstantInt::get(DL->getIntPtrType(*Context),<br>
FormatStr.size()),<br>
- CI->getArgOperand(0), B, TD, TLI);<br>
+ CI->getArgOperand(0), B, DL, TLI);<br>
}<br>
<br>
// The remaining optimizations require the format string to be "%s" or "%c"<br>
@@ -1830,14 +1830,14 @@ struct FPrintFOpt : public LibCallOptimi<br>
if (FormatStr[1] == 'c') {<br>
// fprintf(F, "%c", chr) --> fputc(chr, F)<br>
if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return 0;<br>
- return EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, TD, TLI);<br>
+ return EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, DL, TLI);<br>
}<br>
<br>
if (FormatStr[1] == 's') {<br>
// fprintf(F, "%s", str) --> fputs(str, F)<br>
if (!CI->getArgOperand(2)->getType()->isPointerTy())<br>
return 0;<br>
- return EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TD, TLI);<br>
+ return EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, DL, TLI);<br>
}<br>
return 0;<br>
}<br>
@@ -1897,7 +1897,7 @@ struct FWriteOpt : public LibCallOptimiz<br>
// This optimisation is only valid if the return value is unused.<br>
if (Bytes == 1 && CI->use_empty()) { // fwrite(S,1,1,F) -> fputc(S[0],F)<br>
Value *Char = B.CreateLoad(CastToCStr(CI->getArgOperand(0), B), "char");<br>
- Value *NewCI = EmitFPutC(Char, CI->getArgOperand(3), B, TD, TLI);<br>
+ Value *NewCI = EmitFPutC(Char, CI->getArgOperand(3), B, DL, TLI);<br>
return NewCI ? ConstantInt::get(CI->getType(), 1) : 0;<br>
}<br>
<br>
@@ -1911,7 +1911,7 @@ struct FPutsOpt : public LibCallOptimiza<br>
(void) ER.callOptimizer(Callee, CI, B);<br>
<br>
// These optimizations require DataLayout.<br>
- if (!TD) return 0;<br>
+ if (!DL) return 0;<br>
<br>
// Require two pointers. Also, we can't optimize if return value is used.<br>
FunctionType *FT = Callee->getFunctionType();<br>
@@ -1925,8 +1925,8 @@ struct FPutsOpt : public LibCallOptimiza<br>
if (!Len) return 0;<br>
// Known to have no uses (see above).<br>
return EmitFWrite(CI->getArgOperand(0),<br>
- ConstantInt::get(TD->getIntPtrType(*Context), Len-1),<br>
- CI->getArgOperand(1), B, TD, TLI);<br>
+ ConstantInt::get(DL->getIntPtrType(*Context), Len-1),<br>
+ CI->getArgOperand(1), B, DL, TLI);<br>
}<br>
};<br>
<br>
@@ -1946,7 +1946,7 @@ struct PutsOpt : public LibCallOptimizat<br>
<br>
if (Str.empty() && CI->use_empty()) {<br>
// puts("") -> putchar('\n')<br>
- Value *Res = EmitPutChar(B.getInt32('\n'), B, TD, TLI);<br>
+ Value *Res = EmitPutChar(B.getInt32('\n'), B, DL, TLI);<br>
if (CI->use_empty() || !Res) return Res;<br>
return B.CreateIntCast(Res, CI->getType(), true);<br>
}<br>
@@ -1960,7 +1960,7 @@ struct PutsOpt : public LibCallOptimizat<br>
namespace llvm {<br>
<br>
class LibCallSimplifierImpl {<br>
- const DataLayout *TD;<br>
+ const DataLayout *DL;<br>
const TargetLibraryInfo *TLI;<br>
const LibCallSimplifier *LCS;<br>
bool UnsafeFPShrink;<br>
@@ -1970,11 +1970,11 @@ class LibCallSimplifierImpl {<br>
PowOpt Pow;<br>
Exp2Opt Exp2;<br>
public:<br>
- LibCallSimplifierImpl(const DataLayout *TD, const TargetLibraryInfo *TLI,<br>
+ LibCallSimplifierImpl(const DataLayout *DL, const TargetLibraryInfo *TLI,<br>
const LibCallSimplifier *LCS,<br>
bool UnsafeFPShrink = false)<br>
: Cos(UnsafeFPShrink), Pow(UnsafeFPShrink), Exp2(UnsafeFPShrink) {<br>
- this->TD = TD;<br>
+ this->DL = DL;<br>
this->TLI = TLI;<br>
this->LCS = LCS;<br>
this->UnsafeFPShrink = UnsafeFPShrink;<br>
@@ -2233,15 +2233,15 @@ Value *LibCallSimplifierImpl::optimizeCa<br>
LibCallOptimization *LCO = lookupOptimization(CI);<br>
if (LCO) {<br>
IRBuilder<> Builder(CI);<br>
- return LCO->optimizeCall(CI, TD, TLI, LCS, Builder);<br>
+ return LCO->optimizeCall(CI, DL, TLI, LCS, Builder);<br>
}<br>
return 0;<br>
}<br>
<br>
-LibCallSimplifier::LibCallSimplifier(const DataLayout *TD,<br>
+LibCallSimplifier::LibCallSimplifier(const DataLayout *DL,<br>
const TargetLibraryInfo *TLI,<br>
bool UnsafeFPShrink) {<br>
- Impl = new LibCallSimplifierImpl(TD, TLI, this, UnsafeFPShrink);<br>
+ Impl = new LibCallSimplifierImpl(DL, TLI, this, UnsafeFPShrink);<br>
}<br>
<br>
LibCallSimplifier::~LibCallSimplifier() {<br>
<br>
Modified: llvm/trunk/lib/Transforms/Vectorize/BBVectorize.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/BBVectorize.cpp?rev=201827&r1=201826&r2=201827&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/BBVectorize.cpp?rev=201827&r1=201826&r2=201827&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Transforms/Vectorize/BBVectorize.cpp (original)<br>
+++ llvm/trunk/lib/Transforms/Vectorize/BBVectorize.cpp Thu Feb 20 18:06:31 2014<br>
@@ -201,7 +201,7 @@ namespace {<br>
AA = &P->getAnalysis<AliasAnalysis>();<br>
DT = &P->getAnalysis<DominatorTreeWrapperPass>().getDomTree();<br>
SE = &P->getAnalysis<ScalarEvolution>();<br>
- TD = P->getAnalysisIfAvailable<DataLayout>();<br>
+ DL = P->getAnalysisIfAvailable<DataLayout>();<br>
TTI = IgnoreTargetInfo ? 0 : &P->getAnalysis<TargetTransformInfo>();<br>
}<br>
<br>
@@ -214,7 +214,7 @@ namespace {<br>
AliasAnalysis *AA;<br>
DominatorTree *DT;<br>
ScalarEvolution *SE;<br>
- DataLayout *TD;<br>
+ DataLayout *DL;<br>
const TargetTransformInfo *TTI;<br>
<br>
// FIXME: const correct?<br>
@@ -436,7 +436,7 @@ namespace {<br>
AA = &getAnalysis<AliasAnalysis>();<br>
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();<br>
SE = &getAnalysis<ScalarEvolution>();<br>
- TD = getAnalysisIfAvailable<DataLayout>();<br>
+ DL = getAnalysisIfAvailable<DataLayout>();<br>
TTI = IgnoreTargetInfo ? 0 : &getAnalysis<TargetTransformInfo>();<br>
<br>
return vectorizeBB(BB);<br>
@@ -634,11 +634,11 @@ namespace {<br>
int64_t Offset = IntOff->getSExtValue();<br>
<br>
Type *VTy = IPtr->getType()->getPointerElementType();<br>
- int64_t VTyTSS = (int64_t) TD->getTypeStoreSize(VTy);<br>
+ int64_t VTyTSS = (int64_t) DL->getTypeStoreSize(VTy);<br>
<br>
Type *VTy2 = JPtr->getType()->getPointerElementType();<br>
if (VTy != VTy2 && Offset < 0) {<br>
- int64_t VTy2TSS = (int64_t) TD->getTypeStoreSize(VTy2);<br>
+ int64_t VTy2TSS = (int64_t) DL->getTypeStoreSize(VTy2);<br>
OffsetInElmts = Offset/VTy2TSS;<br>
return (abs64(Offset) % VTy2TSS) == 0;<br>
}<br>
@@ -821,7 +821,7 @@ namespace {<br>
<br>
// It is important to cleanup here so that future iterations of this<br>
// function have less work to do.<br>
- (void) SimplifyInstructionsInBlock(&BB, TD, AA->getTargetLibraryInfo());<br>
+ (void) SimplifyInstructionsInBlock(&BB, DL, AA->getTargetLibraryInfo());<br>
return true;<br>
}<br>
<br>
@@ -876,7 +876,7 @@ namespace {<br>
}<br>
<br>
// We can't vectorize memory operations without target data<br>
- if (TD == 0 && IsSimpleLoadStore)<br>
+ if (DL == 0 && IsSimpleLoadStore)<br>
return false;<br>
<br>
Type *T1, *T2;<br>
@@ -913,7 +913,7 @@ namespace {<br>
if (T2->isX86_FP80Ty() || T2->isPPC_FP128Ty() || T2->isX86_MMXTy())<br>
return false;<br>
<br>
- if ((!Config.VectorizePointers || TD == 0) &&<br>
+ if ((!Config.VectorizePointers || DL == 0) &&<br>
(T1->getScalarType()->isPointerTy() ||<br>
T2->getScalarType()->isPointerTy()))<br>
return false;<br>
@@ -977,7 +977,7 @@ namespace {<br>
// with the lower offset has an alignment suitable for the<br>
// vector type.<br>
<br>
- unsigned VecAlignment = TD->getPrefTypeAlignment(VType);<br>
+ unsigned VecAlignment = DL->getPrefTypeAlignment(VType);<br>
if (BottomAlignment < VecAlignment)<br>
return false;<br>
}<br>
<br>
<br>
</blockquote></div><br></div>