[llvm-commits] [llvm] r166340 - in /llvm/trunk: include/llvm/Analysis/ScalarEvolutionExpander.h include/llvm/Transforms/Scalar.h lib/Analysis/ScalarEvolutionExpander.cpp lib/CodeGen/Passes.cpp lib/Transforms/Scalar/LoopStrengthReduce.cpp lib/Transforms/Utils/LowerInvoke.cpp
Nadav Rotem
nrotem at apple.com
Wed Jan 2 17:59:50 PST 2013
I never investigated it. I should re-apply this patch in parts and see whether it still causes the LTO build problems.
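
For anyone picking this apart later: functionally the patch only changes where the two passes get their target hooks from. Roughly, condensed from the hunks below (not a buildable snippet, just the shape of the change):

    // With r166264 in tree (the '-' side of the hunks below): the passes take
    // no target argument and query the optional TargetTransformInfo analysis
    // at run time.
    addPass(createLoopStrengthReducePass());
    addPass(createLowerInvokePass());
    ...
    TargetTransformInfo *TTI = getAnalysisIfAvailable<TargetTransformInfo>();
    if (TTI)
      STTI = TTI->getScalarTargetTransformInfo();

    // After this revert (the '+' side): the TargetLowering pointer is threaded
    // through the factory functions when the codegen pipeline is built.
    addPass(createLoopStrengthReducePass(getTargetLowering()));
    addPass(createLowerInvokePass(TM->getTargetLowering()));

Splitting along that line (LSR vs. LowerInvoke) is probably the natural way to re-apply it in parts.
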
On Jan 2, 2013, at 5:41 PM, Hal Finkel <hfinkel at anl.gov> wrote:
> Nadav,
>
> Did anyone ever figure out why this did not work?
>
> Thanks again,
> Hal
>
> ----- Original Message -----
>> From: "Nadav Rotem" <nrotem at apple.com>
>> To: llvm-commits at cs.uiuc.edu
>> Sent: Friday, October 19, 2012 4:28:43 PM
>> Subject: [llvm-commits] [llvm] r166340 - in /llvm/trunk: include/llvm/Analysis/ScalarEvolutionExpander.h
>> include/llvm/Transforms/Scalar.h lib/Analysis/ScalarEvolutionExpander.cpp lib/CodeGen/Passes.cpp
>> lib/Transforms/Scalar/LoopStrengthReduce.cpp lib/Transforms/Utils/LowerInvoke.cpp
>>
>> Author: nadav
>> Date: Fri Oct 19 16:28:43 2012
>> New Revision: 166340
>>
>> URL: http://llvm.org/viewvc/llvm-project?rev=166340&view=rev
>> Log:
>> revert r166264 because the LTO build is still failing
>>
>> Modified:
>> llvm/trunk/include/llvm/Analysis/ScalarEvolutionExpander.h
>> llvm/trunk/include/llvm/Transforms/Scalar.h
>> llvm/trunk/lib/Analysis/ScalarEvolutionExpander.cpp
>> llvm/trunk/lib/CodeGen/Passes.cpp
>> llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp
>> llvm/trunk/lib/Transforms/Utils/LowerInvoke.cpp
>>
>> Modified: llvm/trunk/include/llvm/Analysis/ScalarEvolutionExpander.h
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/ScalarEvolutionExpander.h?rev=166340&r1=166339&r2=166340&view=diff
>> ==============================================================================
>> --- llvm/trunk/include/llvm/Analysis/ScalarEvolutionExpander.h (original)
>> +++ llvm/trunk/include/llvm/Analysis/ScalarEvolutionExpander.h Fri Oct 19 16:28:43 2012
>> @@ -22,7 +22,7 @@
>> #include <set>
>>
>> namespace llvm {
>> - class ScalarTargetTransformInfo;
>> + class TargetLowering;
>>
>> /// Return true if the given expression is safe to expand in the sense that
>> /// all materialized values are safe to speculate.
>> @@ -129,7 +129,7 @@
>> /// representative. Return the number of phis eliminated.
>> unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
>> SmallVectorImpl<WeakVH> &DeadInsts,
>> - const ScalarTargetTransformInfo *STTI = NULL);
>> + const TargetLowering *TLI = NULL);
>>
>> /// expandCodeFor - Insert code to directly compute the specified SCEV
>> /// expression into the program. The inserted code is inserted into the
>>
>> Modified: llvm/trunk/include/llvm/Transforms/Scalar.h
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Transforms/Scalar.h?rev=166340&r1=166339&r2=166340&view=diff
>> ==============================================================================
>> --- llvm/trunk/include/llvm/Transforms/Scalar.h (original)
>> +++ llvm/trunk/include/llvm/Transforms/Scalar.h Fri Oct 19 16:28:43 2012
>> @@ -119,7 +119,7 @@
>> // optional parameter used to consult the target machine whether certain
>> // transformations are profitable.
>> //
>> -Pass *createLoopStrengthReducePass();
>> +Pass *createLoopStrengthReducePass(const TargetLowering *TLI = 0);
>>
>> Pass *createGlobalMergePass(const TargetLowering *TLI = 0);
>>
>> @@ -249,8 +249,9 @@
>> // purpose "my LLVM-to-LLVM pass doesn't support the invoke instruction yet"
>> // lowering pass.
>> //
>> -FunctionPass *createLowerInvokePass();
>> -FunctionPass *createLowerInvokePass(bool useExpensiveEHSupport);
>> +FunctionPass *createLowerInvokePass(const TargetLowering *TLI = 0);
>> +FunctionPass *createLowerInvokePass(const TargetLowering *TLI,
>> + bool useExpensiveEHSupport);
>> extern char &LowerInvokePassID;
>>
>> //===----------------------------------------------------------------------===//
>>
>> Modified: llvm/trunk/lib/Analysis/ScalarEvolutionExpander.cpp
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ScalarEvolutionExpander.cpp?rev=166340&r1=166339&r2=166340&view=diff
>> ==============================================================================
>> --- llvm/trunk/lib/Analysis/ScalarEvolutionExpander.cpp (original)
>> +++ llvm/trunk/lib/Analysis/ScalarEvolutionExpander.cpp Fri Oct 19 16:28:43 2012
>> @@ -19,8 +19,8 @@
>> #include "llvm/LLVMContext.h"
>> #include "llvm/Support/Debug.h"
>> #include "llvm/DataLayout.h"
>> +#include "llvm/Target/TargetLowering.h"
>> #include "llvm/ADT/STLExtras.h"
>> -#include "llvm/TargetTransformInfo.h"
>>
>> using namespace llvm;
>>
>> @@ -1599,15 +1599,15 @@
>> /// This does not depend on any SCEVExpander state but should be used in
>> /// the same context that SCEVExpander is used.
>> unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
>> - SmallVectorImpl<WeakVH> &DeadInsts,
>> - const ScalarTargetTransformInfo *STTI) {
>> + SmallVectorImpl<WeakVH> &DeadInsts,
>> + const TargetLowering *TLI) {
>> // Find integer phis in order of increasing width.
>> SmallVector<PHINode*, 8> Phis;
>> for (BasicBlock::iterator I = L->getHeader()->begin();
>> PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
>> Phis.push_back(Phi);
>> }
>> - if (STTI)
>> + if (TLI)
>> std::sort(Phis.begin(), Phis.end(), width_descending);
>>
>> unsigned NumElim = 0;
>> @@ -1635,8 +1635,8 @@
>> PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
>> if (!OrigPhiRef) {
>> OrigPhiRef = Phi;
>> - if (Phi->getType()->isIntegerTy() && STTI &&
>> - STTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
>> + if (Phi->getType()->isIntegerTy() && TLI
>> + && TLI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
>> // This phi can be freely truncated to the narrowest phi type. Map the
>> // truncated expression to it so it will be reused for narrow types.
>> const SCEV *TruncExpr =
>>
>> Modified: llvm/trunk/lib/CodeGen/Passes.cpp
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/Passes.cpp?rev=166340&r1=166339&r2=166340&view=diff
>> ==============================================================================
>> --- llvm/trunk/lib/CodeGen/Passes.cpp (original)
>> +++ llvm/trunk/lib/CodeGen/Passes.cpp Fri Oct 19 16:28:43 2012
>> @@ -359,7 +359,7 @@
>>
>> // Run loop strength reduction before anything else.
>> if (getOptLevel() != CodeGenOpt::None && !DisableLSR) {
>> - addPass(createLoopStrengthReducePass());
>> + addPass(createLoopStrengthReducePass(getTargetLowering()));
>> if (PrintLSR)
>> addPass(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &dbgs()));
>> }
>> @@ -389,7 +389,7 @@
>> addPass(createDwarfEHPass(TM));
>> break;
>> case ExceptionHandling::None:
>> - addPass(createLowerInvokePass());
>> + addPass(createLowerInvokePass(TM->getTargetLowering()));
>>
>> // The lower invoke pass may create unreachable code. Remove it.
>> addPass(createUnreachableBlockEliminationPass());
>>
>> Modified: llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp?rev=166340&r1=166339&r2=166340&view=diff
>> ==============================================================================
>> --- llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp (original)
>> +++ llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp Fri Oct 19 16:28:43 2012
>> @@ -37,7 +37,7 @@
>> //
>> // TODO: Handle multiple loops at a time.
>> //
>> -// TODO: Should AddrMode::BaseGV be changed to a ConstantExpr
>> +// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr
>> // instead of a GlobalValue?
>> //
>> // TODO: When truncation is free, truncate ICmp users' operands to make it a
>> @@ -67,7 +67,6 @@
>> #include "llvm/Transforms/Scalar.h"
>> #include "llvm/Transforms/Utils/BasicBlockUtils.h"
>> #include "llvm/Transforms/Utils/Local.h"
>> -#include "llvm/TargetTransformInfo.h"
>> #include "llvm/ADT/SmallBitVector.h"
>> #include "llvm/ADT/SetVector.h"
>> #include "llvm/ADT/DenseSet.h"
>> @@ -75,6 +74,7 @@
>> #include "llvm/Support/CommandLine.h"
>> #include "llvm/Support/ValueHandle.h"
>> #include "llvm/Support/raw_ostream.h"
>> +#include "llvm/Target/TargetLowering.h"
>> #include <algorithm>
>> using namespace llvm;
>>
>> @@ -1118,7 +1118,7 @@
>> enum KindType {
>> Basic, ///< A normal use, with no folding.
>> Special, ///< A special case of basic, allowing -1 scales.
>> - Address, ///< An address use; folding according to ScalarTargetTransformInfo.
>> + Address, ///< An address use; folding according to TargetLowering
>> ICmpZero ///< An equality icmp with both operands folded into one.
>> // TODO: Add a generic icmp too?
>> };
>> @@ -1272,12 +1272,12 @@
>> /// address-mode folding and special icmp tricks.
>> static bool isLegalUse(const AddrMode &AM,
>> LSRUse::KindType Kind, Type *AccessTy,
>> - const ScalarTargetTransformInfo *STTI) {
>> + const TargetLowering *TLI) {
>> switch (Kind) {
>> case LSRUse::Address:
>> // If we have low-level target information, ask the target if it can
>> // completely fold this address.
>> - if (STTI) return STTI->isLegalAddressingMode(AM, AccessTy);
>> + if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy);
>>
>> // Otherwise, just guess that reg+reg addressing is legal.
>> return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1;
>> @@ -1300,7 +1300,7 @@
>> // If we have low-level target information, ask the target if it can fold an
>> // integer immediate on an icmp.
>> if (AM.BaseOffs != 0) {
>> - if (!STTI)
>> + if (!TLI)
>> return false;
>> // We have one of:
>> // ICmpZero BaseReg + Offset => ICmp BaseReg, -Offset
>> @@ -1309,7 +1309,7 @@
>> int64_t Offs = AM.BaseOffs;
>> if (AM.Scale == 0)
>> Offs = -(uint64_t)Offs; // The cast does the right thing with INT64_MIN.
>> - return STTI->isLegalICmpImmediate(Offs);
>> + return TLI->isLegalICmpImmediate(Offs);
>> }
>>
>> // ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg
>> @@ -1330,20 +1330,20 @@
>> static bool isLegalUse(AddrMode AM,
>> int64_t MinOffset, int64_t MaxOffset,
>> LSRUse::KindType Kind, Type *AccessTy,
>> - const ScalarTargetTransformInfo *LTTI) {
>> + const TargetLowering *TLI) {
>> // Check for overflow.
>> if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
>> (MinOffset > 0))
>> return false;
>> AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset;
>> - if (isLegalUse(AM, Kind, AccessTy, LTTI)) {
>> + if (isLegalUse(AM, Kind, AccessTy, TLI)) {
>> AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset;
>> // Check for overflow.
>> if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) !=
>> (MaxOffset > 0))
>> return false;
>> AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset;
>> - return isLegalUse(AM, Kind, AccessTy, LTTI);
>> + return isLegalUse(AM, Kind, AccessTy, TLI);
>> }
>> return false;
>> }
>> @@ -1352,7 +1352,7 @@
>> GlobalValue *BaseGV,
>> bool HasBaseReg,
>> LSRUse::KindType Kind, Type *AccessTy,
>> - const ScalarTargetTransformInfo *LTTI) {
>> + const TargetLowering *TLI) {
>> // Fast-path: zero is always foldable.
>> if (BaseOffs == 0 && !BaseGV) return true;
>>
>> @@ -1371,14 +1371,14 @@
>> AM.HasBaseReg = true;
>> }
>>
>> - return isLegalUse(AM, Kind, AccessTy, LTTI);
>> + return isLegalUse(AM, Kind, AccessTy, TLI);
>> }
>>
>> static bool isAlwaysFoldable(const SCEV *S,
>> int64_t MinOffset, int64_t MaxOffset,
>> bool HasBaseReg,
>> LSRUse::KindType Kind, Type *AccessTy,
>> - const ScalarTargetTransformInfo *LTTI,
>> + const TargetLowering *TLI,
>> ScalarEvolution &SE) {
>> // Fast-path: zero is always foldable.
>> if (S->isZero()) return true;
>> @@ -1402,7 +1402,7 @@
>> AM.HasBaseReg = HasBaseReg;
>> AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;
>>
>> - return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, LTTI);
>> + return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
>> }
>>
>> namespace {
>> @@ -1502,7 +1502,7 @@
>> ScalarEvolution &SE;
>> DominatorTree &DT;
>> LoopInfo &LI;
>> - const ScalarTargetTransformInfo *const STTI;
>> + const TargetLowering *const TLI;
>> Loop *const L;
>> bool Changed;
>>
>> @@ -1638,7 +1638,7 @@
>> Pass *P);
>>
>> public:
>> - LSRInstance(const ScalarTargetTransformInfo *ltti, Loop *l, Pass *P);
>> + LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);
>>
>> bool getChanged() const { return Changed; }
>>
>> @@ -1688,10 +1688,11 @@
>> }
>> if (!DestTy) continue;
>>
>> - if (STTI) {
>> + if (TLI) {
>> // If target does not support DestTy natively then do not apply
>> // this transformation.
>> - if (!STTI->isTypeLegal(DestTy)) continue;
>> + EVT DVT = TLI->getValueType(DestTy);
>> + if (!TLI->isTypeLegal(DVT)) continue;
>> }
>>
>> PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
>> @@ -2014,18 +2015,18 @@
>> if (C->getValue().getMinSignedBits() >= 64 ||
>> C->getValue().isMinSignedValue())
>> goto decline_post_inc;
>> - // Without STTI, assume that any stride might be valid, and so any
>> + // Without TLI, assume that any stride might be valid, and so any
>> // use might be shared.
>> - if (!STTI)
>> + if (!TLI)
>> goto decline_post_inc;
>> // Check for possible scaled-address reuse.
>> Type *AccessTy = getAccessType(UI->getUser());
>> AddrMode AM;
>> AM.Scale = C->getSExtValue();
>> - if (STTI->isLegalAddressingMode(AM, AccessTy))
>> + if (TLI->isLegalAddressingMode(AM, AccessTy))
>> goto decline_post_inc;
>> AM.Scale = -AM.Scale;
>> - if (STTI->isLegalAddressingMode(AM, AccessTy))
>> + if (TLI->isLegalAddressingMode(AM, AccessTy))
>> goto decline_post_inc;
>> }
>> }
>> @@ -2096,12 +2097,12 @@
>> // Conservatively assume HasBaseReg is true for now.
>> if (NewOffset < LU.MinOffset) {
>> if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, HasBaseReg,
>> - Kind, AccessTy, STTI))
>> + Kind, AccessTy, TLI))
>> return false;
>> NewMinOffset = NewOffset;
>> } else if (NewOffset > LU.MaxOffset) {
>> if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, HasBaseReg,
>> - Kind, AccessTy, STTI))
>> + Kind, AccessTy, TLI))
>> return false;
>> NewMaxOffset = NewOffset;
>> }
>> @@ -2130,7 +2131,7 @@
>> int64_t Offset = ExtractImmediate(Expr, SE);
>>
>> // Basic uses can't accept any offset, for example.
>> - if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, STTI)) {
>> + if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, TLI)) {
>> Expr = Copy;
>> Offset = 0;
>> }
>> @@ -2395,7 +2396,7 @@
>> /// TODO: Consider IVInc free if it's already used in another chains.
>> static bool
>> isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
>> - ScalarEvolution &SE, const ScalarTargetTransformInfo *STTI) {
>> + ScalarEvolution &SE, const TargetLowering *TLI) {
>> if (StressIVChain)
>> return true;
>>
>> @@ -2653,7 +2654,7 @@
>> for (unsigned UsersIdx = 0, NChains = IVChainVec.size();
>> UsersIdx < NChains; ++UsersIdx) {
>> if (!isProfitableChain(IVChainVec[UsersIdx],
>> - ChainUsersVec[UsersIdx].FarUsers, SE, STTI))
>> + ChainUsersVec[UsersIdx].FarUsers, SE, TLI))
>> continue;
>> // Preserve the chain at UsesIdx.
>> if (ChainIdx != UsersIdx)
>> @@ -2680,8 +2681,7 @@
>>
>> /// Return true if the IVInc can be folded into an addressing mode.
>> static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
>> - Value *Operand,
>> - const ScalarTargetTransformInfo *STTI) {
>> + Value *Operand, const TargetLowering *TLI) {
>> const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr);
>> if (!IncConst || !isAddressUse(UserInst, Operand))
>> return false;
>> @@ -2691,7 +2691,7 @@
>>
>> int64_t IncOffset = IncConst->getValue()->getSExtValue();
>> if (!isAlwaysFoldable(IncOffset, /*BaseGV=*/0, /*HaseBaseReg=*/false,
>> - LSRUse::Address, getAccessType(UserInst), STTI))
>> + LSRUse::Address, getAccessType(UserInst), TLI))
>> return false;
>>
>> return true;
>> @@ -2762,7 +2762,7 @@
>>
>> // If an IV increment can't be folded, use it as the next IV value.
>> if (!canFoldIVIncExpr(LeftOverExpr, IncI->UserInst, IncI->IVOperand,
>> - STTI)) {
>> + TLI)) {
>> assert(IVTy == IVOper->getType() && "inconsistent IV increment type");
>> IVSrc = IVOper;
>> LeftOverExpr = 0;
>> @@ -3108,7 +3108,7 @@
>> // into an immediate field.
>> if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset,
>> Base.getNumRegs() > 1,
>> - LU.Kind, LU.AccessTy, STTI, SE))
>> + LU.Kind, LU.AccessTy, TLI, SE))
>> continue;
>>
>> // Collect all operands except *J.
>> @@ -3122,7 +3122,7 @@
>> if (InnerAddOps.size() == 1 &&
>> isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset,
>> Base.getNumRegs() > 1,
>> - LU.Kind, LU.AccessTy, STTI, SE))
>> + LU.Kind, LU.AccessTy, TLI, SE))
>> continue;
>>
>> const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
>> @@ -3132,9 +3132,9 @@
>>
>> // Add the remaining pieces of the add back into the new formula.
>> const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
>> - if (STTI && InnerSumSC &&
>> + if (TLI && InnerSumSC &&
>> SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
>> - STTI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
>> + TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
>> InnerSumSC->getValue()->getZExtValue())) {
>> F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset +
>> InnerSumSC->getValue()->getZExtValue();
>> @@ -3144,8 +3144,8 @@
>>
>> // Add J as its own register, or an unfolded immediate.
>> const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
>> - if (STTI && SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
>> - STTI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
>> + if (TLI && SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
>> + TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
>> SC->getValue()->getZExtValue()))
>> F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset +
>> SC->getValue()->getZExtValue();
>> @@ -3205,7 +3205,7 @@
>> Formula F = Base;
>> F.AM.BaseGV = GV;
>> if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
>> - LU.Kind, LU.AccessTy, STTI))
>> + LU.Kind, LU.AccessTy, TLI))
>> continue;
>> F.BaseRegs[i] = G;
>> (void)InsertFormula(LU, LUIdx, F);
>> @@ -3230,7 +3230,7 @@
>> Formula F = Base;
>> F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
>> if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
>> - LU.Kind, LU.AccessTy, STTI)) {
>> + LU.Kind, LU.AccessTy, TLI)) {
>> // Add the offset to the base register.
>> const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G);
>> // If it cancelled out, drop the base register, otherwise update it.
>> @@ -3250,7 +3250,7 @@
>> Formula F = Base;
>> F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm;
>> if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
>> - LU.Kind, LU.AccessTy, STTI))
>> + LU.Kind, LU.AccessTy, TLI))
>> continue;
>> F.BaseRegs[i] = G;
>> (void)InsertFormula(LU, LUIdx, F);
>> @@ -3297,7 +3297,7 @@
>> F.AM.BaseOffs = NewBaseOffs;
>>
>> // Check that this scale is legal.
>> - if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, STTI))
>> + if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI))
>> continue;
>>
>> // Compensate for the use having MinOffset built into it.
>> @@ -3353,12 +3353,12 @@
>> Base.AM.HasBaseReg = Base.BaseRegs.size() > 1;
>> // Check whether this scale is going to be legal.
>> if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
>> - LU.Kind, LU.AccessTy, STTI)) {
>> + LU.Kind, LU.AccessTy, TLI)) {
>> // As a special-case, handle special out-of-loop Basic users specially.
>> // TODO: Reconsider this special case.
>> if (LU.Kind == LSRUse::Basic &&
>> isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
>> - LSRUse::Special, LU.AccessTy, STTI) &&
>> + LSRUse::Special, LU.AccessTy, TLI) &&
>> LU.AllFixupsOutsideLoop)
>> LU.Kind = LSRUse::Special;
>> else
>> @@ -3391,8 +3391,8 @@
>>
>> /// GenerateTruncates - Generate reuse formulae from different IV types.
>> void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
>> - // This requires ScalarTargetTransformInfo to tell us which truncates are free.
>> - if (!STTI) return;
>> + // This requires TargetLowering to tell us which truncates are free.
>> + if (!TLI) return;
>>
>> // Don't bother truncating symbolic values.
>> if (Base.AM.BaseGV) return;
>> @@ -3405,7 +3405,7 @@
>> for (SmallSetVector<Type *, 4>::const_iterator
>> I = Types.begin(), E = Types.end(); I != E; ++I) {
>> Type *SrcTy = *I;
>> - if (SrcTy != DstTy && STTI->isTruncateFree(SrcTy, DstTy)) {
>> + if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) {
>> Formula F = Base;
>>
>> if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I);
>> @@ -3561,7 +3561,7 @@
>> Formula NewF = F;
>> NewF.AM.BaseOffs = Offs;
>> if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
>> - LU.Kind, LU.AccessTy, STTI))
>> + LU.Kind, LU.AccessTy, TLI))
>> continue;
>> NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);
>>
>> @@ -3586,9 +3586,9 @@
>> Formula NewF = F;
>> NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm;
>> if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
>> - LU.Kind, LU.AccessTy, STTI)) {
>> - if (!STTI ||
>> - !STTI->isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
>> + LU.Kind, LU.AccessTy, TLI)) {
>> + if (!TLI ||
>> + !TLI->isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
>> continue;
>> NewF = F;
>> NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm;
>> @@ -3900,7 +3900,7 @@
>> Formula &F = LUThatHas->Formulae[i];
>> if (!isLegalUse(F.AM, LUThatHas->MinOffset, LUThatHas->MaxOffset,
>> - LUThatHas->Kind, LUThatHas->AccessTy, STTI)) {
>> + LUThatHas->Kind, LUThatHas->AccessTy, TLI)) {
>> DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n');
>> LUThatHas->DeleteFormula(F);
>> @@ -4589,12 +4589,12 @@
>> Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
>> }
>>
>> -LSRInstance::LSRInstance(const ScalarTargetTransformInfo *stti, Loop *l, Pass *P)
>> +LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
>> : IU(P->getAnalysis<IVUsers>()),
>> SE(P->getAnalysis<ScalarEvolution>()),
>> DT(P->getAnalysis<DominatorTree>()),
>> LI(P->getAnalysis<LoopInfo>()),
>> - STTI(stti), L(l), Changed(false), IVIncInsertPos(0) {
>> + TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {
>>
>> // If LoopSimplify form is not available, stay out of trouble.
>> if (!L->isLoopSimplifyForm())
>> @@ -4684,7 +4684,7 @@
>> for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
>> JE = LU.Formulae.end(); J != JE; ++J)
>> assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset,
>> - LU.Kind, LU.AccessTy, STTI) &&
>> + LU.Kind, LU.AccessTy, TLI) &&
>> "Illegal formula generated!");
>> };
>> #endif
>> @@ -4757,13 +4757,13 @@
>> namespace {
>>
>> class LoopStrengthReduce : public LoopPass {
>> - /// ScalarTargetTransformInfo provides target information that is needed
>> - /// for strength reducing loops.
>> - const ScalarTargetTransformInfo *STTI;
>> + /// TLI - Keep a pointer of a TargetLowering to consult for determining
>> + /// transformation profitability.
>> + const TargetLowering *const TLI;
>>
>> public:
>> static char ID; // Pass ID, replacement for typeid
>> - LoopStrengthReduce();
>> + explicit LoopStrengthReduce(const TargetLowering *tli = 0);
>>
>> private:
>> bool runOnLoop(Loop *L, LPPassManager &LPM);
>> @@ -4783,12 +4783,13 @@
>> INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
>> "Loop Strength Reduction", false, false)
>>
>> -Pass *llvm::createLoopStrengthReducePass() {
>> - return new LoopStrengthReduce();
>> +
>> +Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
>> + return new LoopStrengthReduce(TLI);
>> }
>>
>> -LoopStrengthReduce::LoopStrengthReduce()
>> - : LoopPass(ID), STTI(0) {
>> +LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli)
>> + : LoopPass(ID), TLI(tli) {
>> initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
>> }
>>
>> @@ -4814,13 +4815,8 @@
>> bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
>> bool Changed = false;
>>
>> - TargetTransformInfo *TTI = getAnalysisIfAvailable<TargetTransformInfo>();
>> -
>> - if (TTI)
>> - STTI = TTI->getScalarTargetTransformInfo();
>> -
>> // Run the main LSR transformation.
>> - Changed |= LSRInstance(STTI, L, this).getChanged();
>> + Changed |= LSRInstance(TLI, L, this).getChanged();
>>
>> // Remove any extra phis created by processing inner loops.
>> Changed |= DeleteDeadPHIs(L->getHeader());
>> @@ -4831,7 +4827,7 @@
>> Rewriter.setDebugType(DEBUG_TYPE);
>> #endif
>> unsigned numFolded = Rewriter.
>> - replaceCongruentIVs(L, &getAnalysis<DominatorTree>(), DeadInsts, STTI);
>> + replaceCongruentIVs(L, &getAnalysis<DominatorTree>(), DeadInsts, TLI);
>> if (numFolded) {
>> Changed = true;
>> DeleteTriviallyDeadInstructions(DeadInsts);
>>
>> Modified: llvm/trunk/lib/Transforms/Utils/LowerInvoke.cpp
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/LowerInvoke.cpp?rev=166340&r1=166339&r2=166340&view=diff
>> ==============================================================================
>> --- llvm/trunk/lib/Transforms/Utils/LowerInvoke.cpp (original)
>> +++ llvm/trunk/lib/Transforms/Utils/LowerInvoke.cpp Fri Oct 19 16:28:43 2012
>> @@ -45,10 +45,10 @@
>> #include "llvm/Pass.h"
>> #include "llvm/Transforms/Utils/BasicBlockUtils.h"
>> #include "llvm/Transforms/Utils/Local.h"
>> -#include "llvm/TargetTransformInfo.h"
>> #include "llvm/ADT/SmallVector.h"
>> #include "llvm/ADT/Statistic.h"
>> #include "llvm/Support/CommandLine.h"
>> +#include "llvm/Target/TargetLowering.h"
>> #include <csetjmp>
>> #include <set>
>> using namespace llvm;
>> @@ -70,14 +70,15 @@
>> Constant *SetJmpFn, *LongJmpFn, *StackSaveFn, *StackRestoreFn;
>> bool useExpensiveEHSupport;
>>
>> - // We peek in STTI to grab the target's jmp_buf size and alignment
>> - const ScalarTargetTransformInfo *STTI;
>> + // We peek in TLI to grab the target's jmp_buf size and alignment
>> + const TargetLowering *TLI;
>>
>> public:
>> static char ID; // Pass identification, replacement for typeid
>> - explicit LowerInvoke(bool useExpensiveEHSupport = ExpensiveEHSupport)
>> + explicit LowerInvoke(const TargetLowering *tli = NULL,
>> + bool useExpensiveEHSupport = ExpensiveEHSupport)
>> : FunctionPass(ID), useExpensiveEHSupport(useExpensiveEHSupport),
>> - STTI(0) {
>> + TLI(tli) {
>> initializeLowerInvokePass(*PassRegistry::getPassRegistry());
>> }
>> bool doInitialization(Module &M);
>> @@ -107,24 +108,21 @@
>> char &llvm::LowerInvokePassID = LowerInvoke::ID;
>>
>> // Public Interface To the LowerInvoke pass.
>> -FunctionPass *llvm::createLowerInvokePass() {
>> - return new LowerInvoke(ExpensiveEHSupport);
>> +FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI) {
>> + return new LowerInvoke(TLI, ExpensiveEHSupport);
>> }
>> -FunctionPass *llvm::createLowerInvokePass(bool useExpensiveEHSupport) {
>> - return new LowerInvoke(useExpensiveEHSupport);
>> +FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI,
>> + bool useExpensiveEHSupport) {
>> + return new LowerInvoke(TLI, useExpensiveEHSupport);
>> }
>>
>> // doInitialization - Make sure that there is a prototype for abort in the
>> // current module.
>> bool LowerInvoke::doInitialization(Module &M) {
>> - TargetTransformInfo *TTI = getAnalysisIfAvailable<TargetTransformInfo>();
>> - if (TTI)
>> - STTI = TTI->getScalarTargetTransformInfo();
>> -
>> Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext());
>> if (useExpensiveEHSupport) {
>> // Insert a type for the linked list of jump buffers.
>> - unsigned JBSize = STTI ? STTI->getJumpBufSize() : 0;
>> + unsigned JBSize = TLI ? TLI->getJumpBufSize() : 0;
>> JBSize = JBSize ? JBSize : 200;
>> Type *JmpBufTy = ArrayType::get(VoidPtrTy, JBSize);
>>
>> @@ -432,7 +430,7 @@
>> // Create an alloca for the incoming jump buffer ptr and the new jump buffer
>> // that needs to be restored on all exits from the function. This is an
>> // alloca because the value needs to be live across invokes.
>> - unsigned Align = STTI ? STTI->getJumpBufAlignment() : 0;
>> + unsigned Align = TLI ? TLI->getJumpBufAlignment() : 0;
>> AllocaInst *JmpBuf =
>> new AllocaInst(JBLinkTy, 0, Align,
>> "jblink", F.begin()->begin());
>> @@ -577,10 +575,6 @@
>> }
>>
>> bool LowerInvoke::runOnFunction(Function &F) {
>> - TargetTransformInfo *TTI = getAnalysisIfAvailable<TargetTransformInfo>();
>> - if (TTI)
>> - STTI = TTI->getScalarTargetTransformInfo();
>> -
>> if (useExpensiveEHSupport)
>> return insertExpensiveEHSupport(F);
>> else
>>
>>
>
> --
> Hal Finkel
> Postdoctoral Appointee
> Leadership Computing Facility
> Argonne National Laboratory