[polly] r203607 - Move transformations into own directory
Andreas Simbuerger
simbuerg at fim.uni-passau.de
Tue Mar 11 14:25:59 PDT 2014
Author: simbuerg
Date: Tue Mar 11 16:25:59 2014
New Revision: 203607
URL: http://llvm.org/viewvc/llvm-project?rev=203607&view=rev
Log:
Move transformations into own directory
Move all transformations into their own directory. CMakeLists are
adjusted accordingly.
Added:
polly/trunk/lib/Transform/
polly/trunk/lib/Transform/Canonicalization.cpp
- copied, changed from r203544, polly/trunk/lib/Canonicalization.cpp
polly/trunk/lib/Transform/CodePreparation.cpp
- copied, changed from r203544, polly/trunk/lib/CodePreparation.cpp
polly/trunk/lib/Transform/DeadCodeElimination.cpp
- copied, changed from r203544, polly/trunk/lib/DeadCodeElimination.cpp
polly/trunk/lib/Transform/IndVarSimplify.cpp
- copied, changed from r203544, polly/trunk/lib/IndVarSimplify.cpp
polly/trunk/lib/Transform/IndependentBlocks.cpp
- copied, changed from r203544, polly/trunk/lib/IndependentBlocks.cpp
polly/trunk/lib/Transform/Pluto.cpp
- copied, changed from r203544, polly/trunk/lib/Pluto.cpp
polly/trunk/lib/Transform/Pocc.cpp
- copied, changed from r203544, polly/trunk/lib/Pocc.cpp
polly/trunk/lib/Transform/ScheduleOptimizer.cpp
- copied, changed from r203544, polly/trunk/lib/ScheduleOptimizer.cpp
Removed:
polly/trunk/lib/Canonicalization.cpp
polly/trunk/lib/CodePreparation.cpp
polly/trunk/lib/DeadCodeElimination.cpp
polly/trunk/lib/IndVarSimplify.cpp
polly/trunk/lib/IndependentBlocks.cpp
polly/trunk/lib/Pluto.cpp
polly/trunk/lib/Pocc.cpp
polly/trunk/lib/ScheduleOptimizer.cpp
Modified:
polly/trunk/lib/CMakeLists.txt
Modified: polly/trunk/lib/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/CMakeLists.txt?rev=203607&r1=203606&r2=203607&view=diff
==============================================================================
--- polly/trunk/lib/CMakeLists.txt (original)
+++ polly/trunk/lib/CMakeLists.txt Tue Mar 11 16:25:59 2014
@@ -43,6 +43,7 @@ if (SCOPLIB_FOUND)
endif (SCOPLIB_FOUND)
add_polly_library(LLVMPollyLib
+ RegisterPasses.cpp
Analysis/Dependences.cpp
Analysis/ScopDetection.cpp
Analysis/ScopInfo.cpp
@@ -56,21 +57,20 @@ add_polly_library(LLVMPollyLib
CodeGen/IRBuilder.cpp
CodeGen/Utils.cpp
${GPGPU_CODEGEN_FILES}
+ Exchange/JSONExporter.cpp
+ ${POLLY_SCOPLIB_FILES}
Support/GICHelper.cpp
Support/SCEVValidator.cpp
Support/ScopHelper.cpp
- Exchange/JSONExporter.cpp
${POLLY_JSON_FILES}
${POLLY_OPENSCOP_FILES}
- Canonicalization.cpp
- CodePreparation.cpp
- DeadCodeElimination.cpp
- IndependentBlocks.cpp
- IndVarSimplify.cpp
- Pocc.cpp
- RegisterPasses.cpp
- ScheduleOptimizer.cpp
- ${POLLY_SCOPLIB_FILES}
+ Transform/Canonicalization.cpp
+ Transform/CodePreparation.cpp
+ Transform/DeadCodeElimination.cpp
+ Transform/IndependentBlocks.cpp
+ Transform/IndVarSimplify.cpp
+ Transform/Pocc.cpp
+ Transform/ScheduleOptimizer.cpp
${POLLY_PLUTO_FILES}
)
Removed: polly/trunk/lib/Canonicalization.cpp
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/Canonicalization.cpp?rev=203606&view=auto
==============================================================================
--- polly/trunk/lib/Canonicalization.cpp (original)
+++ polly/trunk/lib/Canonicalization.cpp (removed)
@@ -1,86 +0,0 @@
-//===---- Canonicalization.cpp - Run canonicalization passes ======-------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Run the set of default canonicalization passes.
-//
-// This pass is mainly used for debugging.
-//
-//===----------------------------------------------------------------------===//
-
-#include "polly/LinkAllPasses.h"
-#include "polly/Canonicalization.h"
-#include "llvm/Transforms/Scalar.h"
-
-using namespace llvm;
-using namespace polly;
-
-void polly::registerCanonicalicationPasses(llvm::PassManagerBase &PM) {
- PM.add(llvm::createPromoteMemoryToRegisterPass());
- PM.add(llvm::createInstructionCombiningPass());
- PM.add(llvm::createCFGSimplificationPass());
- PM.add(llvm::createTailCallEliminationPass());
- PM.add(llvm::createCFGSimplificationPass());
- PM.add(llvm::createReassociatePass());
- PM.add(llvm::createLoopRotatePass());
- PM.add(llvm::createInstructionCombiningPass());
-
- if (!SCEVCodegen)
- PM.add(polly::createIndVarSimplifyPass());
-
- PM.add(polly::createCodePreparationPass());
-}
-
-namespace {
-class PollyCanonicalize : public ModulePass {
- PollyCanonicalize(const PollyCanonicalize &) LLVM_DELETED_FUNCTION;
- const PollyCanonicalize &
- operator=(const PollyCanonicalize &) LLVM_DELETED_FUNCTION;
-
-public:
- static char ID;
-
- explicit PollyCanonicalize() : ModulePass(ID) {}
- ~PollyCanonicalize();
-
- /// @name ModulePass interface.
- //@{
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual void releaseMemory();
- virtual bool runOnModule(Module &M);
- virtual void print(raw_ostream &OS, const Module *) const;
- //@}
-};
-}
-
-PollyCanonicalize::~PollyCanonicalize() {}
-
-void PollyCanonicalize::getAnalysisUsage(AnalysisUsage &AU) const {}
-
-void PollyCanonicalize::releaseMemory() {}
-
-bool PollyCanonicalize::runOnModule(Module &M) {
- PassManager PM;
- registerCanonicalicationPasses(PM);
- PM.run(M);
-
- return true;
-}
-
-void PollyCanonicalize::print(raw_ostream &OS, const Module *) const {}
-
-char PollyCanonicalize::ID = 0;
-
-Pass *polly::createPollyCanonicalizePass() { return new PollyCanonicalize(); }
-
-INITIALIZE_PASS_BEGIN(PollyCanonicalize, "polly-canonicalize",
- "Polly - Run canonicalization passes", false, false)
-INITIALIZE_PASS_END(PollyCanonicalize, "polly-canonicalize",
- "Polly - Run canonicalization passes", false, false)
Removed: polly/trunk/lib/CodePreparation.cpp
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/CodePreparation.cpp?rev=203606&view=auto
==============================================================================
--- polly/trunk/lib/CodePreparation.cpp (original)
+++ polly/trunk/lib/CodePreparation.cpp (removed)
@@ -1,182 +0,0 @@
-//===---- CodePreparation.cpp - Code preparation for Scop Detection -------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// The Polly code preparation pass is executed before SCoP detection. Its only
-// use is to translate all PHI nodes that cannot be expressed by the code
-// generator into explicit memory dependences. Depending on the code generation
-// strategy, different PHI nodes are translated:
-//
-// - indvars based code generation:
-//
-// The indvars based code generation requires explicit canonical induction
-// variables. Such variables are generated before scop detection and
-// also before the code preparation pass. All PHI nodes that are not canonical
-// induction variables are not supported by the indvars based code generation
-// and are consequently translated into explicit memory accesses.
-//
-// - scev based code generation:
-//
-// The scev based code generation can generate code for all PHI nodes that do
-// not reference parameters within the scop. As the code preparation pass runs
-// before scop detection, we cannot check this condition: without a detected
-// scop, we do not know whether the SCEVUnknowns that appear in the SCEV of a
-// PHI node will later be inside or outside of the SCoP. Hence, we follow a
-// heuristic and translate all PHI nodes that are either directly SCEVUnknown
-// or SCEVCouldNotCompute. This will hopefully catch most of the PHI nodes that
-// are introduced due to conditional control flow, but not the ones that
-// reference loop counters.
-//
-// XXX: In the future, we should remove the need for this pass entirely and
-// instead add support for scalar dependences to ScopInfo and code generation.
-//
-//===----------------------------------------------------------------------===//
-
-#include "polly/LinkAllPasses.h"
-#include "polly/CodeGen/BlockGenerators.h"
-#include "polly/Support/ScopHelper.h"
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/RegionInfo.h"
-#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Transforms/Utils/Local.h"
-
-using namespace llvm;
-using namespace polly;
-
-namespace {
-/// @brief Prepare the IR for the scop detection.
-///
-class CodePreparation : public FunctionPass {
- CodePreparation(const CodePreparation &) LLVM_DELETED_FUNCTION;
- const CodePreparation &
- operator=(const CodePreparation &) LLVM_DELETED_FUNCTION;
-
- LoopInfo *LI;
- ScalarEvolution *SE;
-
- void clear();
-
- bool eliminatePHINodes(Function &F);
-
-public:
- static char ID;
-
- explicit CodePreparation() : FunctionPass(ID) {}
- ~CodePreparation();
-
- /// @name FunctionPass interface.
- //@{
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual void releaseMemory();
- virtual bool runOnFunction(Function &F);
- virtual void print(raw_ostream &OS, const Module *) const;
- //@}
-};
-}
-
-void CodePreparation::clear() {}
-
-CodePreparation::~CodePreparation() { clear(); }
-
-bool CodePreparation::eliminatePHINodes(Function &F) {
- // The PHINodes that will be deleted.
- std::vector<PHINode *> PNtoDelete;
- // The PHINodes that will be preserved.
- std::vector<PHINode *> PreservedPNs;
-
- // Scan the PHINodes in this function.
- for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI)
- for (BasicBlock::iterator II = BI->begin(), IE = BI->getFirstNonPHI();
- II != IE; ++II) {
- PHINode *PN = cast<PHINode>(II);
- if (SCEVCodegen) {
- if (SE->isSCEVable(PN->getType())) {
- const SCEV *S = SE->getSCEV(PN);
- if (!isa<SCEVUnknown>(S) && !isa<SCEVCouldNotCompute>(S)) {
- PreservedPNs.push_back(PN);
- continue;
- }
- }
- } else {
- if (Loop *L = LI->getLoopFor(BI)) {
- // Induction variables will be preserved.
- if (L->getCanonicalInductionVariable() == PN) {
- PreservedPNs.push_back(PN);
- continue;
- }
- }
- }
-
- // As DemotePHIToStack does not support invoke edges, we preserve
- // PHINodes that have invoke edges.
- if (hasInvokeEdge(PN))
- PreservedPNs.push_back(PN);
- else
- PNtoDelete.push_back(PN);
- }
-
- if (PNtoDelete.empty())
- return false;
-
- while (!PNtoDelete.empty()) {
- PHINode *PN = PNtoDelete.back();
- PNtoDelete.pop_back();
-
- DemotePHIToStack(PN);
- }
-
- // Move preserved PHINodes to the beginning of the BasicBlock.
- while (!PreservedPNs.empty()) {
- PHINode *PN = PreservedPNs.back();
- PreservedPNs.pop_back();
-
- BasicBlock *BB = PN->getParent();
- if (PN == BB->begin())
- continue;
-
- PN->moveBefore(BB->begin());
- }
-
- return true;
-}
-
-void CodePreparation::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<LoopInfo>();
- AU.addRequired<ScalarEvolution>();
-
- AU.addPreserved<LoopInfo>();
- AU.addPreserved<RegionInfo>();
- AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addPreserved<DominanceFrontier>();
-}
-
-bool CodePreparation::runOnFunction(Function &F) {
- LI = &getAnalysis<LoopInfo>();
- SE = &getAnalysis<ScalarEvolution>();
-
- splitEntryBlockForAlloca(&F.getEntryBlock(), this);
-
- eliminatePHINodes(F);
-
- return false;
-}
-
-void CodePreparation::releaseMemory() { clear(); }
-
-void CodePreparation::print(raw_ostream &OS, const Module *) const {}
-
-char CodePreparation::ID = 0;
-char &polly::CodePreparationID = CodePreparation::ID;
-
-Pass *polly::createCodePreparationPass() { return new CodePreparation(); }
-
-INITIALIZE_PASS_BEGIN(CodePreparation, "polly-prepare",
- "Polly - Prepare code for polly", false, false)
-INITIALIZE_PASS_DEPENDENCY(LoopInfo)
-INITIALIZE_PASS_END(CodePreparation, "polly-prepare",
- "Polly - Prepare code for polly", false, false)
Removed: polly/trunk/lib/DeadCodeElimination.cpp
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/DeadCodeElimination.cpp?rev=203606&view=auto
==============================================================================
--- polly/trunk/lib/DeadCodeElimination.cpp (original)
+++ polly/trunk/lib/DeadCodeElimination.cpp (removed)
@@ -1,157 +0,0 @@
-//===- DeadCodeElimination.cpp - Eliminate dead iteration ----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// The polyhedral dead code elimination pass analyses a SCoP to eliminate
-// statement instances that can be proven dead.
-// As a consequence, the code generated for this SCoP may execute a statement
-// less often. This means a statement may be executed only in certain loop
-// iterations or it may not even be part of the generated code at all.
-//
-// This code:
-//
-// for (i = 0; i < N; i++)
-// arr[i] = 0;
-// for (i = 0; i < N; i++)
-// arr[i] = 10;
-// for (i = 0; i < N; i++)
-// arr[i] = i;
-//
-// is e.g. simplified to:
-//
-// for (i = 0; i < N; i++)
-// arr[i] = i;
-//
-// The idea and the algorithm used were first implemented by Sven Verdoolaege in
-// the 'ppcg' tool.
-//
-//===----------------------------------------------------------------------===//
-
-#include "polly/Dependences.h"
-#include "polly/LinkAllPasses.h"
-#include "polly/ScopInfo.h"
-#include "llvm/Support/CommandLine.h"
-#include "isl/set.h"
-#include "isl/map.h"
-#include "isl/union_map.h"
-
-using namespace llvm;
-using namespace polly;
-
-namespace {
-
-cl::opt<int> DCEPreciseSteps(
- "polly-dce-precise-steps",
- cl::desc("The number of precise steps between two approximating "
- "iterations. (A value of -1 schedules another approximation stage "
- "before the actual dead code elimination."),
- cl::init(-1));
-
-class DeadCodeElim : public ScopPass {
-
-public:
- static char ID;
- explicit DeadCodeElim() : ScopPass(ID) {}
-
- virtual bool runOnScop(Scop &S);
-
- void printScop(llvm::raw_ostream &OS) const;
- void getAnalysisUsage(AnalysisUsage &AU) const;
-
-private:
- isl_union_set *getLastWrites(isl_union_map *Writes, isl_union_map *Schedule);
- bool eliminateDeadCode(Scop &S, int PreciseSteps);
-};
-}
-
-char DeadCodeElim::ID = 0;
-
-/// Return the set of iterations that contains the last write for each location.
-isl_union_set *DeadCodeElim::getLastWrites(__isl_take isl_union_map *Writes,
- __isl_take isl_union_map *Schedule) {
- isl_union_map *WriteIterations = isl_union_map_reverse(Writes);
- isl_union_map *WriteTimes =
- isl_union_map_apply_range(WriteIterations, isl_union_map_copy(Schedule));
-
- isl_union_map *LastWriteTimes = isl_union_map_lexmax(WriteTimes);
- isl_union_map *LastWriteIterations = isl_union_map_apply_range(
- LastWriteTimes, isl_union_map_reverse(Schedule));
-
- isl_union_set *Live = isl_union_map_range(LastWriteIterations);
- return isl_union_set_coalesce(Live);
-}
-
-/// Performs polyhedral dead iteration elimination by:
-/// o Assuming that the last write to each location is live.
-/// o Following each RAW dependency from a live iteration backwards and adding
-/// that iteration to the live set.
-///
-/// To ensure the set of live iterations does not get too complex, we always
-/// combine a certain number of precise steps with one approximating step that
-/// simplifies the live set with an affine hull.
-bool DeadCodeElim::eliminateDeadCode(Scop &S, int PreciseSteps) {
- Dependences *D = &getAnalysis<Dependences>();
-
- if (!D->hasValidDependences())
- return false;
-
- isl_union_set *Live = this->getLastWrites(S.getWrites(), S.getSchedule());
- isl_union_map *Dep = D->getDependences(Dependences::TYPE_RAW);
- Dep = isl_union_map_reverse(Dep);
-
- if (PreciseSteps == -1)
- Live = isl_union_set_affine_hull(Live);
-
- isl_union_set *OriginalDomain = S.getDomains();
- int Steps = 0;
- while (true) {
- isl_union_set *Extra;
- Steps++;
-
- Extra =
- isl_union_set_apply(isl_union_set_copy(Live), isl_union_map_copy(Dep));
-
- if (isl_union_set_is_subset(Extra, Live)) {
- isl_union_set_free(Extra);
- break;
- }
-
- Live = isl_union_set_union(Live, Extra);
-
- if (Steps > PreciseSteps) {
- Steps = 0;
- Live = isl_union_set_affine_hull(Live);
- }
-
- Live = isl_union_set_intersect(Live, isl_union_set_copy(OriginalDomain));
- }
- isl_union_map_free(Dep);
- isl_union_set_free(OriginalDomain);
-
- return S.restrictDomains(isl_union_set_coalesce(Live));
-}
-
-bool DeadCodeElim::runOnScop(Scop &S) {
- return eliminateDeadCode(S, DCEPreciseSteps);
-}
-
-void DeadCodeElim::printScop(raw_ostream &OS) const {}
-
-void DeadCodeElim::getAnalysisUsage(AnalysisUsage &AU) const {
- ScopPass::getAnalysisUsage(AU);
- AU.addRequired<Dependences>();
-}
-
-Pass *polly::createDeadCodeElimPass() { return new DeadCodeElim(); }
-
-INITIALIZE_PASS_BEGIN(DeadCodeElim, "polly-dce",
- "Polly - Remove dead iterations", false, false)
-INITIALIZE_PASS_DEPENDENCY(Dependences)
-INITIALIZE_PASS_DEPENDENCY(ScopInfo)
-INITIALIZE_PASS_END(DeadCodeElim, "polly-dce", "Polly - Remove dead iterations",
- false, false)
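To make the fixed-point computation above concrete, here is a hand-worked sketch (not output of the pass) of the three-loop example from the header comment, with the statements named S0, S1 and S2 in textual order. All three statements write arr[i], and the schedule orders S0[i] before S1[i] before S2[i] for every i, so the lexicographic maximum of the write times maps each location arr[i] to S2[i] and getLastWrites() yields

  Live = { S2[i] : 0 <= i < N }

S2 reads no array element, so applying the reversed RAW dependences to Live adds nothing, the loop terminates after its first iteration, and restrictDomains(Live) leaves S0 and S1 with empty domains: only the third loop survives, which is exactly the simplification promised in the header comment.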
Removed: polly/trunk/lib/IndVarSimplify.cpp
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/IndVarSimplify.cpp?rev=203606&view=auto
==============================================================================
--- polly/trunk/lib/IndVarSimplify.cpp (original)
+++ polly/trunk/lib/IndVarSimplify.cpp (removed)
@@ -1,2009 +0,0 @@
-//===- IndVarSimplify.cpp - Induction Variable Elimination ----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This transformation analyzes and transforms the induction variables (and
-// computations derived from them) into simpler forms suitable for subsequent
-// analysis and transformation.
-//
-// If the trip count of a loop is computable, this pass also makes the following
-// changes:
-// 1. The exit condition for the loop is canonicalized to compare the
-// induction value against the exit value. This turns loops like:
-// 'for (i = 7; i*i < 1000; ++i)' into 'for (i = 0; i != 25; ++i)'
-// 2. Any use outside of the loop of an expression derived from the indvar
-// is changed to compute the derived value outside of the loop, eliminating
-// the dependence on the exit value of the induction variable. If the only
-// purpose of the loop is to compute the exit value of some derived
-// expression, this transformation will make the loop dead.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "indvars"
-
-#include "polly/LinkAllPasses.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/IVUsers.h"
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/LoopPass.h"
-#include "llvm/Analysis/ScalarEvolutionExpander.h"
-#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CFG.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/LLVMContext.h"
-#include "llvm/IR/Type.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Transforms/Scalar.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Transforms/Utils/SimplifyIndVar.h"
-using namespace llvm;
-
-STATISTIC(NumRemoved, "Number of aux indvars removed");
-STATISTIC(NumWidened, "Number of indvars widened");
-STATISTIC(NumInserted, "Number of canonical indvars added");
-STATISTIC(NumReplaced, "Number of exit values replaced");
-STATISTIC(NumLFTR, "Number of loop exit tests replaced");
-STATISTIC(NumElimExt, "Number of IV sign/zero extends eliminated");
-STATISTIC(NumElimIV, "Number of congruent IVs eliminated");
-
-static const bool EnableIVRewrite = true;
-static const bool VerifyIndvars = false;
-
-namespace {
-class PollyIndVarSimplify : public LoopPass {
- IVUsers *IU;
- LoopInfo *LI;
- ScalarEvolution *SE;
- DominatorTree *DT;
- const DataLayout *TD;
-
- SmallVector<WeakVH, 16> DeadInsts;
- bool Changed;
-
-public:
- static char ID; // Pass identification, replacement for typeid
- PollyIndVarSimplify()
- : LoopPass(ID), IU(0), LI(0), SE(0), DT(0), TD(0), Changed(false) {
- initializeIndVarSimplifyPass(*PassRegistry::getPassRegistry());
- }
-
- virtual bool runOnLoop(Loop *L, LPPassManager &LPM);
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<LoopInfo>();
- AU.addRequired<ScalarEvolution>();
- AU.addRequiredID(LoopSimplifyID);
- AU.addRequiredID(LCSSAID);
- if (EnableIVRewrite)
- AU.addRequired<IVUsers>();
- AU.addPreserved<ScalarEvolution>();
- AU.addPreservedID(LoopSimplifyID);
- AU.addPreservedID(LCSSAID);
- if (EnableIVRewrite)
- AU.addPreserved<IVUsers>();
- AU.setPreservesCFG();
- }
-
-private:
- virtual void releaseMemory() { DeadInsts.clear(); }
-
- bool isValidRewrite(Value *FromVal, Value *ToVal);
-
- void HandleFloatingPointIV(Loop *L, PHINode *PH);
- void RewriteNonIntegerIVs(Loop *L);
-
- void SimplifyAndExtend(Loop *L, SCEVExpander &Rewriter, LPPassManager &LPM);
-
- void RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter);
-
- void RewriteIVExpressions(Loop *L, SCEVExpander &Rewriter);
-
- Value *LinearFunctionTestReplace(Loop *L, const SCEV *BackedgeTakenCount,
- PHINode *IndVar, SCEVExpander &Rewriter);
-
- void SinkUnusedInvariants(Loop *L);
-};
-}
-
-char PollyIndVarSimplify::ID = 0;
-Pass *polly::createIndVarSimplifyPass() { return new PollyIndVarSimplify(); }
-
-/// isValidRewrite - Return true if the SCEV expansion generated by the
-/// rewriter can replace the original value. SCEV guarantees that it
-/// produces the same value, but the way it is produced may be illegal IR.
-/// Ideally, this function will only be called for verification.
-bool PollyIndVarSimplify::isValidRewrite(Value *FromVal, Value *ToVal) {
- // If an SCEV expression subsumed multiple pointers, its expansion could
- // reassociate the GEP changing the base pointer. This is illegal because the
- // final address produced by a GEP chain must be inbounds relative to its
- // underlying object. Otherwise basic alias analysis, among other things,
- // could fail in a dangerous way. Ultimately, SCEV will be improved to avoid
- // producing an expression involving multiple pointers. Until then, we must
- // bail out here.
- //
- // Retrieve the pointer operand of the GEP. Don't use GetUnderlyingObject
- // because it understands lcssa phis while SCEV does not.
- Value *FromPtr = FromVal;
- Value *ToPtr = ToVal;
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(FromVal)) {
- FromPtr = GEP->getPointerOperand();
- }
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(ToVal)) {
- ToPtr = GEP->getPointerOperand();
- }
- if (FromPtr != FromVal || ToPtr != ToVal) {
- // Quickly check the common case
- if (FromPtr == ToPtr)
- return true;
-
- // SCEV may have rewritten an expression that produces the GEP's pointer
- // operand. That's ok as long as the pointer operand has the same base
- // pointer. Unlike GetUnderlyingObject(), getPointerBase() will find the
- // base of a recurrence. This handles the case in which SCEV expansion
- // converts a pointer type recurrence into a nonrecurrent pointer base
- // indexed by an integer recurrence.
-
- // If the GEP base pointer is a vector of pointers, abort.
- if (!FromPtr->getType()->isPointerTy() || !ToPtr->getType()->isPointerTy())
- return false;
-
- const SCEV *FromBase = SE->getPointerBase(SE->getSCEV(FromPtr));
- const SCEV *ToBase = SE->getPointerBase(SE->getSCEV(ToPtr));
- if (FromBase == ToBase)
- return true;
-
- DEBUG(dbgs() << "INDVARS: GEP rewrite bail out " << *FromBase
- << " != " << *ToBase << "\n");
-
- return false;
- }
- return true;
-}
-
-/// Determine the insertion point for this user. By default, insert immediately
-/// before the user. SCEVExpander or LICM will hoist loop invariants out of the
-/// loop. For PHI nodes, there may be multiple uses, so compute the nearest
-/// common dominator for the incoming blocks.
-static Instruction *getInsertPointForUses(Instruction *User, Value *Def,
- DominatorTree *DT) {
- PHINode *PHI = dyn_cast<PHINode>(User);
- if (!PHI)
- return User;
-
- Instruction *InsertPt = 0;
- for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) {
- if (PHI->getIncomingValue(i) != Def)
- continue;
-
- BasicBlock *InsertBB = PHI->getIncomingBlock(i);
- if (!InsertPt) {
- InsertPt = InsertBB->getTerminator();
- continue;
- }
- InsertBB = DT->findNearestCommonDominator(InsertPt->getParent(), InsertBB);
- InsertPt = InsertBB->getTerminator();
- }
- assert(InsertPt && "Missing phi operand");
- assert((!isa<Instruction>(Def) ||
- DT->dominates(cast<Instruction>(Def), InsertPt)) &&
- "def does not dominate all uses");
- return InsertPt;
-}
-
-//===----------------------------------------------------------------------===//
-// RewriteNonIntegerIVs and helpers. Prefer integer IVs.
-//===----------------------------------------------------------------------===//
-
-/// ConvertToSInt - Convert APF to an integer, if possible.
-static bool ConvertToSInt(const APFloat &APF, int64_t &IntVal) {
- bool isExact = false;
- if (&APF.getSemantics() == &APFloat::PPCDoubleDouble)
- return false;
- // See if we can convert this to an int64_t
- uint64_t UIntVal;
- if (APF.convertToInteger(&UIntVal, 64, true, APFloat::rmTowardZero,
- &isExact) != APFloat::opOK ||
- !isExact)
- return false;
- IntVal = UIntVal;
- return true;
-}
-
-/// HandleFloatingPointIV - If the loop has floating induction variable
-/// then insert corresponding integer induction variable if possible.
-/// For example,
-/// for(double i = 0; i < 10000; ++i)
-/// bar(i)
-/// is converted into
-/// for(int i = 0; i < 10000; ++i)
-/// bar((double)i);
-///
-void PollyIndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
- unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
- unsigned BackEdge = IncomingEdge ^ 1;
-
- // Check incoming value.
- ConstantFP *InitValueVal =
- dyn_cast<ConstantFP>(PN->getIncomingValue(IncomingEdge));
-
- int64_t InitValue;
- if (!InitValueVal || !ConvertToSInt(InitValueVal->getValueAPF(), InitValue))
- return;
-
- // Check IV increment. Reject this PN if increment operation is not
- // an add or increment value can not be represented by an integer.
- BinaryOperator *Incr =
- dyn_cast<BinaryOperator>(PN->getIncomingValue(BackEdge));
- if (Incr == 0 || Incr->getOpcode() != Instruction::FAdd)
- return;
-
- // If this is not an add of the PHI with a constantfp, or if the constant fp
- // is not an integer, bail out.
- ConstantFP *IncValueVal = dyn_cast<ConstantFP>(Incr->getOperand(1));
- int64_t IncValue;
- if (IncValueVal == 0 || Incr->getOperand(0) != PN ||
- !ConvertToSInt(IncValueVal->getValueAPF(), IncValue))
- return;
-
- // Check Incr uses. One user is PN and the other user is an exit condition
- // used by the conditional terminator.
- Value::user_iterator IncrUse = Incr->user_begin();
- Instruction *U1 = cast<Instruction>(*IncrUse++);
- if (IncrUse == Incr->user_end())
- return;
- Instruction *U2 = cast<Instruction>(*IncrUse++);
- if (IncrUse != Incr->user_end())
- return;
-
- // Find exit condition, which is an fcmp. If it doesn't exist, or if it isn't
- // only used by a branch, we can't transform it.
- FCmpInst *Compare = dyn_cast<FCmpInst>(U1);
- if (!Compare)
- Compare = dyn_cast<FCmpInst>(U2);
- if (Compare == 0 || !Compare->hasOneUse() ||
- !isa<BranchInst>(Compare->user_back()))
- return;
-
- BranchInst *TheBr = cast<BranchInst>(Compare->user_back());
-
- // We need to verify that the branch actually controls the iteration count
- // of the loop. If not, the new IV can overflow and no one will notice.
- // The branch block must be in the loop and one of the successors must be out
- // of the loop.
- assert(TheBr->isConditional() && "Can't use fcmp if not conditional");
- if (!L->contains(TheBr->getParent()) ||
- (L->contains(TheBr->getSuccessor(0)) &&
- L->contains(TheBr->getSuccessor(1))))
- return;
-
- // If it isn't a comparison with an integer-as-fp (the exit value), we can't
- // transform it.
- ConstantFP *ExitValueVal = dyn_cast<ConstantFP>(Compare->getOperand(1));
- int64_t ExitValue;
- if (ExitValueVal == 0 ||
- !ConvertToSInt(ExitValueVal->getValueAPF(), ExitValue))
- return;
-
- // Find new predicate for integer comparison.
- CmpInst::Predicate NewPred = CmpInst::BAD_ICMP_PREDICATE;
- switch (Compare->getPredicate()) {
- default:
- return; // Unknown comparison.
- case CmpInst::FCMP_OEQ:
- case CmpInst::FCMP_UEQ:
- NewPred = CmpInst::ICMP_EQ;
- break;
- case CmpInst::FCMP_ONE:
- case CmpInst::FCMP_UNE:
- NewPred = CmpInst::ICMP_NE;
- break;
- case CmpInst::FCMP_OGT:
- case CmpInst::FCMP_UGT:
- NewPred = CmpInst::ICMP_SGT;
- break;
- case CmpInst::FCMP_OGE:
- case CmpInst::FCMP_UGE:
- NewPred = CmpInst::ICMP_SGE;
- break;
- case CmpInst::FCMP_OLT:
- case CmpInst::FCMP_ULT:
- NewPred = CmpInst::ICMP_SLT;
- break;
- case CmpInst::FCMP_OLE:
- case CmpInst::FCMP_ULE:
- NewPred = CmpInst::ICMP_SLE;
- break;
- }
-
- // We convert the floating point induction variable to a signed i32 value if
- // we can. This is only safe if the comparison will not overflow in a way
- // that won't be trapped by the integer equivalent operations. Check for this
- // now.
- // TODO: We could use i64 if it is native and the range requires it.
-
- // The start/stride/exit values must all fit in signed i32.
- if (!isInt<32>(InitValue) || !isInt<32>(IncValue) || !isInt<32>(ExitValue))
- return;
-
- // If not actually striding (add x, 0.0), avoid touching the code.
- if (IncValue == 0)
- return;
-
- // Positive and negative strides have different safety conditions.
- if (IncValue > 0) {
- // If we have a positive stride, we require the init to be less than the
- // exit value.
- if (InitValue >= ExitValue)
- return;
-
- uint32_t Range = uint32_t(ExitValue - InitValue);
- // Check for infinite loop, either:
- // while (i <= Exit) or until (i > Exit)
- if (NewPred == CmpInst::ICMP_SLE || NewPred == CmpInst::ICMP_SGT) {
- if (++Range == 0)
- return; // Range overflows.
- }
-
- unsigned Leftover = Range % uint32_t(IncValue);
-
- // If this is an equality comparison, we require that the strided value
- // exactly land on the exit value, otherwise the IV condition will wrap
- // around and do things the fp IV wouldn't.
- if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
- Leftover != 0)
- return;
-
- // If the stride would wrap around the i32 before exiting, we can't
- // transform the IV.
- if (Leftover != 0 && int32_t(ExitValue + IncValue) < ExitValue)
- return;
-
- } else {
- // If we have a negative stride, we require the init to be greater than the
- // exit value.
- if (InitValue <= ExitValue)
- return;
-
- uint32_t Range = uint32_t(InitValue - ExitValue);
- // Check for infinite loop, either:
- // while (i >= Exit) or until (i < Exit)
- if (NewPred == CmpInst::ICMP_SGE || NewPred == CmpInst::ICMP_SLT) {
- if (++Range == 0)
- return; // Range overflows.
- }
-
- unsigned Leftover = Range % uint32_t(-IncValue);
-
- // If this is an equality comparison, we require that the strided value
- // exactly land on the exit value, otherwise the IV condition will wrap
- // around and do things the fp IV wouldn't.
- if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
- Leftover != 0)
- return;
-
- // If the stride would wrap around the i32 before exiting, we can't
- // transform the IV.
- if (Leftover != 0 && int32_t(ExitValue + IncValue) > ExitValue)
- return;
- }
-
- IntegerType *Int32Ty = Type::getInt32Ty(PN->getContext());
-
- // Insert new integer induction variable.
- PHINode *NewPHI = PHINode::Create(Int32Ty, 2, PN->getName() + ".int", PN);
- NewPHI->addIncoming(ConstantInt::get(Int32Ty, InitValue),
- PN->getIncomingBlock(IncomingEdge));
-
- Value *NewAdd =
- BinaryOperator::CreateAdd(NewPHI, ConstantInt::get(Int32Ty, IncValue),
- Incr->getName() + ".int", Incr);
- NewPHI->addIncoming(NewAdd, PN->getIncomingBlock(BackEdge));
-
- ICmpInst *NewCompare =
- new ICmpInst(TheBr, NewPred, NewAdd, ConstantInt::get(Int32Ty, ExitValue),
- Compare->getName());
-
- // In the following deletions, PN may become dead and may be deleted.
- // Use a WeakVH to observe whether this happens.
- WeakVH WeakPH = PN;
-
- // Delete the old floating point exit comparison. The branch starts using the
- // new comparison.
- NewCompare->takeName(Compare);
- Compare->replaceAllUsesWith(NewCompare);
- RecursivelyDeleteTriviallyDeadInstructions(Compare);
-
- // Delete the old floating point increment.
- Incr->replaceAllUsesWith(UndefValue::get(Incr->getType()));
- RecursivelyDeleteTriviallyDeadInstructions(Incr);
-
- // If the FP induction variable still has uses, this is because something else
- // in the loop uses its value. In order to canonicalize the induction
- // variable, we chose to eliminate the IV and rewrite it in terms of an
- // int->fp cast.
- //
- // We give preference to sitofp over uitofp because it is faster on most
- // platforms.
- if (WeakPH) {
- Value *Conv = new SIToFPInst(NewPHI, PN->getType(), "indvar.conv",
- PN->getParent()->getFirstInsertionPt());
- PN->replaceAllUsesWith(Conv);
- RecursivelyDeleteTriviallyDeadInstructions(PN);
- }
-
- // Add a new IVUsers entry for the newly-created integer PHI.
- if (IU)
- IU->AddUsersIfInteresting(NewPHI);
-
- Changed = true;
-}
-
-void PollyIndVarSimplify::RewriteNonIntegerIVs(Loop *L) {
- // First step. Check to see if there are any floating-point recurrences.
- // If there are, change them into integer recurrences, permitting analysis by
- // the SCEV routines.
- //
- BasicBlock *Header = L->getHeader();
-
- SmallVector<WeakVH, 8> PHIs;
- for (BasicBlock::iterator I = Header->begin();
- PHINode *PN = dyn_cast<PHINode>(I); ++I)
- PHIs.push_back(PN);
-
- for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
- if (PHINode *PN = dyn_cast_or_null<PHINode>(&*PHIs[i]))
- HandleFloatingPointIV(L, PN);
-
- // If the loop previously had floating-point IV, ScalarEvolution
- // may not have been able to compute a trip count. Now that we've done some
- // re-writing, the trip count may be computable.
- if (Changed)
- SE->forgetLoop(L);
-}
-
-//===----------------------------------------------------------------------===//
-// RewriteLoopExitValues - Optimize IV users outside the loop.
-// As a side effect, reduces the amount of IV processing within the loop.
-//===----------------------------------------------------------------------===//
-
-/// RewriteLoopExitValues - Check to see if this loop has a computable
-/// loop-invariant execution count. If so, this means that we can compute the
-/// final value of any expressions that are recurrent in the loop, and
-/// substitute the exit values from the loop into any instructions outside of
-/// the loop that use the final values of the current expressions.
-///
-/// This is mostly redundant with the regular IndVarSimplify activities that
-/// happen later, except that it's more powerful in some cases, because it's
-/// able to brute-force evaluate arbitrary instructions as long as they have
-/// constant operands at the beginning of the loop.
-void PollyIndVarSimplify::RewriteLoopExitValues(Loop *L,
- SCEVExpander &Rewriter) {
- // Verify the input to the pass is already in LCSSA form.
- assert(L->isLCSSAForm(*DT));
-
- SmallVector<BasicBlock *, 8> ExitBlocks;
- L->getUniqueExitBlocks(ExitBlocks);
-
- // Find all values that are computed inside the loop, but used outside of it.
- // Because of LCSSA, these values will only occur in LCSSA PHI Nodes. Scan
- // the exit blocks of the loop to find them.
- for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
- BasicBlock *ExitBB = ExitBlocks[i];
-
- // If there are no PHI nodes in this exit block, then no values defined
- // inside the loop are used on this path, skip it.
- PHINode *PN = dyn_cast<PHINode>(ExitBB->begin());
- if (!PN)
- continue;
-
- unsigned NumPreds = PN->getNumIncomingValues();
-
- // Iterate over all of the PHI nodes.
- BasicBlock::iterator BBI = ExitBB->begin();
- while ((PN = dyn_cast<PHINode>(BBI++))) {
- if (PN->use_empty())
- continue; // dead use, don't replace it
-
- // SCEV only supports integer expressions for now.
- if (!PN->getType()->isIntegerTy() && !PN->getType()->isPointerTy())
- continue;
-
- // It's necessary to tell ScalarEvolution about this explicitly so that
- // it can walk the def-use list and forget all SCEVs, as it may not be
- // watching the PHI itself. Once the new exit value is in place, there
- // may not be a def-use connection between the loop and every instruction
- // which got a SCEVAddRecExpr for that loop.
- SE->forgetValue(PN);
-
- // Iterate over all of the values in all the PHI nodes.
- for (unsigned i = 0; i != NumPreds; ++i) {
- // If the value being merged in is not integer or is not defined
- // in the loop, skip it.
- Value *InVal = PN->getIncomingValue(i);
- if (!isa<Instruction>(InVal))
- continue;
-
- // If this pred is for a subloop, not L itself, skip it.
- if (LI->getLoopFor(PN->getIncomingBlock(i)) != L)
- continue; // The Block is in a subloop, skip it.
-
- // Check that InVal is defined in the loop.
- Instruction *Inst = cast<Instruction>(InVal);
- if (!L->contains(Inst))
- continue;
-
- // Okay, this instruction has a user outside of the current loop
- // and varies predictably *inside* the loop. Evaluate the value it
- // contains when the loop exits, if possible.
- const SCEV *ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop());
- if (!SE->isLoopInvariant(ExitValue, L))
- continue;
-
- Value *ExitVal = Rewriter.expandCodeFor(ExitValue, PN->getType(), Inst);
-
- DEBUG(dbgs() << "INDVARS: RLEV: AfterLoopVal = " << *ExitVal << '\n'
- << " LoopVal = " << *Inst << "\n");
-
- if (!isValidRewrite(Inst, ExitVal)) {
- DeadInsts.push_back(ExitVal);
- continue;
- }
- Changed = true;
- ++NumReplaced;
-
- PN->setIncomingValue(i, ExitVal);
-
- // If this instruction is dead now, delete it.
- RecursivelyDeleteTriviallyDeadInstructions(Inst);
-
- if (NumPreds == 1) {
- // Completely replace a single-pred PHI. This is safe, because the
- // NewVal won't be variant in the loop, so we don't need an LCSSA phi
- // node anymore.
- PN->replaceAllUsesWith(ExitVal);
- RecursivelyDeleteTriviallyDeadInstructions(PN);
- }
- }
- if (NumPreds != 1) {
- // Clone the PHI and delete the original one. This lets IVUsers and
- // any other maps purge the original user from their records.
- PHINode *NewPN = cast<PHINode>(PN->clone());
- NewPN->takeName(PN);
- NewPN->insertBefore(PN);
- PN->replaceAllUsesWith(NewPN);
- PN->eraseFromParent();
- }
- }
- }
-
- // The insertion point instruction may have been deleted; clear it out
- // so that the rewriter doesn't trip over it later.
- Rewriter.clearInsertPoint();
-}
-
-//===----------------------------------------------------------------------===//
-// Rewrite IV users based on a canonical IV.
-// Only for use with -enable-iv-rewrite.
-//===----------------------------------------------------------------------===//
-
-/// FIXME: It is an extremely bad idea to indvar substitute anything more
-/// complex than affine induction variables. Doing so will put expensive
-/// polynomial evaluations inside of the loop, and the str reduction pass
-/// currently can only reduce affine polynomials. For now just disable
-/// indvar subst on anything more complex than an affine addrec, unless
-/// it can be expanded to a trivial value.
-static bool isSafe(const SCEV *S, const Loop *L, ScalarEvolution *SE) {
- // Loop-invariant values are safe.
- if (SE->isLoopInvariant(S, L))
- return true;
-
- // Affine addrecs are safe. Non-affine are not, because LSR doesn't know how
- // to transform them into efficient code.
- if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
- return AR->isAffine();
-
- // An add is safe if all its operands are safe.
- if (const SCEVCommutativeExpr *Commutative =
- dyn_cast<SCEVCommutativeExpr>(S)) {
- for (SCEVCommutativeExpr::op_iterator I = Commutative->op_begin(),
- E = Commutative->op_end();
- I != E; ++I)
- if (!isSafe(*I, L, SE))
- return false;
- return true;
- }
-
- // A cast is safe if its operand is.
- if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
- return isSafe(C->getOperand(), L, SE);
-
- // A udiv is safe if its operands are.
- if (const SCEVUDivExpr *UD = dyn_cast<SCEVUDivExpr>(S))
- return isSafe(UD->getLHS(), L, SE) && isSafe(UD->getRHS(), L, SE);
-
- // SCEVUnknown is always safe.
- if (isa<SCEVUnknown>(S))
- return true;
-
- // Nothing else is safe.
- return false;
-}
-
-void PollyIndVarSimplify::RewriteIVExpressions(Loop *L,
- SCEVExpander &Rewriter) {
- // Rewrite all induction variable expressions in terms of the canonical
- // induction variable.
- //
- // If there were induction variables of other sizes or offsets, manually
- // add the offsets to the primary induction variable and cast, avoiding
- // the need for the code evaluation methods to insert induction variables
- // of different sizes.
- for (IVUsers::iterator UI = IU->begin(), E = IU->end(); UI != E; ++UI) {
- Value *Op = UI->getOperandValToReplace();
- Type *UseTy = Op->getType();
- Instruction *User = UI->getUser();
-
- // Compute the final addrec to expand into code.
- const SCEV *AR = IU->getReplacementExpr(*UI);
-
- // Evaluate the expression out of the loop, if possible.
- if (!L->contains(UI->getUser())) {
- const SCEV *ExitVal = SE->getSCEVAtScope(AR, L->getParentLoop());
- if (SE->isLoopInvariant(ExitVal, L))
- AR = ExitVal;
- }
-
- // FIXME: It is an extremely bad idea to indvar substitute anything more
- // complex than affine induction variables. Doing so will put expensive
- // polynomial evaluations inside of the loop, and the str reduction pass
- // currently can only reduce affine polynomials. For now just disable
- // indvar subst on anything more complex than an affine addrec, unless
- // it can be expanded to a trivial value.
- if (!isSafe(AR, L, SE))
- continue;
-
- // Determine the insertion point for this user. By default, insert
- // immediately before the user. The SCEVExpander class will automatically
- // hoist loop invariants out of the loop. For PHI nodes, there may be
- // multiple uses, so compute the nearest common dominator for the
- // incoming blocks.
- Instruction *InsertPt = getInsertPointForUses(User, Op, DT);
-
- // Now expand it into actual Instructions and patch it into place.
- Value *NewVal = Rewriter.expandCodeFor(AR, UseTy, InsertPt);
-
- DEBUG(dbgs() << "INDVARS: Rewrote IV '" << *AR << "' " << *Op << '\n'
- << " into = " << *NewVal << "\n");
-
- if (!isValidRewrite(Op, NewVal)) {
- DeadInsts.push_back(NewVal);
- continue;
- }
- // Inform ScalarEvolution that this value is changing. The change doesn't
- // affect its value, but it does potentially affect which use lists the
- // value will be on after the replacement, which affects ScalarEvolution's
- // ability to walk use lists and drop dangling pointers when a value is
- // deleted.
- SE->forgetValue(User);
-
- // Patch the new value into place.
- if (Op->hasName())
- NewVal->takeName(Op);
- if (Instruction *NewValI = dyn_cast<Instruction>(NewVal))
- NewValI->setDebugLoc(User->getDebugLoc());
- User->replaceUsesOfWith(Op, NewVal);
- UI->setOperandValToReplace(NewVal);
-
- ++NumRemoved;
- Changed = true;
-
- // The old value may be dead now.
- DeadInsts.push_back(Op);
- }
-}
-
-//===----------------------------------------------------------------------===//
-// IV Widening - Extend the width of an IV to cover its widest uses.
-//===----------------------------------------------------------------------===//
-
-namespace {
-// Collect information about induction variables that are used by sign/zero
-// extend operations. This information is recorded by CollectExtend and
-// provides the input to WidenIV.
-struct WideIVInfo {
- PHINode *NarrowIV;
- Type *WidestNativeType; // Widest integer type created [sz]ext
- bool IsSigned; // Was an sext user seen before a zext?
-
- WideIVInfo() : NarrowIV(0), WidestNativeType(0), IsSigned(false) {}
-};
-
-class WideIVVisitor : public IVVisitor {
- ScalarEvolution *SE;
- const DataLayout *TD;
-
-public:
- WideIVInfo WI;
-
- WideIVVisitor(PHINode *NarrowIV, ScalarEvolution *SCEV,
- const DataLayout *TData)
- : SE(SCEV), TD(TData) {
- WI.NarrowIV = NarrowIV;
- }
-
- // Implement the interface used by simplifyUsersOfIV.
- virtual void visitCast(CastInst *Cast);
-};
-}
-
-/// visitCast - Update information about the induction variable that is
-/// extended by this sign or zero extend operation. This is used to determine
-/// the final width of the IV before actually widening it.
-void WideIVVisitor::visitCast(CastInst *Cast) {
- bool IsSigned = Cast->getOpcode() == Instruction::SExt;
- if (!IsSigned && Cast->getOpcode() != Instruction::ZExt)
- return;
-
- Type *Ty = Cast->getType();
- uint64_t Width = SE->getTypeSizeInBits(Ty);
- if (TD && !TD->isLegalInteger(Width))
- return;
-
- if (!WI.WidestNativeType) {
- WI.WidestNativeType = SE->getEffectiveSCEVType(Ty);
- WI.IsSigned = IsSigned;
- return;
- }
-
- // We extend the IV to satisfy the sign of its first user, arbitrarily.
- if (WI.IsSigned != IsSigned)
- return;
-
- if (Width > SE->getTypeSizeInBits(WI.WidestNativeType))
- WI.WidestNativeType = SE->getEffectiveSCEVType(Ty);
-}
-
-namespace {
-
-/// NarrowIVDefUse - Record a link in the Narrow IV def-use chain along with the
-/// WideIV that computes the same value as the Narrow IV def. This avoids
-/// caching Use* pointers.
-struct NarrowIVDefUse {
- Instruction *NarrowDef;
- Instruction *NarrowUse;
- Instruction *WideDef;
-
- NarrowIVDefUse() : NarrowDef(0), NarrowUse(0), WideDef(0) {}
-
- NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD)
- : NarrowDef(ND), NarrowUse(NU), WideDef(WD) {}
-};
-
-/// WidenIV - The goal of this transform is to remove sign and zero extends
-/// without creating any new induction variables. To do this, it creates a new
-/// phi of the wider type and redirects all users, either removing extends or
-/// inserting truncs whenever we stop propagating the type.
-///
-class WidenIV {
- // Parameters
- PHINode *OrigPhi;
- Type *WideType;
- bool IsSigned;
-
- // Context
- LoopInfo *LI;
- Loop *L;
- ScalarEvolution *SE;
- DominatorTree *DT;
-
- // Result
- PHINode *WidePhi;
- Instruction *WideInc;
- const SCEV *WideIncExpr;
- SmallVectorImpl<WeakVH> &DeadInsts;
-
- SmallPtrSet<Instruction *, 16> Widened;
- SmallVector<NarrowIVDefUse, 8> NarrowIVUsers;
-
-public:
- WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
- DominatorTree *DTree, SmallVectorImpl<WeakVH> &DI)
- : OrigPhi(WI.NarrowIV), WideType(WI.WidestNativeType),
- IsSigned(WI.IsSigned), LI(LInfo),
- L(LI->getLoopFor(OrigPhi->getParent())), SE(SEv), DT(DTree), WidePhi(0),
- WideInc(0), WideIncExpr(0), DeadInsts(DI) {
- assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV");
- }
-
- PHINode *CreateWideIV(SCEVExpander &Rewriter);
-
-protected:
- Value *getExtend(Value *NarrowOper, Type *WideType, bool IsSigned,
- Instruction *Use);
-
- Instruction *CloneIVUser(NarrowIVDefUse DU);
-
- const SCEVAddRecExpr *GetWideRecurrence(Instruction *NarrowUse);
-
- const SCEVAddRecExpr *GetExtendedOperandRecurrence(NarrowIVDefUse DU);
-
- Instruction *WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter);
-
- void pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef);
-};
-} // anonymous namespace
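// Illustrative sketch (an assumed example, not part of the original file): the
// kind of rewrite WidenIV performs, shown for a narrow i32 IV whose value is
// sign-extended before every use.
//
//   Before widening:
//     %iv      = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]
//     %iv.sext = sext i32 %iv to i64
//     %addr    = getelementptr inbounds double* %A, i64 %iv.sext
//
//   After widening (the sext disappears; a trunc is only inserted for users
//   that really need the narrow type):
//     %iv      = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
//     %addr    = getelementptr inbounds double* %A, i64 %iv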
-
-/// isLoopInvariant - Perform a quick domtree based check for loop invariance
-/// assuming that V is used within the loop. LoopInfo::isLoopInvariant() seems
-/// gratuitous for this purpose.
-static bool isLoopInvariant(Value *V, const Loop *L, const DominatorTree *DT) {
- Instruction *Inst = dyn_cast<Instruction>(V);
- if (!Inst)
- return true;
-
- return DT->properlyDominates(Inst->getParent(), L->getHeader());
-}
-
-Value *WidenIV::getExtend(Value *NarrowOper, Type *WideType, bool IsSigned,
- Instruction *Use) {
- // Set the debug location and conservative insertion point.
- IRBuilder<> Builder(Use);
- // Hoist the insertion point into loop preheaders as far as possible.
- for (const Loop *L = LI->getLoopFor(Use->getParent());
- L && L->getLoopPreheader() && isLoopInvariant(NarrowOper, L, DT);
- L = L->getParentLoop())
- Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());
-
- return IsSigned ? Builder.CreateSExt(NarrowOper, WideType)
- : Builder.CreateZExt(NarrowOper, WideType);
-}
-
-/// CloneIVUser - Instantiate a wide operation to replace a narrow
-/// operation. This only needs to handle operations that can evaluation to
-/// SCEVAddRec. It can safely return 0 for any operation we decide not to clone.
-Instruction *WidenIV::CloneIVUser(NarrowIVDefUse DU) {
- unsigned Opcode = DU.NarrowUse->getOpcode();
- switch (Opcode) {
- default:
- return 0;
- case Instruction::Add:
- case Instruction::Mul:
- case Instruction::UDiv:
- case Instruction::Sub:
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor:
- case Instruction::Shl:
- case Instruction::LShr:
- case Instruction::AShr:
- DEBUG(dbgs() << "Cloning IVUser: " << *DU.NarrowUse << "\n");
-
- // Replace NarrowDef operands with WideDef. Otherwise, we don't know
- // anything about the narrow operand yet so must insert a [sz]ext. It is
- // probably loop invariant and will be folded or hoisted. If it actually
- // comes from a widened IV, it should be removed during a future call to
- // WidenIVUse.
- Value *LHS = (DU.NarrowUse->getOperand(0) == DU.NarrowDef)
- ? DU.WideDef
- : getExtend(DU.NarrowUse->getOperand(0), WideType,
- IsSigned, DU.NarrowUse);
- Value *RHS = (DU.NarrowUse->getOperand(1) == DU.NarrowDef)
- ? DU.WideDef
- : getExtend(DU.NarrowUse->getOperand(1), WideType,
- IsSigned, DU.NarrowUse);
-
- BinaryOperator *NarrowBO = cast<BinaryOperator>(DU.NarrowUse);
- BinaryOperator *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS,
- RHS, NarrowBO->getName());
- IRBuilder<> Builder(DU.NarrowUse);
- Builder.Insert(WideBO);
- if (const OverflowingBinaryOperator *OBO =
- dyn_cast<OverflowingBinaryOperator>(NarrowBO)) {
- if (OBO->hasNoUnsignedWrap())
- WideBO->setHasNoUnsignedWrap();
- if (OBO->hasNoSignedWrap())
- WideBO->setHasNoSignedWrap();
- }
- return WideBO;
- }
- llvm_unreachable(0);
-}
-
-/// No-wrap operations can transfer sign extension of their result to their
-/// operands. Generate the SCEV value for the widened operation without
-/// actually modifying the IR yet. If the expression after extending the
-/// operands is an AddRec for this loop, return it.
-const SCEVAddRecExpr *WidenIV::GetExtendedOperandRecurrence(NarrowIVDefUse DU) {
- // Handle the common case of add<nsw/nuw>
- if (DU.NarrowUse->getOpcode() != Instruction::Add)
- return 0;
-
- // One operand (NarrowDef) has already been extended to WideDef. Now determine
- // if extending the other will lead to a recurrence.
- unsigned ExtendOperIdx = DU.NarrowUse->getOperand(0) == DU.NarrowDef ? 1 : 0;
- assert(DU.NarrowUse->getOperand(1 - ExtendOperIdx) == DU.NarrowDef &&
- "bad DU");
-
- const SCEV *ExtendOperExpr = 0;
- const OverflowingBinaryOperator *OBO =
- cast<OverflowingBinaryOperator>(DU.NarrowUse);
- if (IsSigned && OBO->hasNoSignedWrap())
- ExtendOperExpr = SE->getSignExtendExpr(
- SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
- else if (!IsSigned && OBO->hasNoUnsignedWrap())
- ExtendOperExpr = SE->getZeroExtendExpr(
- SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
- else
- return 0;
-
- // When creating this AddExpr, don't apply the current operation's NSW or NUW
- // flags. This instruction may be guarded by control flow that the no-wrap
- // behavior depends on. Non-control-equivalent instructions can be mapped to
- // the same SCEV expression, and it would be incorrect to transfer NSW/NUW
- // semantics to those operations.
- const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(
- SE->getAddExpr(SE->getSCEV(DU.WideDef), ExtendOperExpr));
-
- if (!AddRec || AddRec->getLoop() != L)
- return 0;
- return AddRec;
-}
-
-/// GetWideRecurrence - Is this instruction potentially interesting from
-/// IVUsers' perspective after widening its type? In other words, can the
-/// extend be safely hoisted out of the loop with SCEV reducing the value to a
-/// recurrence on the same loop. If so, return the sign or zero extended
-/// recurrence. Otherwise return NULL.
-const SCEVAddRecExpr *WidenIV::GetWideRecurrence(Instruction *NarrowUse) {
- if (!SE->isSCEVable(NarrowUse->getType()))
- return 0;
-
- const SCEV *NarrowExpr = SE->getSCEV(NarrowUse);
- if (SE->getTypeSizeInBits(NarrowExpr->getType()) >=
- SE->getTypeSizeInBits(WideType)) {
- // NarrowUse implicitly widens its operand. e.g. a gep with a narrow
- // index. So don't follow this use.
- return 0;
- }
-
- const SCEV *WideExpr = IsSigned ? SE->getSignExtendExpr(NarrowExpr, WideType)
- : SE->getZeroExtendExpr(NarrowExpr, WideType);
- const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(WideExpr);
- if (!AddRec || AddRec->getLoop() != L)
- return 0;
- return AddRec;
-}
-
-/// WidenIVUse - Determine whether an individual user of the narrow IV can be
-/// widened. If so, return the wide clone of the user.
-Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
-
- // Stop traversing the def-use chain at inner-loop phis or post-loop phis.
- if (isa<PHINode>(DU.NarrowUse) &&
- LI->getLoopFor(DU.NarrowUse->getParent()) != L)
- return 0;
-
- // Our raison d'etre! Eliminate sign and zero extension.
- if (IsSigned ? isa<SExtInst>(DU.NarrowUse) : isa<ZExtInst>(DU.NarrowUse)) {
- Value *NewDef = DU.WideDef;
- if (DU.NarrowUse->getType() != WideType) {
- unsigned CastWidth = SE->getTypeSizeInBits(DU.NarrowUse->getType());
- unsigned IVWidth = SE->getTypeSizeInBits(WideType);
- if (CastWidth < IVWidth) {
- // The cast isn't as wide as the IV, so insert a Trunc.
- IRBuilder<> Builder(DU.NarrowUse);
- NewDef = Builder.CreateTrunc(DU.WideDef, DU.NarrowUse->getType());
- } else {
- // A wider extend was hidden behind a narrower one. This may induce
- // another round of IV widening in which the intermediate IV becomes
- // dead. It should be very rare.
- DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi
- << " not wide enough to subsume " << *DU.NarrowUse
- << "\n");
- DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);
- NewDef = DU.NarrowUse;
- }
- }
- if (NewDef != DU.NarrowUse) {
- DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse
- << " replaced by " << *DU.WideDef << "\n");
- ++NumElimExt;
- DU.NarrowUse->replaceAllUsesWith(NewDef);
- DeadInsts.push_back(DU.NarrowUse);
- }
- // Now that the extend is gone, we want to expose its uses for potential
- // further simplification. We don't need to directly inform SimplifyIVUsers
- // of the new users, because their parent IV will be processed later as a
- // new loop phi. If we preserved IVUsers analysis, we would also want to
- // push the uses of WideDef here.
-
- // No further widening is needed. The deceased [sz]ext had done it for us.
- return 0;
- }
-
- // Does this user itself evaluate to a recurrence after widening?
- const SCEVAddRecExpr *WideAddRec = GetWideRecurrence(DU.NarrowUse);
- if (!WideAddRec) {
- WideAddRec = GetExtendedOperandRecurrence(DU);
- }
- if (!WideAddRec) {
- // This user does not evaluate to a recurrence after widening, so don't
- // follow it. Instead insert a Trunc to kill off the original use,
- // eventually isolating the original narrow IV so it can be removed.
- IRBuilder<> Builder(getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT));
- Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType());
- DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc);
- return 0;
- }
- // Assume block terminators cannot evaluate to a recurrence. We can't
- // insert a Trunc after a terminator if there happens to be a critical edge.
- assert(DU.NarrowUse != DU.NarrowUse->getParent()->getTerminator() &&
- "SCEV is not expected to evaluate a block terminator");
-
- // Reuse the IV increment that SCEVExpander created as long as it dominates
- // NarrowUse.
- Instruction *WideUse = 0;
- if (WideAddRec == WideIncExpr && Rewriter.hoistIVInc(WideInc, DU.NarrowUse))
- WideUse = WideInc;
- else {
- WideUse = CloneIVUser(DU);
- if (!WideUse)
- return 0;
- }
- // Evaluation of WideAddRec ensured that the narrow expression could be
- // extended outside the loop without overflow. This suggests that the wide use
- // evaluates to the same expression as the extended narrow use, but doesn't
- // absolutely guarantee it. Hence the following failsafe check. In rare cases
- // where it fails, we simply throw away the newly created wide use.
- if (WideAddRec != SE->getSCEV(WideUse)) {
- DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse << ": "
- << *SE->getSCEV(WideUse) << " != " << *WideAddRec << "\n");
- DeadInsts.push_back(WideUse);
- return 0;
- }
-
- // Returning WideUse pushes it on the worklist.
- return WideUse;
-}
-
-/// pushNarrowIVUsers - Add eligible users of NarrowDef to NarrowIVUsers.
-///
-void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) {
- for (User *U : NarrowDef->users()) {
- Instruction *NarrowUser = cast<Instruction>(U);
-
- // Handle data flow merges and bizarre phi cycles.
- if (!Widened.insert(NarrowUser))
- continue;
-
- NarrowIVUsers.push_back(NarrowIVDefUse(NarrowDef, NarrowUser, WideDef));
- }
-}
-
-/// CreateWideIV - Process a single induction variable. First use the
-/// SCEVExpander to create a wide induction variable that evaluates to the same
-/// recurrence as the original narrow IV. Then use a worklist to forward
-/// traverse the narrow IV's def-use chain. After WidenIVUse has processed all
-/// interesting IV users, the narrow IV will be isolated for removal by
-/// DeleteDeadPHIs.
-///
-/// It would be simpler to delete uses as they are processed, but we must avoid
-/// invalidating SCEV expressions.
-///
-PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) {
- // Is this phi an induction variable?
- const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(OrigPhi));
- if (!AddRec)
- return NULL;
-
- // Widen the induction variable expression.
- const SCEV *WideIVExpr = IsSigned ? SE->getSignExtendExpr(AddRec, WideType)
- : SE->getZeroExtendExpr(AddRec, WideType);
-
- assert(SE->getEffectiveSCEVType(WideIVExpr->getType()) == WideType &&
- "Expect the new IV expression to preserve its type");
-
- // Can the IV be extended outside the loop without overflow?
- AddRec = dyn_cast<SCEVAddRecExpr>(WideIVExpr);
- if (!AddRec || AddRec->getLoop() != L)
- return NULL;
-
- // An AddRec must have loop-invariant operands. Since this AddRec is
- // materialized by a loop header phi, the expression cannot have any post-loop
- // operands, so they must dominate the loop header.
- assert(
- SE->properlyDominates(AddRec->getStart(), L->getHeader()) &&
- SE->properlyDominates(AddRec->getStepRecurrence(*SE), L->getHeader()) &&
- "Loop header phi recurrence inputs do not dominate the loop");
-
- // The rewriter provides a value for the desired IV expression. This may
- // either find an existing phi or materialize a new one. Either way, we
- // expect a well-formed cyclic phi-with-increments. i.e. any operand not part
- // of the phi-SCC dominates the loop entry.
- Instruction *InsertPt = L->getHeader()->begin();
- WidePhi = cast<PHINode>(Rewriter.expandCodeFor(AddRec, WideType, InsertPt));
-
- // Remembering the WideIV increment generated by SCEVExpander allows
- // WidenIVUse to reuse it when widening the narrow IV's increment. We don't
- // employ a general reuse mechanism because the call above is the only call to
- // SCEVExpander. Henceforth, we produce 1-to-1 narrow to wide uses.
- if (BasicBlock *LatchBlock = L->getLoopLatch()) {
- WideInc = cast<Instruction>(WidePhi->getIncomingValueForBlock(LatchBlock));
- WideIncExpr = SE->getSCEV(WideInc);
- }
-
- DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n");
- ++NumWidened;
-
- // Traverse the def-use chain using a worklist starting at the original IV.
- assert(Widened.empty() && NarrowIVUsers.empty() && "expect initial state");
-
- Widened.insert(OrigPhi);
- pushNarrowIVUsers(OrigPhi, WidePhi);
-
- while (!NarrowIVUsers.empty()) {
- NarrowIVDefUse DU = NarrowIVUsers.pop_back_val();
-
- // Process a def-use edge. This may replace the use, so don't hold a
- // use_iterator across it.
- Instruction *WideUse = WidenIVUse(DU, Rewriter);
-
- // Follow all def-use edges from the previous narrow use.
- if (WideUse)
- pushNarrowIVUsers(DU.NarrowUse, WideUse);
-
- // WidenIVUse may have removed the def-use edge.
- if (DU.NarrowDef->use_empty())
- DeadInsts.push_back(DU.NarrowDef);
- }
- return WidePhi;
-}
-
-//===----------------------------------------------------------------------===//
-// Simplification of IV users based on SCEV evaluation.
-//===----------------------------------------------------------------------===//
-
-/// SimplifyAndExtend - Iteratively perform simplification on a worklist of IV
-/// users. Each successive simplification may push more users which may
-/// themselves be candidates for simplification.
-///
-/// Sign/Zero extend elimination is interleaved with IV simplification.
-///
-void PollyIndVarSimplify::SimplifyAndExtend(Loop *L, SCEVExpander &Rewriter,
- LPPassManager &LPM) {
- SmallVector<WideIVInfo, 8> WideIVs;
-
- SmallVector<PHINode *, 8> LoopPhis;
- for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
- LoopPhis.push_back(cast<PHINode>(I));
- }
- // Each round of simplification iterates through the SimplifyIVUsers worklist
- // for all current phis, then determines whether any IVs can be
- // widened. Widening adds new phis to LoopPhis, inducing another round of
- // simplification on the wide IVs.
- while (!LoopPhis.empty()) {
- // Evaluate as many IV expressions as possible before widening any IVs. This
- // forces SCEV to set no-wrap flags before evaluating sign/zero
- // extension. The first time SCEV attempts to normalize sign/zero extension,
- // the result becomes final. So for the most predictable results, we delay
- // evaluation of sign/zero extensions until needed, and avoid running
- // other SCEV based analysis prior to SimplifyAndExtend.
- do {
- PHINode *CurrIV = LoopPhis.pop_back_val();
-
- // Information about sign/zero extensions of CurrIV.
- WideIVVisitor WIV(CurrIV, SE, TD);
-
- Changed |= simplifyUsersOfIV(CurrIV, SE, &LPM, DeadInsts, &WIV);
-
- if (WIV.WI.WidestNativeType) {
- WideIVs.push_back(WIV.WI);
- }
- } while (!LoopPhis.empty());
-
- for (; !WideIVs.empty(); WideIVs.pop_back()) {
- WidenIV Widener(WideIVs.back(), LI, SE, DT, DeadInsts);
- if (PHINode *WidePhi = Widener.CreateWideIV(Rewriter)) {
- Changed = true;
- LoopPhis.push_back(WidePhi);
- }
- }
- }
-}
-
-//===----------------------------------------------------------------------===//
-// LinearFunctionTestReplace and its kin. Rewrite the loop exit condition.
-//===----------------------------------------------------------------------===//
-
-/// Check for expressions that ScalarEvolution generates to compute
-/// BackedgeTakenInfo. If these expressions have not been reduced, then
-/// expanding them may incur additional cost (albeit in the loop preheader).
-static bool isHighCostExpansion(const SCEV *S, BranchInst *BI,
- SmallPtrSet<const SCEV *, 8> &Processed,
- ScalarEvolution *SE) {
- if (!Processed.insert(S))
- return false;
-
- // If the backedge-taken count is a UDiv, it's very likely a UDiv that
- // ScalarEvolution's HowFarToZero or HowManyLessThans produced to compute a
- // precise expression, rather than a UDiv from the user's code. If we can't
- // find a UDiv in the code with some simple searching, assume the former and
- // forego rewriting the loop.
- if (isa<SCEVUDivExpr>(S)) {
- ICmpInst *OrigCond = dyn_cast<ICmpInst>(BI->getCondition());
- if (!OrigCond)
- return true;
- const SCEV *R = SE->getSCEV(OrigCond->getOperand(1));
- R = SE->getMinusSCEV(R, SE->getConstant(R->getType(), 1));
- if (R != S) {
- const SCEV *L = SE->getSCEV(OrigCond->getOperand(0));
- L = SE->getMinusSCEV(L, SE->getConstant(L->getType(), 1));
- if (L != S)
- return true;
- }
- }
-
- if (EnableIVRewrite)
- return false;
-
- // Recurse past add expressions, which commonly occur in the
- // BackedgeTakenCount. They may already exist in program code, and if not,
- // they are not too expensive to rematerialize.
- if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
- for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
- I != E; ++I) {
- if (isHighCostExpansion(*I, BI, Processed, SE))
- return true;
- }
- return false;
- }
-
- // HowManyLessThans uses a Max expression whenever the loop is not guarded by
- // the exit condition.
- if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
- return true;
-
- // If we haven't recognized an expensive SCEV pattern, assume it's an
- // expression produced by program code.
- return false;
-}
-
-/// canExpandBackedgeTakenCount - Return true if this loop's backedge taken
-/// count expression can be safely and cheaply expanded into an instruction
-/// sequence that can be used by LinearFunctionTestReplace.
-///
-/// TODO: This fails for pointer-type loop counters with greater than one byte
-/// strides, consequently preventing LFTR from running. For the purpose of LFTR
-/// we could skip this check in the case that the LFTR loop counter (chosen by
-/// FindLoopCounter) is also pointer type. Instead, we could directly convert
-/// the loop test to an inequality test by checking the target data's alignment
-/// of element types (given that the initial pointer value originates from or is
- /// used by an ABI-constrained operation, as opposed to inttoptr/ptrtoint).
-/// However, we don't yet have a strong motivation for converting loop tests
-/// into inequality tests.
-static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE) {
- const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
- if (isa<SCEVCouldNotCompute>(BackedgeTakenCount) ||
- BackedgeTakenCount->isZero())
- return false;
-
- if (!L->getExitingBlock())
- return false;
-
- // Can't rewrite non-branch yet.
- BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator());
- if (!BI)
- return false;
-
- SmallPtrSet<const SCEV *, 8> Processed;
- if (isHighCostExpansion(BackedgeTakenCount, BI, Processed, SE))
- return false;
-
- return true;
-}
-
-/// getBackedgeIVType - Get the widest type used by the loop test after peeking
-/// through Truncs.
-///
-/// TODO: Unnecessary when ForceLFTR is removed.
-static Type *getBackedgeIVType(Loop *L) {
- if (!L->getExitingBlock())
- return 0;
-
- // Can't rewrite non-branch yet.
- BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator());
- if (!BI)
- return 0;
-
- ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
- if (!Cond)
- return 0;
-
- Type *Ty = 0;
- for (User::op_iterator OI = Cond->op_begin(), OE = Cond->op_end(); OI != OE;
- ++OI) {
- assert((!Ty || Ty == (*OI)->getType()) && "bad icmp operand types");
- TruncInst *Trunc = dyn_cast<TruncInst>(*OI);
- if (!Trunc)
- continue;
-
- return Trunc->getSrcTy();
- }
- return Ty;
-}
-
-/// getLoopPhiForCounter - Return the loop header phi IFF IncV adds a loop
-/// invariant value to the phi.
-static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) {
- Instruction *IncI = dyn_cast<Instruction>(IncV);
- if (!IncI)
- return 0;
-
- switch (IncI->getOpcode()) {
- case Instruction::Add:
- case Instruction::Sub:
- break;
- case Instruction::GetElementPtr:
- // An IV counter must preserve its type.
- if (IncI->getNumOperands() == 2)
- break;
- default:
- return 0;
- }
-
- PHINode *Phi = dyn_cast<PHINode>(IncI->getOperand(0));
- if (Phi && Phi->getParent() == L->getHeader()) {
- if (isLoopInvariant(IncI->getOperand(1), L, DT))
- return Phi;
- return 0;
- }
- if (IncI->getOpcode() == Instruction::GetElementPtr)
- return 0;
-
- // Allow add/sub to be commuted.
- Phi = dyn_cast<PHINode>(IncI->getOperand(1));
- if (Phi && Phi->getParent() == L->getHeader()) {
- if (isLoopInvariant(IncI->getOperand(0), L, DT))
- return Phi;
- }
- return 0;
-}
-
-/// needsLFTR - LinearFunctionTestReplace policy. Return true unless we can show
-/// that the current exit test is already sufficiently canonical.
-static bool needsLFTR(Loop *L, DominatorTree *DT) {
- assert(L->getExitingBlock() && "expected loop exit");
-
- BasicBlock *LatchBlock = L->getLoopLatch();
- // Don't bother with LFTR if the loop is not properly simplified.
- if (!LatchBlock)
- return false;
-
- BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator());
- assert(BI && "expected exit branch");
-
- // Do LFTR to simplify the exit condition to an ICMP.
- ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
- if (!Cond)
- return true;
-
- // Do LFTR to simplify the exit ICMP to EQ/NE
- ICmpInst::Predicate Pred = Cond->getPredicate();
- if (Pred != ICmpInst::ICMP_NE && Pred != ICmpInst::ICMP_EQ)
- return true;
-
- // Look for a loop invariant RHS
- Value *LHS = Cond->getOperand(0);
- Value *RHS = Cond->getOperand(1);
- if (!isLoopInvariant(RHS, L, DT)) {
- if (!isLoopInvariant(LHS, L, DT))
- return true;
- std::swap(LHS, RHS);
- }
- // Look for a simple IV counter LHS
- PHINode *Phi = dyn_cast<PHINode>(LHS);
- if (!Phi)
- Phi = getLoopPhiForCounter(LHS, L, DT);
-
- if (!Phi)
- return true;
-
- // Do LFTR if the exit condition's IV is *not* a simple counter.
- Value *IncV = Phi->getIncomingValueForBlock(L->getLoopLatch());
- return Phi != getLoopPhiForCounter(IncV, L, DT);
-}
-
-/// AlmostDeadIV - Return true if this IV has any uses other than the (soon to
-/// be rewritten) loop exit test.
-static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
- int LatchIdx = Phi->getBasicBlockIndex(LatchBlock);
- Value *IncV = Phi->getIncomingValue(LatchIdx);
-
- for (User *U : Phi->users())
- if (U != Cond && U != IncV)
- return false;
-
- for (User *U : IncV->users())
- if (U != Cond && U != Phi)
- return false;
-
- return true;
-}
-
-/// FindLoopCounter - Find an affine IV in canonical form.
-///
-/// BECount may be an i8* pointer type. The pointer difference is already
-/// valid count without scaling the address stride, so it remains a pointer
-/// expression as far as SCEV is concerned.
-///
-/// FIXME: Accept -1 stride and set IVLimit = IVInit - BECount
-///
-/// FIXME: Accept non-unit stride as long as SCEV can reduce BECount * Stride.
-/// This is difficult in general for SCEV because of potential overflow. But we
-/// could at least handle constant BECounts.
-static PHINode *FindLoopCounter(Loop *L, const SCEV *BECount,
- ScalarEvolution *SE, DominatorTree *DT,
- const DataLayout *TD) {
- uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());
-
- Value *Cond =
- cast<BranchInst>(L->getExitingBlock()->getTerminator())->getCondition();
-
- // Loop over all of the PHI nodes, looking for a simple counter.
- PHINode *BestPhi = 0;
- const SCEV *BestInit = 0;
- BasicBlock *LatchBlock = L->getLoopLatch();
- assert(LatchBlock && "needsLFTR should guarantee a loop latch");
-
- for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
- PHINode *Phi = cast<PHINode>(I);
- if (!SE->isSCEVable(Phi->getType()))
- continue;
-
- // Avoid comparing an integer IV against a pointer Limit.
- if (BECount->getType()->isPointerTy() && !Phi->getType()->isPointerTy())
- continue;
-
- const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Phi));
- if (!AR || AR->getLoop() != L || !AR->isAffine())
- continue;
-
- // AR may be a pointer type, while BECount is an integer type.
- // AR may be wider than BECount. With eq/ne tests overflow is immaterial.
- // AR may not be a narrower type, or we may never exit.
- uint64_t PhiWidth = SE->getTypeSizeInBits(AR->getType());
- if (PhiWidth < BCWidth || (TD && !TD->isLegalInteger(PhiWidth)))
- continue;
-
- const SCEV *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
- if (!Step || !Step->isOne())
- continue;
-
- int LatchIdx = Phi->getBasicBlockIndex(LatchBlock);
- Value *IncV = Phi->getIncomingValue(LatchIdx);
- if (getLoopPhiForCounter(IncV, L, DT) != Phi)
- continue;
-
- const SCEV *Init = AR->getStart();
-
- if (BestPhi && !AlmostDeadIV(BestPhi, LatchBlock, Cond)) {
- // Don't force a live loop counter if another IV can be used.
- if (AlmostDeadIV(Phi, LatchBlock, Cond))
- continue;
-
- // Prefer to count-from-zero. This is a more "canonical" counter form. It
- // also prefers integer to pointer IVs.
- if (BestInit->isZero() != Init->isZero()) {
- if (BestInit->isZero())
- continue;
- }
- // If two IVs both count from zero or both count from nonzero then the
- // narrower is likely a dead phi that has been widened. Use the wider phi
- // to allow the other to be eliminated.
- if (PhiWidth <= SE->getTypeSizeInBits(BestPhi->getType()))
- continue;
- }
- BestPhi = Phi;
- BestInit = Init;
- }
- return BestPhi;
-}
-
-/// genLoopLimit - Help LinearFunctionTestReplace by generating a value that
-/// holds the RHS of the new loop test.
-static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
- SCEVExpander &Rewriter, ScalarEvolution *SE) {
- const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(IndVar));
- assert(AR && AR->getLoop() == L && AR->isAffine() && "bad loop counter");
- const SCEV *IVInit = AR->getStart();
-
- // IVInit may be a pointer while IVCount is an integer when FindLoopCounter
- // finds a valid pointer IV. Sign extend BECount in order to materialize a
- // GEP. Avoid running SCEVExpander on a new pointer value, instead reusing
- // the existing GEPs whenever possible.
- if (IndVar->getType()->isPointerTy() && !IVCount->getType()->isPointerTy()) {
-
- Type *OfsTy = SE->getEffectiveSCEVType(IVInit->getType());
- const SCEV *IVOffset = SE->getTruncateOrSignExtend(IVCount, OfsTy);
-
- // Expand the code for the iteration count.
- assert(SE->isLoopInvariant(IVOffset, L) &&
- "Computed iteration count is not loop invariant!");
- BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
- Value *GEPOffset = Rewriter.expandCodeFor(IVOffset, OfsTy, BI);
-
- Value *GEPBase = IndVar->getIncomingValueForBlock(L->getLoopPreheader());
- assert(AR->getStart() == SE->getSCEV(GEPBase) && "bad loop counter");
- // We could handle pointer IVs other than i8*, but we need to compensate for
- // gep index scaling. See canExpandBackedgeTakenCount comments.
- assert(SE->getSizeOfExpr(IntegerType::getInt64Ty(IndVar->getContext()),
- cast<PointerType>(GEPBase->getType())
- ->getElementType())->isOne() &&
- "unit stride pointer IV must be i8*");
-
- IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
- return Builder.CreateGEP(GEPBase, GEPOffset, "lftr.limit");
- } else {
- // In any other case, convert both IVInit and IVCount to integers before
- // comparing. This may result in SCEV expansion of pointers, but in practice
- // SCEV will fold the pointer arithmetic away as such:
- // BECount = (IVEnd - IVInit - 1) => IVLimit = IVInit (postinc).
- //
- // Valid Cases: (1) both integers is most common; (2) both may be pointers
- // for simple memset-style loops; (3) IVInit is an integer and IVCount is a
- // pointer may occur when enable-iv-rewrite generates a canonical IV on top
- // of case #2.
-
- const SCEV *IVLimit = 0;
- // For unit stride, IVCount = Start + BECount with 2's complement overflow.
- // For non-zero Start, compute IVCount here.
- if (AR->getStart()->isZero())
- IVLimit = IVCount;
- else {
- assert(AR->getStepRecurrence(*SE)->isOne() && "only handles unit stride");
- const SCEV *IVInit = AR->getStart();
-
- // For integer IVs, truncate the IV before computing IVInit + BECount.
- if (SE->getTypeSizeInBits(IVInit->getType()) >
- SE->getTypeSizeInBits(IVCount->getType()))
- IVInit = SE->getTruncateExpr(IVInit, IVCount->getType());
-
- IVLimit = SE->getAddExpr(IVInit, IVCount);
- }
- // Expand the code for the iteration count.
- BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
- IRBuilder<> Builder(BI);
- assert(SE->isLoopInvariant(IVLimit, L) &&
- "Computed iteration count is not loop invariant!");
- // Ensure that we generate the same type as IndVar, or a smaller integer
- // type. In the presence of null pointer values, we have an integer type
- // SCEV expression (IVInit) for a pointer type IV value (IndVar).
- Type *LimitTy = IVCount->getType()->isPointerTy() ? IndVar->getType()
- : IVCount->getType();
- return Rewriter.expandCodeFor(IVLimit, LimitTy, BI);
- }
-}
-
-/// LinearFunctionTestReplace - This method rewrites the exit condition of the
-/// loop to be a canonical != comparison against the incremented loop induction
-/// variable. This pass is able to rewrite the exit tests of any loop where the
-/// SCEV analysis can determine a loop-invariant trip count of the loop, which
-/// is actually a much broader range than just linear tests.
-Value *PollyIndVarSimplify::LinearFunctionTestReplace(
- Loop *L, const SCEV *BackedgeTakenCount, PHINode *IndVar,
- SCEVExpander &Rewriter) {
- assert(canExpandBackedgeTakenCount(L, SE) && "precondition");
-
- // LFTR can ignore IV overflow and truncate to the width of
- // BECount. This avoids materializing the add(zext(add)) expression.
- Type *CntTy =
- !EnableIVRewrite ? BackedgeTakenCount->getType() : IndVar->getType();
-
- const SCEV *IVCount = BackedgeTakenCount;
-
- // If the exiting block is the same as the backedge block, we prefer to
- // compare against the post-incremented value, otherwise we must compare
- // against the preincremented value.
- Value *CmpIndVar;
- if (L->getExitingBlock() == L->getLoopLatch()) {
- // Add one to the "backedge-taken" count to get the trip count.
- // If this addition may overflow, we have to be more pessimistic and
- // cast the induction variable before doing the add.
- const SCEV *N =
- SE->getAddExpr(IVCount, SE->getConstant(IVCount->getType(), 1));
- if (CntTy == IVCount->getType())
- IVCount = N;
- else {
- const SCEV *Zero = SE->getConstant(IVCount->getType(), 0);
- if ((isa<SCEVConstant>(N) && !N->isZero()) ||
- SE->isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, N, Zero)) {
- // No overflow. Cast the sum.
- IVCount = SE->getTruncateOrZeroExtend(N, CntTy);
- } else {
- // Potential overflow. Cast before doing the add.
- IVCount = SE->getTruncateOrZeroExtend(IVCount, CntTy);
- IVCount = SE->getAddExpr(IVCount, SE->getConstant(CntTy, 1));
- }
- }
- // The BackedgeTaken expression contains the number of times that the
- // backedge branches to the loop header. This is one less than the
- // number of times the loop executes, so use the incremented indvar.
- CmpIndVar = IndVar->getIncomingValueForBlock(L->getExitingBlock());
- } else {
- // We must use the preincremented value...
- IVCount = SE->getTruncateOrZeroExtend(IVCount, CntTy);
- CmpIndVar = IndVar;
- }
-
- Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE);
- assert(ExitCnt->getType()->isPointerTy() ==
- IndVar->getType()->isPointerTy() &&
- "genLoopLimit missed a cast");
-
- // Insert a new icmp_ne or icmp_eq instruction before the branch.
- BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
- ICmpInst::Predicate P;
- if (L->contains(BI->getSuccessor(0)))
- P = ICmpInst::ICMP_NE;
- else
- P = ICmpInst::ICMP_EQ;
-
- DEBUG(dbgs() << "INDVARS: Rewriting loop exit condition to:\n"
- << " LHS:" << *CmpIndVar << '\n' << " op:\t"
- << (P == ICmpInst::ICMP_NE ? "!=" : "==") << "\n"
- << " RHS:\t" << *ExitCnt << "\n"
- << " IVCount:\t" << *IVCount << "\n");
-
- IRBuilder<> Builder(BI);
- if (SE->getTypeSizeInBits(CmpIndVar->getType()) >
- SE->getTypeSizeInBits(ExitCnt->getType())) {
- CmpIndVar =
- Builder.CreateTrunc(CmpIndVar, ExitCnt->getType(), "lftr.wideiv");
- }
-
- Value *Cond = Builder.CreateICmp(P, CmpIndVar, ExitCnt, "exitcond");
- Value *OrigCond = BI->getCondition();
- // It's tempting to use replaceAllUsesWith here to fully replace the old
- // comparison, but that's not immediately safe, since users of the old
- // comparison may not be dominated by the new comparison. Instead, just
- // update the branch to use the new comparison; in the common case this
- // will make the old comparison dead.
- BI->setCondition(Cond);
- DeadInsts.push_back(OrigCond);
-
- ++NumLFTR;
- Changed = true;
- return Cond;
-}
-
-//===----------------------------------------------------------------------===//
-// SinkUnusedInvariants. A late subpass to clean up loop preheaders.
-//===----------------------------------------------------------------------===//
-
-/// If there's a single exit block, sink any loop-invariant values that
-/// were defined in the preheader but not used inside the loop into the
-/// exit block to reduce register pressure in the loop.
-void PollyIndVarSimplify::SinkUnusedInvariants(Loop *L) {
- BasicBlock *ExitBlock = L->getExitBlock();
- if (!ExitBlock)
- return;
-
- BasicBlock *Preheader = L->getLoopPreheader();
- if (!Preheader)
- return;
-
- Instruction *InsertPt = ExitBlock->getFirstInsertionPt();
- BasicBlock::iterator I = Preheader->getTerminator();
- while (I != Preheader->begin()) {
- --I;
- // New instructions were inserted at the end of the preheader.
- if (isa<PHINode>(I))
- break;
-
- // Don't move instructions which might have side effects, since the side
- // effects need to complete before instructions inside the loop. Also don't
- // move instructions which might read memory, since the loop may modify
- // memory. Note that it's okay if the instruction might have undefined
- // behavior: LoopSimplify guarantees that the preheader dominates the exit
- // block.
- if (I->mayHaveSideEffects() || I->mayReadFromMemory())
- continue;
-
- // Skip debug info intrinsics.
- if (isa<DbgInfoIntrinsic>(I))
- continue;
-
- // Skip landingpad instructions.
- if (isa<LandingPadInst>(I))
- continue;
-
- // Don't sink alloca: we never want to sink static alloca's out of the
- // entry block, and correctly sinking dynamic alloca's requires
- // checks for stacksave/stackrestore intrinsics.
- // FIXME: Refactor this check somehow?
- if (isa<AllocaInst>(I))
- continue;
-
- // Determine if there is a use in or before the loop (direct or
- // otherwise).
- bool UsedInLoop = false;
- for (Use &U : I->uses()) {
- Instruction *UI = cast<Instruction>(U.getUser());
- BasicBlock *UseBB = UI->getParent();
- if (PHINode *P = dyn_cast<PHINode>(UI)) {
- unsigned i = PHINode::getIncomingValueNumForOperand(U.getOperandNo());
- UseBB = P->getIncomingBlock(i);
- }
- if (UseBB == Preheader || L->contains(UseBB)) {
- UsedInLoop = true;
- break;
- }
- }
-
- // If there is, the def must remain in the preheader.
- if (UsedInLoop)
- continue;
-
- // Otherwise, sink it to the exit block.
- Instruction *ToMove = I;
- bool Done = false;
-
- if (I != Preheader->begin()) {
- // Skip debug info intrinsics.
- do {
- --I;
- } while (isa<DbgInfoIntrinsic>(I) && I != Preheader->begin());
-
- if (isa<DbgInfoIntrinsic>(I) && I == Preheader->begin())
- Done = true;
- } else {
- Done = true;
- }
-
- ToMove->moveBefore(InsertPt);
- if (Done)
- break;
- InsertPt = ToMove;
- }
-}
-
-//===----------------------------------------------------------------------===//
-// IndVarSimplify driver. Manage several subpasses of IV simplification.
-//===----------------------------------------------------------------------===//
-
-bool PollyIndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
- // If LoopSimplify form is not available, stay out of trouble. Some notes:
- // - LSR currently only supports LoopSimplify-form loops. Indvars'
- // canonicalization can be a pessimization without LSR to "clean up"
- // afterwards.
- // - We depend on having a preheader; in particular,
- // Loop::getCanonicalInductionVariable only supports loops with preheaders,
- // and we're in trouble if we can't find the induction variable even when
- // we've manually inserted one.
- if (!L->isLoopSimplifyForm())
- return false;
-
- if (EnableIVRewrite)
- IU = &getAnalysis<IVUsers>();
- LI = &getAnalysis<LoopInfo>();
- SE = &getAnalysis<ScalarEvolution>();
- DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- TD = DLP ? &DLP->getDataLayout() : 0;
-
- DeadInsts.clear();
- Changed = false;
-
- // If there are any floating-point recurrences, attempt to
- // transform them to use integer recurrences.
- RewriteNonIntegerIVs(L);
-
- const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
-
- // Create a rewriter object which we'll use to transform the code with.
- SCEVExpander Rewriter(*SE, "indvars");
-#ifndef NDEBUG
- Rewriter.setDebugType(DEBUG_TYPE);
-#endif
-
- // Eliminate redundant IV users.
- //
- // Simplification works best when run before other consumers of SCEV. We
- // attempt to avoid evaluating SCEVs for sign/zero extend operations until
- // other expressions involving loop IVs have been evaluated. This helps SCEV
- // set no-wrap flags before normalizing sign/zero extension.
- if (!EnableIVRewrite) {
- Rewriter.disableCanonicalMode();
- SimplifyAndExtend(L, Rewriter, LPM);
- }
-
- // Check to see if this loop has a computable loop-invariant execution count.
- // If so, this means that we can compute the final value of any expressions
- // that are recurrent in the loop, and substitute the exit values from the
- // loop into any instructions outside of the loop that use the final values of
- // the current expressions.
- //
- if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount))
- RewriteLoopExitValues(L, Rewriter);
-
- // Eliminate redundant IV users.
- // FIXME: Disabled as the function was removed from LLVM trunk. We can
- // probably live with this, as Polly does not need a lot of
- // simplifications, but just a canonical induction variable. In the near
- // future, we should remove the need for canonical induction variables
- // altogether.
- // if (EnableIVRewrite)
- // Changed |= simplifyIVUsers(IU, SE, &LPM, DeadInsts);
-
- // Eliminate redundant IV cycles.
- if (!EnableIVRewrite)
- NumElimIV += Rewriter.replaceCongruentIVs(L, DT, DeadInsts);
-
- // Compute the type of the largest recurrence expression, and decide whether
- // a canonical induction variable should be inserted.
- Type *LargestType = 0;
- bool NeedCannIV = false;
- bool ExpandBECount = canExpandBackedgeTakenCount(L, SE);
- if (EnableIVRewrite && ExpandBECount) {
- // If we have a known trip count and a single exit block, we'll be
- // rewriting the loop exit test condition below, which requires a
- // canonical induction variable.
- NeedCannIV = true;
- Type *Ty = BackedgeTakenCount->getType();
- if (!EnableIVRewrite) {
- // In this mode, SimplifyIVUsers may have already widened the IV used by
- // the backedge test and inserted a Trunc on the compare's operand. Get
- // the wider type to avoid creating a redundant narrow IV only used by the
- // loop test.
- LargestType = getBackedgeIVType(L);
- }
- if (!LargestType ||
- SE->getTypeSizeInBits(Ty) > SE->getTypeSizeInBits(LargestType))
- LargestType = SE->getEffectiveSCEVType(Ty);
- }
- if (EnableIVRewrite) {
- for (IVUsers::const_iterator I = IU->begin(), E = IU->end(); I != E; ++I) {
- NeedCannIV = true;
- Type *Ty =
- SE->getEffectiveSCEVType(I->getOperandValToReplace()->getType());
- if (!LargestType ||
- SE->getTypeSizeInBits(Ty) > SE->getTypeSizeInBits(LargestType))
- LargestType = Ty;
- }
- }
-
- // Now that we know the largest of the induction variable expressions
- // in this loop, insert a canonical induction variable of the largest size.
- PHINode *IndVar = 0;
- if (NeedCannIV) {
- // Check to see if the loop already has any canonical-looking induction
- // variables. If any are present and wider than the planned canonical
- // induction variable, temporarily remove them, so that the Rewriter
- // doesn't attempt to reuse them.
- SmallVector<PHINode *, 2> OldCannIVs;
- while (PHINode *OldCannIV = L->getCanonicalInductionVariable()) {
- if (SE->getTypeSizeInBits(OldCannIV->getType()) >
- SE->getTypeSizeInBits(LargestType))
- OldCannIV->removeFromParent();
- else
- break;
- OldCannIVs.push_back(OldCannIV);
- }
-
- IndVar = Rewriter.getOrInsertCanonicalInductionVariable(L, LargestType);
-
- ++NumInserted;
- Changed = true;
- DEBUG(dbgs() << "INDVARS: New CanIV: " << *IndVar << '\n');
-
- // Now that the official induction variable is established, reinsert
- // any old canonical-looking variables after it so that the IR remains
- // consistent. They will be deleted as part of the dead-PHI deletion at
- // the end of the pass.
- while (!OldCannIVs.empty()) {
- PHINode *OldCannIV = OldCannIVs.pop_back_val();
- OldCannIV->insertBefore(L->getHeader()->getFirstInsertionPt());
- }
- } else if (!EnableIVRewrite && ExpandBECount && needsLFTR(L, DT)) {
- IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, TD);
- }
- // If we have a trip count expression, rewrite the loop's exit condition
- // using it. We can currently only handle loops with a single exit.
- Value *NewICmp = 0;
- if (ExpandBECount && IndVar) {
- // Check preconditions for proper SCEVExpander operation. SCEV does not
- // express SCEVExpander's dependencies, such as LoopSimplify. Instead any
- // pass that uses the SCEVExpander must do it. This does not work well for
- // loop passes because SCEVExpander makes assumptions about all loops, while
- // LoopPassManager only forces the current loop to be simplified.
- //
- // FIXME: SCEV expansion has no way to bail out, so the caller must
- // explicitly check any assumptions made by SCEV. Brittle.
- const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(BackedgeTakenCount);
- if (!AR || AR->getLoop()->getLoopPreheader())
- NewICmp =
- LinearFunctionTestReplace(L, BackedgeTakenCount, IndVar, Rewriter);
- }
- // Rewrite IV-derived expressions.
- if (EnableIVRewrite)
- RewriteIVExpressions(L, Rewriter);
-
- // Clear the rewriter cache, because values that are in the rewriter's cache
- // can be deleted in the loop below, causing the AssertingVH in the cache to
- // trigger.
- Rewriter.clear();
-
- // Now that we're done iterating through lists, clean up any instructions
- // which are now dead.
- while (!DeadInsts.empty())
- if (Instruction *Inst =
- dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
- RecursivelyDeleteTriviallyDeadInstructions(Inst);
-
- // The Rewriter may not be used from this point on.
-
- // Loop-invariant instructions in the preheader that aren't used in the
- // loop may be sunk below the loop to reduce register pressure.
- SinkUnusedInvariants(L);
-
- // For completeness, inform IVUsers of the IV use in the newly-created
- // loop exit test instruction.
- if (IU && NewICmp) {
- ICmpInst *NewICmpInst = dyn_cast<ICmpInst>(NewICmp);
- if (NewICmpInst)
- IU->AddUsersIfInteresting(cast<Instruction>(NewICmpInst->getOperand(0)));
- }
- // Clean up dead instructions.
- Changed |= DeleteDeadPHIs(L->getHeader());
- // Check a post-condition.
- assert(L->isLCSSAForm(*DT) &&
- "Indvars did not leave the loop in lcssa form!");
-
-// Verify that LFTR and any other changes have not interfered with SCEV's
-// ability to compute the trip count.
-#ifndef NDEBUG
- if (!EnableIVRewrite && VerifyIndvars &&
- !isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
- SE->forgetLoop(L);
- const SCEV *NewBECount = SE->getBackedgeTakenCount(L);
- if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) <
- SE->getTypeSizeInBits(NewBECount->getType()))
- NewBECount =
- SE->getTruncateOrNoop(NewBECount, BackedgeTakenCount->getType());
- else
- BackedgeTakenCount =
- SE->getTruncateOrNoop(BackedgeTakenCount, NewBECount->getType());
- assert(BackedgeTakenCount == NewBECount && "indvars must preserve SCEV");
- }
-#endif
-
- return Changed;
-}
-
-INITIALIZE_PASS_BEGIN(PollyIndVarSimplify, "polly-indvars",
- "Induction Variable Simplification (Polly version)",
- false, false);
-INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
-INITIALIZE_PASS_DEPENDENCY(LoopInfo);
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution);
-INITIALIZE_PASS_DEPENDENCY(LoopSimplify);
-INITIALIZE_PASS_DEPENDENCY(LCSSA);
-INITIALIZE_PASS_DEPENDENCY(IVUsers);
-INITIALIZE_PASS_END(PollyIndVarSimplify, "polly-indvars",
- "Induction Variable Simplification (Polly version)", false,
- false)
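
As an aside for readers of the removed PollyIndVarSimplify above: LinearFunctionTestReplace rewrites a loop's exit test into a canonical equality comparison against a limit that is materialized once, loop-invariantly, from the backedge-taken count. A rough source-level analogy is sketched below; it is illustrative only (the real transformation operates on LLVM IR, and the function and variable names are made up):

#include <cstdio>

// Hypothetical source-level analogy of the LFTR rewrite; not the removed code.
static void beforeLFTR(int n, int m) {
  // The exit test is an ordered comparison against an arbitrary expression.
  for (int i = 0; i < n * m; ++i)
    std::printf("%d\n", i);
}

static void afterLFTR(int n, int m) {
  // The limit (derived from the backedge-taken count) is computed once in
  // the preheader, and the exit test becomes a canonical != comparison
  // against the incremented induction variable.
  const int Limit = n * m;
  for (int i = 0; i != Limit; ++i)
    std::printf("%d\n", i);
}

int main() {
  beforeLFTR(2, 3);
  afterLFTR(2, 3);
  return 0;
}
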
Removed: polly/trunk/lib/IndependentBlocks.cpp
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/IndependentBlocks.cpp?rev=203606&view=auto
==============================================================================
--- polly/trunk/lib/IndependentBlocks.cpp (original)
+++ polly/trunk/lib/IndependentBlocks.cpp (removed)
@@ -1,574 +0,0 @@
-//===------ IndependentBlocks.cpp - Create Independent Blocks in Regions --===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Create independent blocks in the regions detected by ScopDetection.
-//
-//===----------------------------------------------------------------------===//
-//
-#include "polly/LinkAllPasses.h"
-#include "polly/Options.h"
-#include "polly/CodeGen/BlockGenerators.h"
-#include "polly/CodeGen/Cloog.h"
-#include "polly/ScopDetection.h"
-#include "polly/Support/ScopHelper.h"
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/RegionInfo.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Support/CommandLine.h"
-#define DEBUG_TYPE "polly-independent"
-#include "llvm/Support/Debug.h"
-
-#include <vector>
-
-using namespace polly;
-using namespace llvm;
-
-static cl::opt<bool> DisableIntraScopScalarToArray(
- "disable-polly-intra-scop-scalar-to-array",
- cl::desc("Do not rewrite scalar to array to generate independent blocks"),
- cl::Hidden, cl::init(false), cl::cat(PollyCategory));
-
-namespace {
-struct IndependentBlocks : public FunctionPass {
- RegionInfo *RI;
- ScalarEvolution *SE;
- ScopDetection *SD;
- LoopInfo *LI;
-
- BasicBlock *AllocaBlock;
-
- static char ID;
-
- IndependentBlocks() : FunctionPass(ID) {}
-
- // Create new code for every instruction operator that can be expressed by a
- // SCEV. After this, only two kinds of instructions are left:
- //
- // 1. Instructions that only reference loop ivs or parameters outside the
- // region.
- //
- // 2. Instructions that are not used for any memory modification. (These
- // will be ignored later on.)
- //
- // Blocks containing only these kinds of instructions are called independent
- // blocks as they can be scheduled arbitrarily.
- bool createIndependentBlocks(BasicBlock *BB, const Region *R);
- bool createIndependentBlocks(const Region *R);
-
- // Run dead code elimination on the Scop to remove the scalar dependences
- // that come with trivially dead instructions.
- bool eliminateDeadCode(const Region *R);
-
- //===--------------------------------------------------------------------===//
- /// Non-trivial scalar dependence checking functions.
- /// Non-trivial scalar dependences occur when the def and use are located in
- /// different BBs and we cannot move them into the same one. This prevents
- /// us from scheduling the BBs arbitrarily.
- ///
- /// @brief This function checks if a scalar value that is part of the
- /// Scop is used outside of the Scop.
- ///
- /// @param Use The use of the instruction.
- /// @param R The maximum region in the Scop.
- ///
- /// @return Return true if the Use of an instruction and the instruction
- /// itself form a non trivial scalar dependence.
- static bool isEscapeUse(const Value *Use, const Region *R);
-
- /// @brief This function just checks if a Value is either defined in the same
- /// basic block or outside the region, such that there are no scalar
- /// dependences between basic blocks that are both part of the same
- /// region.
- ///
- /// @param Operand The operand of the instruction.
- /// @param CurBB The BasicBlock that contains the instruction.
- /// @param R The maximum region in the Scop.
- ///
- /// @return Return true if the Operand of an instruction and the instruction
- /// itself form a non trivial scalar (true) dependence.
- bool isEscapeOperand(const Value *Operand, const BasicBlock *CurBB,
- const Region *R) const;
-
- //===--------------------------------------------------------------------===//
- /// Operand tree moving functions.
- /// Trivial scalar dependences can be eliminated by moving the def into the
- /// same BB that contains the use.
- ///
- /// @brief Check if the instruction can be moved to another place safely.
- ///
- /// @param Inst The instruction.
- ///
- /// @return Return true if the instruction can be moved safely, false
- /// otherwise.
- static bool isSafeToMove(Instruction *Inst);
-
- typedef std::map<Instruction *, Instruction *> ReplacedMapType;
-
- /// @brief Move all safe to move instructions in the Operand Tree (DAG) to
- /// eliminate trivial scalar dependences.
- ///
- /// @param Inst The root of the operand Tree.
- /// @param R The maximum region in the Scop.
- /// @param ReplacedMap The map from each original instruction to the moved
- /// instruction.
- /// @param InsertPos The insert position of the moved instructions.
- void moveOperandTree(Instruction *Inst, const Region *R,
- ReplacedMapType &ReplacedMap, Instruction *InsertPos);
-
- bool isIndependentBlock(const Region *R, BasicBlock *BB) const;
- bool areAllBlocksIndependent(const Region *R) const;
-
- // Split the exit block to hold load instructions.
- bool splitExitBlock(Region *R);
- bool onlyUsedInRegion(Instruction *Inst, const Region *R);
- bool translateScalarToArray(BasicBlock *BB, const Region *R);
- bool translateScalarToArray(Instruction *Inst, const Region *R);
- bool translateScalarToArray(const Region *R);
-
- bool runOnFunction(Function &F);
- void verifyAnalysis() const;
- void verifyScop(const Region *R) const;
- void getAnalysisUsage(AnalysisUsage &AU) const;
-};
-}
-
-bool IndependentBlocks::isSafeToMove(Instruction *Inst) {
- if (Inst->mayReadFromMemory() || Inst->mayWriteToMemory())
- return false;
-
- return isSafeToSpeculativelyExecute(Inst);
-}
-
-void IndependentBlocks::moveOperandTree(Instruction *Inst, const Region *R,
- ReplacedMapType &ReplacedMap,
- Instruction *InsertPos) {
- BasicBlock *CurBB = Inst->getParent();
-
- // Depth-first traverse the operand tree (really an operand DAG, because we
- // stop at PHINodes, so there are no cycles).
- typedef Instruction::op_iterator ChildIt;
- std::vector<std::pair<Instruction *, ChildIt>> WorkStack;
-
- WorkStack.push_back(std::make_pair(Inst, Inst->op_begin()));
- DenseSet<Instruction *> VisitedSet;
-
- while (!WorkStack.empty()) {
- Instruction *CurInst = WorkStack.back().first;
- ChildIt It = WorkStack.back().second;
- DEBUG(dbgs() << "Checking Operand of Node:\n" << *CurInst << "\n------>\n");
- if (It == CurInst->op_end()) {
- // Insert the new instructions in topological order.
- if (!CurInst->getParent()) {
- CurInst->insertBefore(InsertPos);
- SE->forgetValue(CurInst);
- }
-
- WorkStack.pop_back();
- } else {
- // for each node N,
- Instruction *Operand = dyn_cast<Instruction>(*It);
- ++WorkStack.back().second;
-
- // Cannot move a non-instruction value.
- if (Operand == 0)
- continue;
-
- DEBUG(dbgs() << "For Operand:\n" << *Operand << "\n--->");
-
- // If the Scop Region does not contain N, skip it and all its operands and
- // continue, because we have reached a "parameter".
- // FIXME: We must keep the predicate instruction inside the Scop, otherwise
- // it will be translated to a load instruction, and we cannot handle a load
- // as an affine predicate at the moment.
- if (!R->contains(Operand) && !isa<TerminatorInst>(CurInst)) {
- DEBUG(dbgs() << "Out of region.\n");
- continue;
- }
-
- if (canSynthesize(Operand, LI, SE, R)) {
- DEBUG(dbgs() << "is IV.\n");
- continue;
- }
-
- // We cannot move the operand; a non-trivial scalar dependence was found!
- if (!isSafeToMove(Operand)) {
- DEBUG(dbgs() << "Can not move!\n");
- continue;
- }
-
- // No need to move the instruction if it is contained in the same BB as
- // the root instruction.
- if (Operand->getParent() == CurBB) {
- DEBUG(dbgs() << "No need to move.\n");
- // Try to move its operands, but do not visit an instruction twice.
- if (VisitedSet.insert(Operand).second)
- WorkStack.push_back(std::make_pair(Operand, Operand->op_begin()));
- continue;
- }
-
- // Now we need to move Operand to CurBB.
- // Check if we already moved it.
- ReplacedMapType::iterator At = ReplacedMap.find(Operand);
- if (At != ReplacedMap.end()) {
- DEBUG(dbgs() << "Moved.\n");
- Instruction *MovedOp = At->second;
- It->set(MovedOp);
- SE->forgetValue(MovedOp);
- } else {
- // Note that NewOp is not inserted into any BB yet; we will insert it when
- // it is popped from the work stack, so it will be inserted in topological
- // order.
- Instruction *NewOp = Operand->clone();
- NewOp->setName(Operand->getName() + ".moved.to." + CurBB->getName());
- DEBUG(dbgs() << "Move to " << *NewOp << "\n");
- It->set(NewOp);
- ReplacedMap.insert(std::make_pair(Operand, NewOp));
- SE->forgetValue(Operand);
-
- // Process its operands, but do not visit an instruction twice.
- if (VisitedSet.insert(NewOp).second)
- WorkStack.push_back(std::make_pair(NewOp, NewOp->op_begin()));
- }
- }
- }
-
- SE->forgetValue(Inst);
-}
-
-bool IndependentBlocks::createIndependentBlocks(BasicBlock *BB,
- const Region *R) {
- std::vector<Instruction *> WorkList;
- for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE; ++II)
- if (!isSafeToMove(II) && !canSynthesize(II, LI, SE, R))
- WorkList.push_back(II);
-
- ReplacedMapType ReplacedMap;
- Instruction *InsertPos = BB->getFirstNonPHIOrDbg();
-
- for (std::vector<Instruction *>::iterator I = WorkList.begin(),
- E = WorkList.end();
- I != E; ++I)
- moveOperandTree(*I, R, ReplacedMap, InsertPos);
-
- // The BB was changed if we replaced any operand.
- return !ReplacedMap.empty();
-}
-
-bool IndependentBlocks::createIndependentBlocks(const Region *R) {
- bool Changed = false;
-
- for (const auto &BB : R->blocks())
- Changed |= createIndependentBlocks(BB, R);
-
- return Changed;
-}
-
-bool IndependentBlocks::eliminateDeadCode(const Region *R) {
- std::vector<Instruction *> WorkList;
-
- // Find all trivially dead instructions.
- for (const auto &BB : R->blocks())
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
- if (isInstructionTriviallyDead(I))
- WorkList.push_back(I);
-
- if (WorkList.empty())
- return false;
-
- // Delete them so that the cross-BB scalar dependences that come with them
- // will also be eliminated.
- while (!WorkList.empty()) {
- RecursivelyDeleteTriviallyDeadInstructions(WorkList.back());
- WorkList.pop_back();
- }
-
- return true;
-}
-
-bool IndependentBlocks::isEscapeUse(const Value *Use, const Region *R) {
- // Non-instruction user will never escape.
- if (!isa<Instruction>(Use))
- return false;
-
- return !R->contains(cast<Instruction>(Use));
-}
-
-bool IndependentBlocks::isEscapeOperand(const Value *Operand,
- const BasicBlock *CurBB,
- const Region *R) const {
- const Instruction *OpInst = dyn_cast<Instruction>(Operand);
-
- // Non-instruction operand will never escape.
- if (OpInst == 0)
- return false;
-
- // Induction variables are valid operands.
- if (canSynthesize(OpInst, LI, SE, R))
- return false;
-
- // A value from a different BB is used in the same region.
- return R->contains(OpInst) && (OpInst->getParent() != CurBB);
-}
-
-bool IndependentBlocks::splitExitBlock(Region *R) {
- // Split the exit BB to place the load instructions for escaped users.
- BasicBlock *ExitBB = R->getExit();
- Region *ExitRegion = RI->getRegionFor(ExitBB);
-
- if (ExitBB != ExitRegion->getEntry())
- return false;
-
- BasicBlock *NewExit = createSingleExitEdge(R, this);
-
- std::vector<Region *> toUpdate;
- toUpdate.push_back(R);
-
- while (!toUpdate.empty()) {
- Region *Reg = toUpdate.back();
- toUpdate.pop_back();
-
- for (Region::iterator I = Reg->begin(), E = Reg->end(); I != E; ++I) {
- Region *SubR = *I;
-
- if (SubR->getExit() == ExitBB)
- toUpdate.push_back(SubR);
- }
-
- Reg->replaceExit(NewExit);
- }
-
- RI->setRegionFor(NewExit, R->getParent());
- return true;
-}
-
-bool IndependentBlocks::translateScalarToArray(const Region *R) {
- bool Changed = false;
-
- for (const auto &BB : R->blocks())
- Changed |= translateScalarToArray(BB, R);
-
- return Changed;
-}
-
-// Returns true when Inst is only used inside region R.
-bool IndependentBlocks::onlyUsedInRegion(Instruction *Inst, const Region *R) {
- for (User *U : Inst->users())
- if (Instruction *UI = dyn_cast<Instruction>(U))
- if (isEscapeUse(UI, R))
- return false;
-
- return true;
-}
-
-bool IndependentBlocks::translateScalarToArray(Instruction *Inst,
- const Region *R) {
- if (canSynthesize(Inst, LI, SE, R) && onlyUsedInRegion(Inst, R))
- return false;
-
- SmallVector<Instruction *, 4> LoadInside, LoadOutside;
- for (User *U : Inst->users())
- // Inst is referenced outside or referenced as an escaped operand.
- if (Instruction *UI = dyn_cast<Instruction>(U)) {
- if (isEscapeUse(UI, R))
- LoadOutside.push_back(UI);
-
- if (DisableIntraScopScalarToArray)
- continue;
-
- if (canSynthesize(UI, LI, SE, R))
- continue;
-
- BasicBlock *UParent = UI->getParent();
- if (R->contains(UParent) && isEscapeOperand(Inst, UParent, R))
- LoadInside.push_back(UI);
- }
-
- if (LoadOutside.empty() && LoadInside.empty())
- return false;
-
- // Create the alloca.
- AllocaInst *Slot = new AllocaInst(
- Inst->getType(), 0, Inst->getName() + ".s2a", AllocaBlock->begin());
- assert(!isa<InvokeInst>(Inst) && "Unexpected Invoke in Scop!");
-
- // Store right after Inst, and make sure the position is after all phi nodes.
- BasicBlock::iterator StorePos;
- if (isa<PHINode>(Inst)) {
- StorePos = Inst->getParent()->getFirstNonPHI();
- } else {
- StorePos = Inst;
- StorePos++;
- }
- (void)new StoreInst(Inst, Slot, StorePos);
-
- if (!LoadOutside.empty()) {
- LoadInst *ExitLoad = new LoadInst(Slot, Inst->getName() + ".loadoutside",
- false, R->getExit()->getFirstNonPHI());
-
- while (!LoadOutside.empty()) {
- Instruction *U = LoadOutside.pop_back_val();
- SE->forgetValue(U);
- U->replaceUsesOfWith(Inst, ExitLoad);
- }
- }
-
- while (!LoadInside.empty()) {
- Instruction *U = LoadInside.pop_back_val();
- assert(!isa<PHINode>(U) && "Can not handle PHI node inside!");
- SE->forgetValue(U);
- LoadInst *L = new LoadInst(Slot, Inst->getName() + ".loadarray", false, U);
- U->replaceUsesOfWith(Inst, L);
- }
-
- return true;
-}
-
-bool IndependentBlocks::translateScalarToArray(BasicBlock *BB,
- const Region *R) {
- bool changed = false;
-
- SmallVector<Instruction *, 32> Insts;
- for (BasicBlock::iterator II = BB->begin(), IE = --BB->end(); II != IE; ++II)
- Insts.push_back(II);
-
- while (!Insts.empty()) {
- Instruction *Inst = Insts.pop_back_val();
- changed |= translateScalarToArray(Inst, R);
- }
-
- return changed;
-}
-
-bool IndependentBlocks::isIndependentBlock(const Region *R,
- BasicBlock *BB) const {
- for (BasicBlock::iterator II = BB->begin(), IE = --BB->end(); II != IE;
- ++II) {
- Instruction *Inst = &*II;
-
- if (canSynthesize(Inst, LI, SE, R))
- continue;
-
- // A value inside the Scop is referenced outside.
- for (User *U : Inst->users()) {
- if (isEscapeUse(U, R)) {
- DEBUG(dbgs() << "Instruction not independent:\n");
- DEBUG(dbgs() << "Instruction used outside the Scop!\n");
- DEBUG(Inst->print(dbgs()));
- DEBUG(dbgs() << "\n");
- return false;
- }
- }
-
- if (DisableIntraScopScalarToArray)
- continue;
-
- for (Instruction::op_iterator OI = Inst->op_begin(), OE = Inst->op_end();
- OI != OE; ++OI) {
- if (isEscapeOperand(*OI, BB, R)) {
- DEBUG(dbgs() << "Instruction in function '";
- BB->getParent()->printAsOperand(dbgs(), false);
- dbgs() << "' not independent:\n");
- DEBUG(dbgs() << "Uses invalid operator\n");
- DEBUG(Inst->print(dbgs()));
- DEBUG(dbgs() << "\n");
- DEBUG(dbgs() << "Invalid operator is: ";
- (*OI)->printAsOperand(dbgs(), false); dbgs() << "\n");
- return false;
- }
- }
- }
-
- return true;
-}
-
-bool IndependentBlocks::areAllBlocksIndependent(const Region *R) const {
- for (const auto &BB : R->blocks())
- if (!isIndependentBlock(R, BB))
- return false;
-
- return true;
-}
-
-void IndependentBlocks::getAnalysisUsage(AnalysisUsage &AU) const {
- // FIXME: If we mark this pass as preserving the CFG, do the CFG-only
- // passes still need to be added via "addPreserved"?
- AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addPreserved<DominanceFrontier>();
- AU.addPreserved<PostDominatorTree>();
- AU.addRequired<RegionInfo>();
- AU.addPreserved<RegionInfo>();
- AU.addRequired<LoopInfo>();
- AU.addPreserved<LoopInfo>();
- AU.addRequired<ScalarEvolution>();
- AU.addPreserved<ScalarEvolution>();
- AU.addRequired<ScopDetection>();
- AU.addPreserved<ScopDetection>();
-#ifdef CLOOG_FOUND
- AU.addPreserved<CloogInfo>();
-#endif
-}
-
-bool IndependentBlocks::runOnFunction(llvm::Function &F) {
- bool Changed = false;
-
- RI = &getAnalysis<RegionInfo>();
- LI = &getAnalysis<LoopInfo>();
- SD = &getAnalysis<ScopDetection>();
- SE = &getAnalysis<ScalarEvolution>();
-
- AllocaBlock = &F.getEntryBlock();
-
- DEBUG(dbgs() << "Run IndepBlock on " << F.getName() << '\n');
-
- for (ScopDetection::iterator I = SD->begin(), E = SD->end(); I != E; ++I) {
- const Region *R = *I;
- Changed |= createIndependentBlocks(R);
- Changed |= eliminateDeadCode(R);
- // This may change the RegionTree.
- Changed |= splitExitBlock(const_cast<Region *>(R));
- }
-
- DEBUG(dbgs() << "Before Scalar to Array------->\n");
- DEBUG(F.dump());
-
- for (ScopDetection::iterator I = SD->begin(), E = SD->end(); I != E; ++I)
- Changed |= translateScalarToArray(*I);
-
- DEBUG(dbgs() << "After Independent Blocks------------->\n");
- DEBUG(F.dump());
-
- verifyAnalysis();
-
- return Changed;
-}
-
-void IndependentBlocks::verifyAnalysis() const {
- for (ScopDetection::const_iterator I = SD->begin(), E = SD->end(); I != E;
- ++I)
- verifyScop(*I);
-}
-
-void IndependentBlocks::verifyScop(const Region *R) const {
- assert(areAllBlocksIndependent(R) && "Cannot generate independent blocks");
-}
-
-char IndependentBlocks::ID = 0;
-char &polly::IndependentBlocksID = IndependentBlocks::ID;
-
-Pass *polly::createIndependentBlocksPass() { return new IndependentBlocks(); }
-
-INITIALIZE_PASS_BEGIN(IndependentBlocks, "polly-independent",
- "Polly - Create independent blocks", false, false);
-INITIALIZE_PASS_DEPENDENCY(LoopInfo);
-INITIALIZE_PASS_DEPENDENCY(RegionInfo);
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution);
-INITIALIZE_PASS_DEPENDENCY(ScopDetection);
-INITIALIZE_PASS_END(IndependentBlocks, "polly-independent",
- "Polly - Create independent blocks", false, false)
Removed: polly/trunk/lib/Pluto.cpp
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/Pluto.cpp?rev=203606&view=auto
==============================================================================
--- polly/trunk/lib/Pluto.cpp (original)
+++ polly/trunk/lib/Pluto.cpp (removed)
@@ -1,184 +0,0 @@
-//===- Pluto.cpp - Calculate an optimized schedule ---------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Use libpluto to optimize the schedule.
-//
-//===----------------------------------------------------------------------===//
-
-#include "polly/Config/config.h"
-
-#ifdef PLUTO_FOUND
-#include "polly/CodeGen/CodeGeneration.h"
-#include "polly/Dependences.h"
-#include "polly/LinkAllPasses.h"
-#include "polly/Options.h"
-#include "polly/ScopInfo.h"
-#include "polly/Support/GICHelper.h"
-
-#define DEBUG_TYPE "polly-opt-pluto"
-#include "llvm/Support/Debug.h"
-
-#include "pluto/libpluto.h"
-#include "isl/map.h"
-
-using namespace llvm;
-using namespace polly;
-
-static cl::opt<bool> EnableTiling("polly-pluto-tile", cl::desc("Enable tiling"),
- cl::Hidden, cl::init(false),
- cl::cat(PollyCategory));
-
-namespace {
-/// Convert an int into a string.
-static std::string convertInt(int number) {
- if (number == 0)
- return "0";
- std::string temp = "";
- std::string returnvalue = "";
- while (number > 0) {
- temp += number % 10 + 48;
- number /= 10;
- }
- for (unsigned i = 0; i < temp.length(); i++)
- returnvalue += temp[temp.length() - i - 1];
- return returnvalue;
-}
-
-class PlutoOptimizer : public ScopPass {
-
-public:
- static char ID;
- explicit PlutoOptimizer() : ScopPass(ID) {}
-
- virtual bool runOnScop(Scop &S);
- void printScop(llvm::raw_ostream &OS) const;
- void getAnalysisUsage(AnalysisUsage &AU) const;
- static void extendScattering(Scop &S, unsigned NewDimensions);
-};
-}
-
-char PlutoOptimizer::ID = 0;
-
-static int getSingleMap(__isl_take isl_map *map, void *user) {
- isl_map **singleMap = (isl_map **)user;
- *singleMap = map;
-
- return 0;
-}
-
-void PlutoOptimizer::extendScattering(Scop &S, unsigned NewDimensions) {
- for (Scop::iterator SI = S.begin(), SE = S.end(); SI != SE; ++SI) {
- ScopStmt *Stmt = *SI;
- unsigned OldDimensions = Stmt->getNumScattering();
- isl_space *Space;
- isl_map *Map, *New;
-
- Space = isl_space_alloc(Stmt->getIslCtx(), 0, OldDimensions, NewDimensions);
- Map = isl_map_universe(Space);
-
- for (unsigned i = 0; i < OldDimensions; i++)
- Map = isl_map_equate(Map, isl_dim_in, i, isl_dim_out, i);
-
- for (unsigned i = OldDimensions; i < NewDimensions; i++)
- Map = isl_map_fix_si(Map, isl_dim_out, i, 0);
-
- Map = isl_map_align_params(Map, S.getParamSpace());
- New = isl_map_apply_range(Stmt->getScattering(), Map);
- Stmt->setScattering(New);
- }
-}
-
-bool PlutoOptimizer::runOnScop(Scop &S) {
- isl_union_set *Domain;
- isl_union_map *Deps, *ToPlutoNames, *Schedule;
- PlutoOptions *Options;
-
- Dependences *D = &getAnalysis<Dependences>();
-
- int DependencesKinds =
- Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
-
- Deps = D->getDependences(DependencesKinds);
- Domain = S.getDomains();
- ToPlutoNames = isl_union_map_empty(S.getParamSpace());
-
- int counter = 0;
- for (Scop::iterator SI = S.begin(), SE = S.end(); SI != SE; ++SI) {
- ScopStmt *Stmt = *SI;
- std::string Name = "S_" + convertInt(counter);
- isl_map *Identity = isl_map_identity(isl_space_map_from_domain_and_range(
- Stmt->getDomainSpace(), Stmt->getDomainSpace()));
- Identity = isl_map_set_tuple_name(Identity, isl_dim_out, Name.c_str());
- ToPlutoNames = isl_union_map_add_map(ToPlutoNames, Identity);
- counter++;
- }
-
- Deps = isl_union_map_apply_domain(Deps, isl_union_map_copy(ToPlutoNames));
- Deps = isl_union_map_apply_range(Deps, isl_union_map_copy(ToPlutoNames));
- Domain = isl_union_set_apply(Domain, isl_union_map_copy(ToPlutoNames));
-
- Options = pluto_options_alloc();
- Options->fuse = 0;
- Options->tile = EnableTiling;
-
- DEBUG(dbgs() << "Domain: " << stringFromIslObj(Domain) << "\n";
- dbgs() << "Dependences: " << stringFromIslObj(Deps) << "\n";);
- Schedule = pluto_schedule(Domain, Deps, Options);
- pluto_options_free(Options);
-
- isl_union_set_free(Domain);
- isl_union_map_free(Deps);
-
- if (!Schedule)
- return false;
-
- Schedule =
- isl_union_map_apply_domain(Schedule, isl_union_map_reverse(ToPlutoNames));
-
- for (Scop::iterator SI = S.begin(), SE = S.end(); SI != SE; ++SI) {
- ScopStmt *Stmt = *SI;
- isl_set *Domain = Stmt->getDomain();
- isl_union_map *StmtBand;
- StmtBand = isl_union_map_intersect_domain(isl_union_map_copy(Schedule),
- isl_union_set_from_set(Domain));
- isl_map *StmtSchedule;
- isl_union_map_foreach_map(StmtBand, getSingleMap, &StmtSchedule);
- Stmt->setScattering(StmtSchedule);
- isl_union_map_free(StmtBand);
- }
-
- isl_union_map_free(Schedule);
-
- unsigned MaxScatDims = 0;
-
- for (Scop::iterator SI = S.begin(), SE = S.end(); SI != SE; ++SI)
- MaxScatDims = std::max((*SI)->getNumScattering(), MaxScatDims);
-
- extendScattering(S, MaxScatDims);
- return false;
-}
-
-void PlutoOptimizer::printScop(raw_ostream &OS) const {}
-
-void PlutoOptimizer::getAnalysisUsage(AnalysisUsage &AU) const {
- ScopPass::getAnalysisUsage(AU);
- AU.addRequired<Dependences>();
-}
-
-Pass *polly::createPlutoOptimizerPass() { return new PlutoOptimizer(); }
-
-INITIALIZE_PASS_BEGIN(PlutoOptimizer, "polly-opt-pluto",
- "Polly - Optimize schedule of SCoP (Pluto)", false,
- false);
-INITIALIZE_PASS_DEPENDENCY(Dependences);
-INITIALIZE_PASS_DEPENDENCY(ScopInfo);
-INITIALIZE_PASS_END(PlutoOptimizer, "polly-opt-pluto",
- "Polly - Optimize schedule of SCoP (Pluto)", false, false)
-
-#endif // PLUTO_FOUND
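A note on the convertInt helper removed above: for the non-negative statement counters it handles, it is equivalent to std::to_string (C++11), so the "S_<n>" renaming step can be sketched more compactly. The function name nameStatement below is hypothetical; the isl calls mirror those in PlutoOptimizer::runOnScop, and DomainSpace is assumed to be the statement's domain space:

  #include <string>
  #include "isl/map.h"
  #include "isl/space.h"

  // Sketch only: build the identity map of a statement's domain and give its
  // range tuple the name "S_<Counter>", as expected by libpluto.
  isl_map *nameStatement(isl_space *DomainSpace, int Counter) {
    std::string Name = "S_" + std::to_string(Counter);
    isl_map *Identity = isl_map_identity(isl_space_map_from_domain_and_range(
        isl_space_copy(DomainSpace), DomainSpace));
    return isl_map_set_tuple_name(Identity, isl_dim_out, Name.c_str());
  }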
Removed: polly/trunk/lib/Pocc.cpp
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/Pocc.cpp?rev=203606&view=auto
==============================================================================
--- polly/trunk/lib/Pocc.cpp (original)
+++ polly/trunk/lib/Pocc.cpp (removed)
@@ -1,284 +0,0 @@
-//===- Pocc.cpp - Pocc interface ----------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Pocc[1] interface.
-//
-// Pocc, the polyhedral compilation collection, is a collection of polyhedral
-// tools. It is used as an optimizer in Polly.
-//
-// [1] http://www-roc.inria.fr/~pouchet/software/pocc/
-//
-//===----------------------------------------------------------------------===//
-
-#include "polly/LinkAllPasses.h"
-
-#ifdef SCOPLIB_FOUND
-#include "polly/CodeGen/CodeGeneration.h"
-#include "polly/Dependences.h"
-#include "polly/Options.h"
-#include "polly/ScheduleOptimizer.h"
-#include "polly/ScopInfo.h"
-
-#define DEBUG_TYPE "polly-opt-pocc"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/Path.h"
-#include "llvm/Support/Program.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/system_error.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/SmallString.h"
-
-#include "polly/ScopLib.h"
-
-#include "isl/space.h"
-#include "isl/map.h"
-#include "isl/constraint.h"
-
-using namespace llvm;
-using namespace polly;
-
-static cl::opt<std::string> PlutoFuse("pluto-fuse",
- cl::desc("Set fuse mode of Pluto"),
- cl::Hidden, cl::init("maxfuse"),
- cl::cat(PollyCategory));
-
-namespace {
-
-class Pocc : public ScopPass {
- SmallString<128> PlutoStderr;
- SmallString<128> PlutoStdout;
- std::vector<const char *> arguments;
-
-public:
- static char ID;
- explicit Pocc() : ScopPass(ID) {}
-
- std::string getFileName(Region *R) const;
- virtual bool runOnScop(Scop &S);
- void printScop(llvm::raw_ostream &OS) const;
- void getAnalysisUsage(AnalysisUsage &AU) const;
-
-private:
- bool runTransform(Scop &S);
-};
-}
-
-char Pocc::ID = 0;
-bool Pocc::runTransform(Scop &S) {
- Dependences *D = &getAnalysis<Dependences>();
-
- // Create the scop file.
- SmallString<128> TempDir;
- SmallString<128> ScopFile;
- llvm::sys::path::system_temp_directory(/*erasedOnReboot=*/true, TempDir);
- ScopFile = TempDir;
- llvm::sys::path::append(ScopFile, "polly.scop");
-
- FILE *F = fopen(ScopFile.c_str(), "w");
-
- arguments.clear();
-
- if (!F) {
- errs() << "Cannot open file: " << TempDir.c_str() << "\n";
- errs() << "Skipping export.\n";
- return false;
- }
-
- ScopLib scoplib(&S);
- scoplib.print(F);
- fclose(F);
-
- // Execute pocc
- std::string pocc = sys::FindProgramByName("pocc");
-
- arguments.push_back("pocc");
- arguments.push_back("--read-scop");
- arguments.push_back(ScopFile.c_str());
- arguments.push_back("--pluto-tile-scat");
- arguments.push_back("--candl-dep-isl-simp");
- arguments.push_back("--cloogify-scheds");
- arguments.push_back("--output-scop");
- arguments.push_back("--pluto");
- arguments.push_back("--pluto-bounds");
- arguments.push_back("10");
- arguments.push_back("--pluto-fuse");
-
- arguments.push_back(PlutoFuse.c_str());
-
- if (!DisablePollyTiling)
- arguments.push_back("--pluto-tile");
-
- if (PollyVectorizerChoice != VECTORIZER_NONE)
- arguments.push_back("--pluto-prevector");
-
- arguments.push_back(0);
-
- PlutoStdout = TempDir;
- llvm::sys::path::append(PlutoStdout, "pluto.stdout");
- PlutoStderr = TempDir;
- llvm::sys::path::append(PlutoStderr, "pluto.stderr");
-
- std::vector<llvm::StringRef> Redirect;
- Redirect.push_back(0);
- Redirect.push_back(PlutoStdout.c_str());
- Redirect.push_back(PlutoStderr.c_str());
-
- sys::ExecuteAndWait(pocc, &arguments[0], 0,
- (const llvm::StringRef **)&Redirect[0]);
-
- // Read the created scop file
- SmallString<128> NewScopFile;
- NewScopFile = TempDir;
- llvm::sys::path::append(NewScopFile, "polly.pocc.c.scop");
-
- FILE *poccFile = fopen(NewScopFile.c_str(), "r");
- ScopLib newScoplib(&S, poccFile, D);
-
- if (!newScoplib.updateScattering()) {
- errs() << "Failure when calculating the optimization with "
- "the following command: ";
- for (std::vector<const char *>::const_iterator AI = arguments.begin(),
- AE = arguments.end();
- AI != AE; ++AI)
- if (*AI)
- errs() << " " << *AI;
- errs() << "\n";
- return false;
- } else
- fclose(poccFile);
-
- if (PollyVectorizerChoice == VECTORIZER_NONE)
- return false;
-
- // Find the innermost dimension that is not a constant dimension. This
- // dimension will be vectorized.
- unsigned scatterDims = S.getScatterDim();
- int lastLoop = scatterDims - 1;
-
- while (lastLoop) {
- bool isSingleValued = true;
-
- for (Scop::iterator SI = S.begin(), SE = S.end(); SI != SE; ++SI) {
- isl_map *scat = (*SI)->getScattering();
- isl_map *projected = isl_map_project_out(scat, isl_dim_out, lastLoop,
- scatterDims - lastLoop);
-
- if (!isl_map_is_bijective(projected)) {
- isSingleValued = false;
- break;
- }
- }
-
- if (!isSingleValued)
- break;
-
- lastLoop--;
- }
-
- // Strip mine the innermost loop.
- for (Scop::iterator SI = S.begin(), SE = S.end(); SI != SE; ++SI) {
- isl_map *scat = (*SI)->getScattering();
- int scatDims = (*SI)->getNumScattering();
- isl_space *Space = isl_space_alloc(S.getIslCtx(), S.getNumParams(),
- scatDims, scatDims + 1);
- isl_basic_map *map = isl_basic_map_universe(isl_space_copy(Space));
- isl_local_space *LSpace = isl_local_space_from_space(Space);
-
- for (int i = 0; i <= lastLoop - 1; i++) {
- isl_constraint *c = isl_equality_alloc(isl_local_space_copy(LSpace));
-
- isl_constraint_set_coefficient_si(c, isl_dim_in, i, 1);
- isl_constraint_set_coefficient_si(c, isl_dim_out, i, -1);
-
- map = isl_basic_map_add_constraint(map, c);
- }
-
- for (int i = lastLoop; i < scatDims; i++) {
- isl_constraint *c = isl_equality_alloc(isl_local_space_copy(LSpace));
-
- isl_constraint_set_coefficient_si(c, isl_dim_in, i, 1);
- isl_constraint_set_coefficient_si(c, isl_dim_out, i + 1, -1);
-
- map = isl_basic_map_add_constraint(map, c);
- }
-
- isl_constraint *c;
-
- int vectorWidth = 4;
- c = isl_inequality_alloc(isl_local_space_copy(LSpace));
- isl_constraint_set_coefficient_si(c, isl_dim_out, lastLoop, -vectorWidth);
- isl_constraint_set_coefficient_si(c, isl_dim_out, lastLoop + 1, 1);
- map = isl_basic_map_add_constraint(map, c);
-
- c = isl_inequality_alloc(LSpace);
- isl_constraint_set_coefficient_si(c, isl_dim_out, lastLoop, vectorWidth);
- isl_constraint_set_coefficient_si(c, isl_dim_out, lastLoop + 1, -1);
- isl_constraint_set_constant_si(c, vectorWidth - 1);
- map = isl_basic_map_add_constraint(map, c);
-
- isl_map *transform = isl_map_from_basic_map(map);
- transform = isl_map_set_tuple_name(transform, isl_dim_out, "scattering");
- transform = isl_map_set_tuple_name(transform, isl_dim_in, "scattering");
-
- scat = isl_map_apply_range(scat, isl_map_copy(transform));
- (*SI)->setScattering(scat);
- }
-
- return false;
-}
-bool Pocc::runOnScop(Scop &S) {
- bool Result = runTransform(S);
- DEBUG(printScop(dbgs()));
-
- return Result;
-}
-
-void Pocc::printScop(raw_ostream &OS) const {
- OwningPtr<MemoryBuffer> stdoutBuffer;
- OwningPtr<MemoryBuffer> stderrBuffer;
-
- OS << "Command line: ";
-
- for (std::vector<const char *>::const_iterator AI = arguments.begin(),
- AE = arguments.end();
- AI != AE; ++AI)
- if (*AI)
- OS << " " << *AI;
-
- OS << "\n";
-
- if (error_code ec = MemoryBuffer::getFile(PlutoStdout.str(), stdoutBuffer))
- OS << "Could not open pocc stdout file: " + ec.message() << "\n";
- else {
- OS << "pocc stdout: " << stdoutBuffer->getBufferIdentifier() << "\n";
- OS << stdoutBuffer->getBuffer() << "\n";
- }
-
- if (error_code ec = MemoryBuffer::getFile(PlutoStderr.str(), stderrBuffer))
- OS << "Could not open pocc stderr file: " + ec.message() << "\n";
- else {
- OS << "pocc stderr: " << PlutoStderr << "\n";
- OS << stderrBuffer->getBuffer() << "\n";
- }
-}
-
-void Pocc::getAnalysisUsage(AnalysisUsage &AU) const {
- ScopPass::getAnalysisUsage(AU);
- AU.addRequired<Dependences>();
-}
-
-Pass *polly::createPoccPass() { return new Pocc(); }
-
-INITIALIZE_PASS_BEGIN(Pocc, "polly-opt-pocc",
- "Polly - Optimize the scop using pocc", false, false);
-INITIALIZE_PASS_DEPENDENCY(Dependences);
-INITIALIZE_PASS_DEPENDENCY(ScopInfo);
-INITIALIZE_PASS_END(Pocc, "polly-opt-pocc",
- "Polly - Optimize the scop using pocc", false, false)
-#endif /* SCOPLIB_FOUND */
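To make the strip-mining loop at the end of Pocc::runTransform more concrete: for a single scattering dimension, lastLoop = 0 and vectorWidth = 4, the constructed relation is { scattering[s0] -> scattering[t, p] : p = s0 and 4t <= p <= 4t + 3 }, i.e. the original dimension is split into a tile counter t and a point dimension p of at most four iterations. A minimal sketch that parses and prints this relation, assuming only an isl installation (the map string is hand-derived from the constraints above):

  #include "isl/ctx.h"
  #include "isl/map.h"

  // Sketch only: the width-4 strip-mine relation for one scattering dimension.
  int main() {
    isl_ctx *Ctx = isl_ctx_alloc();
    isl_map *StripMine = isl_map_read_from_str(
        Ctx, "{ scattering[s0] -> scattering[t, p] : "
             "p = s0 and 4 * t <= p <= 4 * t + 3 }");
    isl_map_dump(StripMine); // prints the parsed relation
    isl_map_free(StripMine);
    isl_ctx_free(Ctx);
    return 0;
  }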
Removed: polly/trunk/lib/ScheduleOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/ScheduleOptimizer.cpp?rev=203606&view=auto
==============================================================================
--- polly/trunk/lib/ScheduleOptimizer.cpp (original)
+++ polly/trunk/lib/ScheduleOptimizer.cpp (removed)
@@ -1,610 +0,0 @@
-//===- ScheduleOptimizer.cpp - Calculate an optimized schedule ------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass uses isl to calculate a schedule that is optimized for parallelism
-// and tileability. The algorithm used in isl is an optimized version of the
-// algorithm described in the following paper:
-//
-// U. Bondhugula, A. Hartono, J. Ramanujam, and P. Sadayappan.
-// A Practical Automatic Polyhedral Parallelizer and Locality Optimizer.
-// In Proceedings of the 2008 ACM SIGPLAN Conference On Programming Language
-// Design and Implementation, PLDI '08, pages 101-113. ACM, 2008.
-//===----------------------------------------------------------------------===//
-
-#include "polly/ScheduleOptimizer.h"
-#include "isl/aff.h"
-#include "isl/band.h"
-#include "isl/constraint.h"
-#include "isl/map.h"
-#include "isl/options.h"
-#include "isl/schedule.h"
-#include "isl/space.h"
-#include "polly/CodeGen/CodeGeneration.h"
-#include "polly/Dependences.h"
-#include "polly/LinkAllPasses.h"
-#include "polly/Options.h"
-#include "polly/ScopInfo.h"
-
-#define DEBUG_TYPE "polly-opt-isl"
-#include "llvm/Support/Debug.h"
-
-using namespace llvm;
-using namespace polly;
-
-namespace polly {
-bool DisablePollyTiling;
-}
-static cl::opt<bool, true>
-DisableTiling("polly-no-tiling", cl::desc("Disable tiling in the scheduler"),
- cl::location(polly::DisablePollyTiling), cl::init(false),
- cl::cat(PollyCategory));
-
-static cl::opt<std::string>
-OptimizeDeps("polly-opt-optimize-only",
- cl::desc("Only a certain kind of dependences (all/raw)"),
- cl::Hidden, cl::init("all"), cl::cat(PollyCategory));
-
-static cl::opt<std::string>
-SimplifyDeps("polly-opt-simplify-deps",
- cl::desc("Dependences should be simplified (yes/no)"), cl::Hidden,
- cl::init("yes"), cl::cat(PollyCategory));
-
-static cl::opt<int>
-MaxConstantTerm("polly-opt-max-constant-term",
- cl::desc("The maximal constant term allowed (-1 is unlimited)"),
- cl::Hidden, cl::init(20), cl::cat(PollyCategory));
-
-static cl::opt<int>
-MaxCoefficient("polly-opt-max-coefficient",
- cl::desc("The maximal coefficient allowed (-1 is unlimited)"),
- cl::Hidden, cl::init(20), cl::cat(PollyCategory));
-
-static cl::opt<std::string>
-FusionStrategy("polly-opt-fusion",
- cl::desc("The fusion strategy to choose (min/max)"), cl::Hidden,
- cl::init("min"), cl::cat(PollyCategory));
-
-static cl::opt<std::string>
-MaximizeBandDepth("polly-opt-maximize-bands",
- cl::desc("Maximize the band depth (yes/no)"), cl::Hidden,
- cl::init("yes"), cl::cat(PollyCategory));
-
-namespace {
-
-class IslScheduleOptimizer : public ScopPass {
-
-public:
- static char ID;
- explicit IslScheduleOptimizer() : ScopPass(ID) { LastSchedule = NULL; }
-
- ~IslScheduleOptimizer() { isl_schedule_free(LastSchedule); }
-
- virtual bool runOnScop(Scop &S);
- void printScop(llvm::raw_ostream &OS) const;
- void getAnalysisUsage(AnalysisUsage &AU) const;
-
-private:
- isl_schedule *LastSchedule;
-
- static void extendScattering(Scop &S, unsigned NewDimensions);
-
- /// @brief Create a map that describes an n-dimensional tiling.
- ///
- /// getTileMap creates a map from an n-dimensional scattering space into a
- /// 2*n-dimensional scattering space. The map describes a rectangular
- /// tiling.
- ///
- /// Example:
- /// scheduleDimensions = 2, parameterDimensions = 1, tileSize = 32
- ///
- /// tileMap := [p0] -> {[s0, s1] -> [t0, t1, s0, s1]:
- /// t0 % 32 = 0 and t0 <= s0 < t0 + 32 and
- /// t1 % 32 = 0 and t1 <= s1 < t1 + 32}
- ///
- /// Before tiling:
- ///
- /// for (i = 0; i < N; i++)
- /// for (j = 0; j < M; j++)
- /// S(i,j)
- ///
- /// After tiling:
- ///
- /// for (t_i = 0; t_i < N; t_i+=32)
- /// for (t_j = 0; t_j < M; t_j+=32)
- /// for (i = t_i; i < min(t_i + 32, N); i++) | Unknown that N % 32 = 0
- /// for (j = t_j; j < t_j + 32; j++) | Known that M % 32 = 0
- /// S(i,j)
- ///
- static isl_basic_map *getTileMap(isl_ctx *ctx, int scheduleDimensions,
- isl_space *SpaceModel, int tileSize = 32);
-
- /// @brief Get the schedule for this band.
- ///
- /// Polly applies transformations like tiling on top of the isl-calculated
- /// schedule. This can influence the number of scheduling dimensions. The
- /// number of schedule dimensions is returned in the parameter 'Dimensions'.
- static isl_union_map *getScheduleForBand(isl_band *Band, int *Dimensions);
-
- /// @brief Create a map that pre-vectorizes one scheduling dimension.
- ///
- /// getPrevectorMap creates a map that maps each input dimension to the same
- /// output dimension, except for the dimension DimToVectorize.
- /// DimToVectorize is strip mined by 'VectorWidth' and the newly created
- /// point loop of DimToVectorize is moved to the innermost level.
- ///
- /// Example (DimToVectorize=0, ScheduleDimensions=2, VectorWidth=4):
- ///
- /// | Before transformation
- /// |
- /// | A[i,j] -> [i,j]
- /// |
- /// | for (i = 0; i < 128; i++)
- /// | for (j = 0; j < 128; j++)
- /// | A(i,j);
- ///
- /// Prevector map:
- /// [i,j] -> [it,j,ip] : it % 4 = 0 and it <= ip <= it + 3 and i = ip
- ///
- /// | After transformation:
- /// |
- /// | A[i,j] -> [it,j,ip] : it % 4 = 0 and it <= ip <= it + 3 and i = ip
- /// |
- /// | for (it = 0; it < 128; it+=4)
- /// | for (j = 0; j < 128; j++)
- /// | for (ip = max(0,it); ip < min(128, it + 3); ip++)
- /// | A(ip,j);
- ///
- /// The goal of this transformation is to create a trivially vectorizable
- /// loop. This means a parallel loop at the innermost level that has a
- /// constant number of iterations corresponding to the target vector width.
- ///
- /// This transformation creates a loop at the innermost level. The loop has
- /// a constant number of iterations, if the number of loop iterations at
- /// DimToVectorize can be divided by VectorWidth. The default VectorWidth is
- /// currently constant and not yet target specific. This function does not
- /// reason about parallelism.
- static isl_map *getPrevectorMap(isl_ctx *ctx, int DimToVectorize,
- int ScheduleDimensions, int VectorWidth = 4);
-
- /// @brief Get the scheduling map for a list of bands.
- ///
- /// Walk recursively the forest of bands to combine the schedules of the
- /// individual bands to the overall schedule. In case tiling is requested,
- /// the individual bands are tiled.
- static isl_union_map *getScheduleForBandList(isl_band_list *BandList);
-
- static isl_union_map *getScheduleMap(isl_schedule *Schedule);
-
- bool doFinalization() {
- isl_schedule_free(LastSchedule);
- LastSchedule = NULL;
- return true;
- }
-};
-}
-
-char IslScheduleOptimizer::ID = 0;
-
-void IslScheduleOptimizer::extendScattering(Scop &S, unsigned NewDimensions) {
- for (Scop::iterator SI = S.begin(), SE = S.end(); SI != SE; ++SI) {
- ScopStmt *Stmt = *SI;
- unsigned OldDimensions = Stmt->getNumScattering();
- isl_space *Space;
- isl_map *Map, *New;
-
- Space = isl_space_alloc(Stmt->getIslCtx(), 0, OldDimensions, NewDimensions);
- Map = isl_map_universe(Space);
-
- for (unsigned i = 0; i < OldDimensions; i++)
- Map = isl_map_equate(Map, isl_dim_in, i, isl_dim_out, i);
-
- for (unsigned i = OldDimensions; i < NewDimensions; i++)
- Map = isl_map_fix_si(Map, isl_dim_out, i, 0);
-
- Map = isl_map_align_params(Map, S.getParamSpace());
- New = isl_map_apply_range(Stmt->getScattering(), Map);
- Stmt->setScattering(New);
- }
-}
-
-isl_basic_map *IslScheduleOptimizer::getTileMap(isl_ctx *ctx,
- int scheduleDimensions,
- isl_space *SpaceModel,
- int tileSize) {
- // We construct
- //
- // tileMap := [p0] -> {[s0, s1] -> [t0, t1, p0, p1, a0, a1]:
- // s0 = a0 * 32 and s0 = p0 and t0 <= p0 < t0 + 32 and
- // s1 = a1 * 32 and s1 = p1 and t1 <= p1 < t1 + 32}
- //
- // and project out the auxiliary dimensions a0 and a1.
- isl_space *Space =
- isl_space_alloc(ctx, 0, scheduleDimensions, scheduleDimensions * 3);
- isl_basic_map *tileMap = isl_basic_map_universe(isl_space_copy(Space));
-
- isl_local_space *LocalSpace = isl_local_space_from_space(Space);
-
- for (int x = 0; x < scheduleDimensions; x++) {
- int sX = x;
- int tX = x;
- int pX = scheduleDimensions + x;
- int aX = 2 * scheduleDimensions + x;
-
- isl_constraint *c;
-
- // sX = aX * tileSize;
- c = isl_equality_alloc(isl_local_space_copy(LocalSpace));
- isl_constraint_set_coefficient_si(c, isl_dim_out, sX, 1);
- isl_constraint_set_coefficient_si(c, isl_dim_out, aX, -tileSize);
- tileMap = isl_basic_map_add_constraint(tileMap, c);
-
- // pX = sX;
- c = isl_equality_alloc(isl_local_space_copy(LocalSpace));
- isl_constraint_set_coefficient_si(c, isl_dim_out, pX, 1);
- isl_constraint_set_coefficient_si(c, isl_dim_in, sX, -1);
- tileMap = isl_basic_map_add_constraint(tileMap, c);
-
- // tX <= pX
- c = isl_inequality_alloc(isl_local_space_copy(LocalSpace));
- isl_constraint_set_coefficient_si(c, isl_dim_out, pX, 1);
- isl_constraint_set_coefficient_si(c, isl_dim_out, tX, -1);
- tileMap = isl_basic_map_add_constraint(tileMap, c);
-
- // pX <= tX + (tileSize - 1)
- c = isl_inequality_alloc(isl_local_space_copy(LocalSpace));
- isl_constraint_set_coefficient_si(c, isl_dim_out, tX, 1);
- isl_constraint_set_coefficient_si(c, isl_dim_out, pX, -1);
- isl_constraint_set_constant_si(c, tileSize - 1);
- tileMap = isl_basic_map_add_constraint(tileMap, c);
- }
-
- // Project out auxiliary dimensions.
- //
- // The auxiliary dimensions are transformed into existentially quantified
- // ones. This reduces the number of visible scattering dimensions and allows
- // Cloog to produce better code.
- tileMap = isl_basic_map_project_out(
- tileMap, isl_dim_out, 2 * scheduleDimensions, scheduleDimensions);
- isl_local_space_free(LocalSpace);
- return tileMap;
-}
-
-isl_union_map *IslScheduleOptimizer::getScheduleForBand(isl_band *Band,
- int *Dimensions) {
- isl_union_map *PartialSchedule;
- isl_ctx *ctx;
- isl_space *Space;
- isl_basic_map *TileMap;
- isl_union_map *TileUMap;
-
- PartialSchedule = isl_band_get_partial_schedule(Band);
- *Dimensions = isl_band_n_member(Band);
-
- if (DisableTiling)
- return PartialSchedule;
-
- // It does not make any sense to tile a band with just one dimension.
- if (*Dimensions == 1)
- return PartialSchedule;
-
- ctx = isl_union_map_get_ctx(PartialSchedule);
- Space = isl_union_map_get_space(PartialSchedule);
-
- TileMap = getTileMap(ctx, *Dimensions, Space);
- TileUMap = isl_union_map_from_map(isl_map_from_basic_map(TileMap));
- TileUMap = isl_union_map_align_params(TileUMap, Space);
- *Dimensions = 2 * *Dimensions;
-
- return isl_union_map_apply_range(PartialSchedule, TileUMap);
-}
-
-isl_map *IslScheduleOptimizer::getPrevectorMap(isl_ctx *ctx, int DimToVectorize,
- int ScheduleDimensions,
- int VectorWidth) {
- isl_space *Space;
- isl_local_space *LocalSpace, *LocalSpaceRange;
- isl_set *Modulo;
- isl_map *TilingMap;
- isl_constraint *c;
- isl_aff *Aff;
- int PointDimension; /* ip */
- int TileDimension; /* it */
- isl_val *VectorWidthMP;
-
- assert(0 <= DimToVectorize && DimToVectorize < ScheduleDimensions);
-
- Space = isl_space_alloc(ctx, 0, ScheduleDimensions, ScheduleDimensions + 1);
- TilingMap = isl_map_universe(isl_space_copy(Space));
- LocalSpace = isl_local_space_from_space(Space);
- PointDimension = ScheduleDimensions;
- TileDimension = DimToVectorize;
-
- // Create an identity map for everything except DimToVectorize and map
- // DimToVectorize to the point loop at the innermost dimension.
- for (int i = 0; i < ScheduleDimensions; i++) {
- c = isl_equality_alloc(isl_local_space_copy(LocalSpace));
- isl_constraint_set_coefficient_si(c, isl_dim_in, i, -1);
-
- if (i == DimToVectorize)
- isl_constraint_set_coefficient_si(c, isl_dim_out, PointDimension, 1);
- else
- isl_constraint_set_coefficient_si(c, isl_dim_out, i, 1);
-
- TilingMap = isl_map_add_constraint(TilingMap, c);
- }
-
- // it % 'VectorWidth' = 0
- LocalSpaceRange = isl_local_space_range(isl_local_space_copy(LocalSpace));
- Aff = isl_aff_zero_on_domain(LocalSpaceRange);
- Aff = isl_aff_set_constant_si(Aff, VectorWidth);
- Aff = isl_aff_set_coefficient_si(Aff, isl_dim_in, TileDimension, 1);
- VectorWidthMP = isl_val_int_from_si(ctx, VectorWidth);
- Aff = isl_aff_mod_val(Aff, VectorWidthMP);
- Modulo = isl_pw_aff_zero_set(isl_pw_aff_from_aff(Aff));
- TilingMap = isl_map_intersect_range(TilingMap, Modulo);
-
- // it <= ip
- c = isl_inequality_alloc(isl_local_space_copy(LocalSpace));
- isl_constraint_set_coefficient_si(c, isl_dim_out, TileDimension, -1);
- isl_constraint_set_coefficient_si(c, isl_dim_out, PointDimension, 1);
- TilingMap = isl_map_add_constraint(TilingMap, c);
-
- // ip <= it + ('VectorWidth' - 1)
- c = isl_inequality_alloc(LocalSpace);
- isl_constraint_set_coefficient_si(c, isl_dim_out, TileDimension, 1);
- isl_constraint_set_coefficient_si(c, isl_dim_out, PointDimension, -1);
- isl_constraint_set_constant_si(c, VectorWidth - 1);
- TilingMap = isl_map_add_constraint(TilingMap, c);
-
- return TilingMap;
-}
-
-isl_union_map *
-IslScheduleOptimizer::getScheduleForBandList(isl_band_list *BandList) {
- int NumBands;
- isl_union_map *Schedule;
- isl_ctx *ctx;
-
- ctx = isl_band_list_get_ctx(BandList);
- NumBands = isl_band_list_n_band(BandList);
- Schedule = isl_union_map_empty(isl_space_params_alloc(ctx, 0));
-
- for (int i = 0; i < NumBands; i++) {
- isl_band *Band;
- isl_union_map *PartialSchedule;
- int ScheduleDimensions;
- isl_space *Space;
-
- Band = isl_band_list_get_band(BandList, i);
- PartialSchedule = getScheduleForBand(Band, &ScheduleDimensions);
- Space = isl_union_map_get_space(PartialSchedule);
-
- if (isl_band_has_children(Band)) {
- isl_band_list *Children;
- isl_union_map *SuffixSchedule;
-
- Children = isl_band_get_children(Band);
- SuffixSchedule = getScheduleForBandList(Children);
- PartialSchedule =
- isl_union_map_flat_range_product(PartialSchedule, SuffixSchedule);
- isl_band_list_free(Children);
- } else if (PollyVectorizerChoice != VECTORIZER_NONE) {
- // In case we are at the innermost band, we try to prepare for
- // vectorization. This means we look for the innermost parallel loop
- // and strip-mine this loop to the innermost level using a strip-mine
- // factor corresponding to the number of vector iterations.
- int NumDims = isl_band_n_member(Band);
- for (int j = NumDims - 1; j >= 0; j--) {
- if (isl_band_member_is_coincident(Band, j)) {
- isl_map *TileMap;
- isl_union_map *TileUMap;
-
- TileMap = getPrevectorMap(ctx, ScheduleDimensions - NumDims + j,
- ScheduleDimensions);
- TileUMap = isl_union_map_from_map(TileMap);
- TileUMap =
- isl_union_map_align_params(TileUMap, isl_space_copy(Space));
- PartialSchedule =
- isl_union_map_apply_range(PartialSchedule, TileUMap);
- break;
- }
- }
- }
-
- Schedule = isl_union_map_union(Schedule, PartialSchedule);
-
- isl_band_free(Band);
- isl_space_free(Space);
- }
-
- return Schedule;
-}
-
-isl_union_map *IslScheduleOptimizer::getScheduleMap(isl_schedule *Schedule) {
- isl_band_list *BandList = isl_schedule_get_band_forest(Schedule);
- isl_union_map *ScheduleMap = getScheduleForBandList(BandList);
- isl_band_list_free(BandList);
- return ScheduleMap;
-}
-
-bool IslScheduleOptimizer::runOnScop(Scop &S) {
- Dependences *D = &getAnalysis<Dependences>();
-
- if (!D->hasValidDependences())
- return false;
-
- isl_schedule_free(LastSchedule);
- LastSchedule = NULL;
-
- // Build input data.
- int ValidityKinds =
- Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
- int ProximityKinds;
-
- if (OptimizeDeps == "all")
- ProximityKinds =
- Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
- else if (OptimizeDeps == "raw")
- ProximityKinds = Dependences::TYPE_RAW;
- else {
- errs() << "Do not know how to optimize for '" << OptimizeDeps << "'"
- << " Falling back to optimizing all dependences.\n";
- ProximityKinds =
- Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
- }
-
- isl_union_set *Domain = S.getDomains();
-
- if (!Domain)
- return false;
-
- isl_union_map *Validity = D->getDependences(ValidityKinds);
- isl_union_map *Proximity = D->getDependences(ProximityKinds);
-
- // Simplify the dependences by removing the constraints introduced by the
- // domains. This can speed up the scheduling time significantly, as large
- // constant coefficients will be removed from the dependences. The
- // introduction of some additional dependences reduces the possible
- // transformations, but in most cases such transformations do not seem to be
- // interesting anyway. In some cases this option may prevent the scheduler
- // from finding any schedule.
- if (SimplifyDeps == "yes") {
- Validity = isl_union_map_gist_domain(Validity, isl_union_set_copy(Domain));
- Validity = isl_union_map_gist_range(Validity, isl_union_set_copy(Domain));
- Proximity =
- isl_union_map_gist_domain(Proximity, isl_union_set_copy(Domain));
- Proximity = isl_union_map_gist_range(Proximity, isl_union_set_copy(Domain));
- } else if (SimplifyDeps != "no") {
- errs() << "warning: Option -polly-opt-simplify-deps should either be 'yes' "
- "or 'no'. Falling back to default: 'yes'\n";
- }
-
- DEBUG(dbgs() << "\n\nCompute schedule from: ");
- DEBUG(dbgs() << "Domain := "; isl_union_set_dump(Domain); dbgs() << ";\n");
- DEBUG(dbgs() << "Proximity := "; isl_union_map_dump(Proximity);
- dbgs() << ";\n");
- DEBUG(dbgs() << "Validity := "; isl_union_map_dump(Validity);
- dbgs() << ";\n");
-
- int IslFusionStrategy;
-
- if (FusionStrategy == "max") {
- IslFusionStrategy = ISL_SCHEDULE_FUSE_MAX;
- } else if (FusionStrategy == "min") {
- IslFusionStrategy = ISL_SCHEDULE_FUSE_MIN;
- } else {
- errs() << "warning: Unknown fusion strategy. Falling back to maximal "
- "fusion.\n";
- IslFusionStrategy = ISL_SCHEDULE_FUSE_MAX;
- }
-
- int IslMaximizeBands;
-
- if (MaximizeBandDepth == "yes") {
- IslMaximizeBands = 1;
- } else if (MaximizeBandDepth == "no") {
- IslMaximizeBands = 0;
- } else {
- errs() << "warning: Option -polly-opt-maximize-bands should either be 'yes'"
- " or 'no'. Falling back to default: 'yes'\n";
- IslMaximizeBands = 1;
- }
-
- isl_options_set_schedule_fuse(S.getIslCtx(), IslFusionStrategy);
- isl_options_set_schedule_maximize_band_depth(S.getIslCtx(), IslMaximizeBands);
- isl_options_set_schedule_max_constant_term(S.getIslCtx(), MaxConstantTerm);
- isl_options_set_schedule_max_coefficient(S.getIslCtx(), MaxCoefficient);
-
- isl_options_set_on_error(S.getIslCtx(), ISL_ON_ERROR_CONTINUE);
-
- isl_schedule_constraints *ScheduleConstraints;
- ScheduleConstraints = isl_schedule_constraints_on_domain(Domain);
- ScheduleConstraints =
- isl_schedule_constraints_set_proximity(ScheduleConstraints, Proximity);
- ScheduleConstraints = isl_schedule_constraints_set_validity(
- ScheduleConstraints, isl_union_map_copy(Validity));
- ScheduleConstraints =
- isl_schedule_constraints_set_coincidence(ScheduleConstraints, Validity);
- isl_schedule *Schedule;
- Schedule = isl_schedule_constraints_compute_schedule(ScheduleConstraints);
- isl_options_set_on_error(S.getIslCtx(), ISL_ON_ERROR_ABORT);
-
- // In case the scheduler is not able to optimize the code, we just do not
- // touch the schedule.
- if (!Schedule)
- return false;
-
- DEBUG(dbgs() << "Schedule := "; isl_schedule_dump(Schedule); dbgs() << ";\n");
-
- isl_union_map *ScheduleMap = getScheduleMap(Schedule);
-
- for (Scop::iterator SI = S.begin(), SE = S.end(); SI != SE; ++SI) {
- ScopStmt *Stmt = *SI;
- isl_map *StmtSchedule;
- isl_set *Domain = Stmt->getDomain();
- isl_union_map *StmtBand;
- StmtBand = isl_union_map_intersect_domain(isl_union_map_copy(ScheduleMap),
- isl_union_set_from_set(Domain));
- if (isl_union_map_is_empty(StmtBand)) {
- StmtSchedule = isl_map_from_domain(isl_set_empty(Stmt->getDomainSpace()));
- isl_union_map_free(StmtBand);
- } else {
- assert(isl_union_map_n_map(StmtBand) == 1);
- StmtSchedule = isl_map_from_union_map(StmtBand);
- }
-
- Stmt->setScattering(StmtSchedule);
- }
-
- isl_union_map_free(ScheduleMap);
- LastSchedule = Schedule;
-
- unsigned MaxScatDims = 0;
-
- for (Scop::iterator SI = S.begin(), SE = S.end(); SI != SE; ++SI)
- MaxScatDims = std::max((*SI)->getNumScattering(), MaxScatDims);
-
- extendScattering(S, MaxScatDims);
- return false;
-}
-
-void IslScheduleOptimizer::printScop(raw_ostream &OS) const {
- isl_printer *p;
- char *ScheduleStr;
-
- OS << "Calculated schedule:\n";
-
- if (!LastSchedule) {
- OS << "n/a\n";
- return;
- }
-
- p = isl_printer_to_str(isl_schedule_get_ctx(LastSchedule));
- p = isl_printer_print_schedule(p, LastSchedule);
- ScheduleStr = isl_printer_get_str(p);
- isl_printer_free(p);
-
- OS << ScheduleStr << "\n";
-}
-
-void IslScheduleOptimizer::getAnalysisUsage(AnalysisUsage &AU) const {
- ScopPass::getAnalysisUsage(AU);
- AU.addRequired<Dependences>();
-}
-
-Pass *polly::createIslScheduleOptimizerPass() {
- return new IslScheduleOptimizer();
-}
-
-INITIALIZE_PASS_BEGIN(IslScheduleOptimizer, "polly-opt-isl",
- "Polly - Optimize schedule of SCoP", false, false);
-INITIALIZE_PASS_DEPENDENCY(Dependences);
-INITIALIZE_PASS_DEPENDENCY(ScopInfo);
-INITIALIZE_PASS_END(IslScheduleOptimizer, "polly-opt-isl",
- "Polly - Optimize schedule of SCoP", false, false)
Copied: polly/trunk/lib/Transform/Canonicalization.cpp (from r203544, polly/trunk/lib/Canonicalization.cpp)
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/Transform/Canonicalization.cpp?p2=polly/trunk/lib/Transform/Canonicalization.cpp&p1=polly/trunk/lib/Canonicalization.cpp&r1=203544&r2=203607&rev=203607&view=diff
==============================================================================
(empty)
Copied: polly/trunk/lib/Transform/CodePreparation.cpp (from r203544, polly/trunk/lib/CodePreparation.cpp)
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/Transform/CodePreparation.cpp?p2=polly/trunk/lib/Transform/CodePreparation.cpp&p1=polly/trunk/lib/CodePreparation.cpp&r1=203544&r2=203607&rev=203607&view=diff
==============================================================================
(empty)
Copied: polly/trunk/lib/Transform/DeadCodeElimination.cpp (from r203544, polly/trunk/lib/DeadCodeElimination.cpp)
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/Transform/DeadCodeElimination.cpp?p2=polly/trunk/lib/Transform/DeadCodeElimination.cpp&p1=polly/trunk/lib/DeadCodeElimination.cpp&r1=203544&r2=203607&rev=203607&view=diff
==============================================================================
(empty)
Copied: polly/trunk/lib/Transform/IndVarSimplify.cpp (from r203544, polly/trunk/lib/IndVarSimplify.cpp)
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/Transform/IndVarSimplify.cpp?p2=polly/trunk/lib/Transform/IndVarSimplify.cpp&p1=polly/trunk/lib/IndVarSimplify.cpp&r1=203544&r2=203607&rev=203607&view=diff
==============================================================================
(empty)
Copied: polly/trunk/lib/Transform/IndependentBlocks.cpp (from r203544, polly/trunk/lib/IndependentBlocks.cpp)
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/Transform/IndependentBlocks.cpp?p2=polly/trunk/lib/Transform/IndependentBlocks.cpp&p1=polly/trunk/lib/IndependentBlocks.cpp&r1=203544&r2=203607&rev=203607&view=diff
==============================================================================
(empty)
Copied: polly/trunk/lib/Transform/Pluto.cpp (from r203544, polly/trunk/lib/Pluto.cpp)
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/Transform/Pluto.cpp?p2=polly/trunk/lib/Transform/Pluto.cpp&p1=polly/trunk/lib/Pluto.cpp&r1=203544&r2=203607&rev=203607&view=diff
==============================================================================
(empty)
Copied: polly/trunk/lib/Transform/Pocc.cpp (from r203544, polly/trunk/lib/Pocc.cpp)
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/Transform/Pocc.cpp?p2=polly/trunk/lib/Transform/Pocc.cpp&p1=polly/trunk/lib/Pocc.cpp&r1=203544&r2=203607&rev=203607&view=diff
==============================================================================
(empty)
Copied: polly/trunk/lib/Transform/ScheduleOptimizer.cpp (from r203544, polly/trunk/lib/ScheduleOptimizer.cpp)
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/Transform/ScheduleOptimizer.cpp?p2=polly/trunk/lib/Transform/ScheduleOptimizer.cpp&p1=polly/trunk/lib/ScheduleOptimizer.cpp&r1=203544&r2=203607&rev=203607&view=diff
==============================================================================
(empty)