[polly] r273982 - Move ScopBuilder into its own file. NFC.

Michael Kruse via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 27 18:37:21 PDT 2016


Author: meinersbur
Date: Mon Jun 27 20:37:20 2016
New Revision: 273982

URL: http://llvm.org/viewvc/llvm-project?rev=273982&view=rev
Log:
Move ScopBuilder into its own file. NFC.

The methods in ScopBuilder are used for the construction of a Scop,
while the remaining classes of ScopInfo are required by all passes that
use Polly's polyhedral analysis.
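
For illustration only (not part of this commit), a minimal sketch of how a
pass now drives the construction: ScopBuilder performs the building, while
the resulting Scop object carries the polyhedral description used by the
other passes. The getScop() accessor is assumed from the ScopBuilder
declaration in ScopInfo.h and is not shown in the diff below.

    // Hypothetical call site; the analysis references are obtained elsewhere.
    ScopBuilder SB(&R, AC, AA, DL, DT, LI, SD, SE); // runs buildScop()
    Scop *S = SB.getScop(); // assumed accessor; null if the SCoP was dismissed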

Added:
    polly/trunk/lib/Analysis/ScopBuilder.cpp
Modified:
    polly/trunk/include/polly/ScopInfo.h
    polly/trunk/lib/Analysis/ScopInfo.cpp
    polly/trunk/lib/CMakeLists.txt

Modified: polly/trunk/include/polly/ScopInfo.h
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/include/polly/ScopInfo.h?rev=273982&r1=273981&r2=273982&view=diff
==============================================================================
--- polly/trunk/include/polly/ScopInfo.h (original)
+++ polly/trunk/include/polly/ScopInfo.h Mon Jun 27 20:37:20 2016
@@ -7,10 +7,8 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// Create a polyhedral description for a static control flow region.
-//
-// The pass creates a polyhedral description of the Scops detected by the Scop
-// detection derived from their LLVM-IR code.
+// Store the polyhedral model representation of a static control flow region,
+// also called SCoP (Static Control Part).
 //
 // This representation is shared among several tools in the polyhedral
 // community, such as CLooG, Pluto, Loopo, and Graphite.

Added: polly/trunk/lib/Analysis/ScopBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/Analysis/ScopBuilder.cpp?rev=273982&view=auto
==============================================================================
--- polly/trunk/lib/Analysis/ScopBuilder.cpp (added)
+++ polly/trunk/lib/Analysis/ScopBuilder.cpp Mon Jun 27 20:37:20 2016
@@ -0,0 +1,670 @@
+//===- ScopBuilder.cpp ---------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Create a polyhedral description for a static control flow region.
+//
+// The pass creates a polyhedral description of the Scops detected by the SCoP
+// detection derived from their LLVM-IR code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "polly/Options.h"
+#include "polly/ScopInfo.h"
+#include "polly/Support/GICHelper.h"
+#include "polly/Support/SCEVValidator.h"
+#include "llvm/Analysis/RegionIterator.h"
+#include "llvm/IR/DiagnosticInfo.h"
+
+using namespace llvm;
+using namespace polly;
+
+#define DEBUG_TYPE "polly-scops"
+
+STATISTIC(ScopFound, "Number of valid Scops");
+STATISTIC(RichScopFound, "Number of Scops containing a loop");
+
+static cl::opt<bool> ModelReadOnlyScalars(
+    "polly-analyze-read-only-scalars",
+    cl::desc("Model read-only scalar values in the scop description"),
+    cl::Hidden, cl::ZeroOrMore, cl::init(true), cl::cat(PollyCategory));
+
+void ScopBuilder::buildPHIAccesses(PHINode *PHI, Region *NonAffineSubRegion,
+                                   bool IsExitBlock) {
+
+  // PHI nodes in the region's exit block, i.e. when IsExitBlock is true, are
+  // not modeled as ordinary PHI nodes because they are not part of the
+  // region. However, we model their operands in the predecessor blocks that
+  // are part of the region as regular scalar accesses.
+
+  // If we can synthesize a PHI we can skip it, but only if it is inside the
+  // region. If it is not, it can only be in the exit block of the region, in
+  // which case we model the operands but not the PHI itself.
+  auto *Scope = LI.getLoopFor(PHI->getParent());
+  if (!IsExitBlock && canSynthesize(PHI, *scop, &LI, &SE, Scope))
+    return;
+
+  // PHI nodes are modeled as if they had been demoted prior to the SCoP
+  // detection. Hence, the PHI is a load of a new memory location in which the
+  // incoming value was written at the end of the incoming basic block.
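+  // For example (illustrative), given
+  //   %phi = phi double [ %a, %then ], [ %b, %else ]
+  // we create a PHI write of %a in %then, a PHI write of %b in %else, and a
+  // PHI read in the block containing %phi.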
+  bool OnlyNonAffineSubRegionOperands = true;
+  for (unsigned u = 0; u < PHI->getNumIncomingValues(); u++) {
+    Value *Op = PHI->getIncomingValue(u);
+    BasicBlock *OpBB = PHI->getIncomingBlock(u);
+
+    // Do not build scalar dependences inside a non-affine subregion.
+    if (NonAffineSubRegion && NonAffineSubRegion->contains(OpBB))
+      continue;
+
+    OnlyNonAffineSubRegionOperands = false;
+    ensurePHIWrite(PHI, OpBB, Op, IsExitBlock);
+  }
+
+  if (!OnlyNonAffineSubRegionOperands && !IsExitBlock) {
+    addPHIReadAccess(PHI);
+  }
+}
+
+void ScopBuilder::buildScalarDependences(Instruction *Inst) {
+  assert(!isa<PHINode>(Inst));
+
+  // Pull-in required operands.
+  for (Use &Op : Inst->operands())
+    ensureValueRead(Op.get(), Inst->getParent());
+}
+
+void ScopBuilder::buildEscapingDependences(Instruction *Inst) {
+  // Check for uses of this instruction outside the scop. Because we do not
+  // iterate over such instructions and therefore did not "ensure" the
+  // existence of a write, we must determine such uses here.
+  for (Use &U : Inst->uses()) {
+    Instruction *UI = dyn_cast<Instruction>(U.getUser());
+    if (!UI)
+      continue;
+
+    BasicBlock *UseParent = getUseBlock(U);
+    BasicBlock *UserParent = UI->getParent();
+
+    // An escaping value is either used by an instruction not within the scop,
+    // or (when the scop region's exit needs to be simplified) by a PHI in the
+    // scop's exit block. This is because region simplification before code
+    // generation inserts new basic blocks before the PHI such that its incoming
+    // blocks are not in the scop anymore.
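+    // For example (illustrative), a value computed inside the scop but used
+    // by an instruction after the region escapes and hence needs a scalar
+    // write.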
+    if (!scop->contains(UseParent) ||
+        (isa<PHINode>(UI) && scop->isExit(UserParent) &&
+         scop->hasSingleExitEdge())) {
+      // At least one escaping use found.
+      ensureValueWrite(Inst);
+      break;
+    }
+  }
+}
+
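+// Try to derive a multi-dimensional access from the GEP's type structure,
+// i.e. from arrays with fixed, compile-time sizes such as (illustrative)
+// float A[128][64] accessed as A[i][j].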
+bool ScopBuilder::buildAccessMultiDimFixed(MemAccInst Inst, Loop *L) {
+  Value *Val = Inst.getValueOperand();
+  Type *ElementType = Val->getType();
+  Value *Address = Inst.getPointerOperand();
+  const SCEV *AccessFunction = SE.getSCEVAtScope(Address, L);
+  const SCEVUnknown *BasePointer =
+      dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));
+  enum MemoryAccess::AccessType AccType =
+      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;
+
+  if (auto *BitCast = dyn_cast<BitCastInst>(Address)) {
+    auto *Src = BitCast->getOperand(0);
+    auto *SrcTy = Src->getType();
+    auto *DstTy = BitCast->getType();
+    // Do not try to delinearize non-sized (opaque) pointers.
+    if ((SrcTy->isPointerTy() && !SrcTy->getPointerElementType()->isSized()) ||
+        (DstTy->isPointerTy() && !DstTy->getPointerElementType()->isSized())) {
+      return false;
+    }
+    if (SrcTy->isPointerTy() && DstTy->isPointerTy() &&
+        DL.getTypeAllocSize(SrcTy->getPointerElementType()) ==
+            DL.getTypeAllocSize(DstTy->getPointerElementType()))
+      Address = Src;
+  }
+
+  auto *GEP = dyn_cast<GetElementPtrInst>(Address);
+  if (!GEP)
+    return false;
+
+  std::vector<const SCEV *> Subscripts;
+  std::vector<int> Sizes;
+  std::tie(Subscripts, Sizes) = getIndexExpressionsFromGEP(GEP, SE);
+  auto *BasePtr = GEP->getOperand(0);
+
+  if (auto *BasePtrCast = dyn_cast<BitCastInst>(BasePtr))
+    BasePtr = BasePtrCast->getOperand(0);
+
+  // Check for identical base pointers to ensure that we do not miss index
+  // offsets that have been added before this GEP is applied.
+  if (BasePtr != BasePointer->getValue())
+    return false;
+
+  std::vector<const SCEV *> SizesSCEV;
+
+  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();
+  for (auto *Subscript : Subscripts) {
+    InvariantLoadsSetTy AccessILS;
+    if (!isAffineExpr(&scop->getRegion(), L, Subscript, SE, &AccessILS))
+      return false;
+
+    for (LoadInst *LInst : AccessILS)
+      if (!ScopRIL.count(LInst))
+        return false;
+  }
+
+  if (Sizes.empty())
+    return false;
+
+  for (auto V : Sizes)
+    SizesSCEV.push_back(SE.getSCEV(
+        ConstantInt::get(IntegerType::getInt64Ty(BasePtr->getContext()), V)));
+
+  addArrayAccess(Inst, AccType, BasePointer->getValue(), ElementType, true,
+                 Subscripts, SizesSCEV, Val);
+  return true;
+}
+
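+// Try to build a multi-dimensional access using the parametric shape that
+// delinearization recorded for this instruction during scop detection, e.g.
+// (illustrative) an access A[i * n + j] recovered as A[i][j] with size n.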
+bool ScopBuilder::buildAccessMultiDimParam(MemAccInst Inst, Loop *L) {
+  if (!PollyDelinearize)
+    return false;
+
+  Value *Address = Inst.getPointerOperand();
+  Value *Val = Inst.getValueOperand();
+  Type *ElementType = Val->getType();
+  unsigned ElementSize = DL.getTypeAllocSize(ElementType);
+  enum MemoryAccess::AccessType AccType =
+      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;
+
+  const SCEV *AccessFunction = SE.getSCEVAtScope(Address, L);
+  const SCEVUnknown *BasePointer =
+      dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));
+
+  assert(BasePointer && "Could not find base pointer");
+
+  auto &InsnToMemAcc = scop->getInsnToMemAccMap();
+  auto AccItr = InsnToMemAcc.find(Inst);
+  if (AccItr == InsnToMemAcc.end())
+    return false;
+
+  std::vector<const SCEV *> Sizes(
+      AccItr->second.Shape->DelinearizedSizes.begin(),
+      AccItr->second.Shape->DelinearizedSizes.end());
+  // Remove the element size. This information is already provided by the
+  // ElementSize parameter. In case the element size of this access and the
+  // element size used for delinearization differ, the delinearization is
+  // incorrect. Hence, we invalidate the scop.
+  //
+  // TODO: Handle delinearization with differing element sizes.
+  auto DelinearizedSize =
+      cast<SCEVConstant>(Sizes.back())->getAPInt().getSExtValue();
+  Sizes.pop_back();
+  if (ElementSize != DelinearizedSize)
+    scop->invalidate(DELINEARIZATION, Inst->getDebugLoc());
+
+  addArrayAccess(Inst, AccType, BasePointer->getValue(), ElementType, true,
+                 AccItr->second.DelinearizedSubscripts, Sizes, Val);
+  return true;
+}
+
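+// Model memory intrinsics such as memset and memcpy as byte-wise accesses: a
+// must-write of the destination range and, for transfers, a read of the
+// source range, each described by a start offset and an (affine) length.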
+bool ScopBuilder::buildAccessMemIntrinsic(MemAccInst Inst, Loop *L) {
+  auto *MemIntr = dyn_cast_or_null<MemIntrinsic>(Inst);
+
+  if (MemIntr == nullptr)
+    return false;
+
+  auto *LengthVal = SE.getSCEVAtScope(MemIntr->getLength(), L);
+  assert(LengthVal);
+
+  // Check if the length val is actually affine or if we overapproximate it.
+  InvariantLoadsSetTy AccessILS;
+  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();
+  bool LengthIsAffine =
+      isAffineExpr(&scop->getRegion(), L, LengthVal, SE, &AccessILS);
+  for (LoadInst *LInst : AccessILS)
+    if (!ScopRIL.count(LInst))
+      LengthIsAffine = false;
+  if (!LengthIsAffine)
+    LengthVal = nullptr;
+
+  auto *DestPtrVal = MemIntr->getDest();
+  assert(DestPtrVal);
+
+  auto *DestAccFunc = SE.getSCEVAtScope(DestPtrVal, L);
+  assert(DestAccFunc);
+  // Ignore accesses to "NULL".
+  // TODO: We could use this to optimize the region further, e.g., intersect
+  //       the context with
+  //          isl_set_complement(isl_set_params(getDomain()))
+  //       as we know it would be undefined to execute this instruction anyway.
+  if (DestAccFunc->isZero())
+    return true;
+
+  auto *DestPtrSCEV = dyn_cast<SCEVUnknown>(SE.getPointerBase(DestAccFunc));
+  assert(DestPtrSCEV);
+  DestAccFunc = SE.getMinusSCEV(DestAccFunc, DestPtrSCEV);
+  addArrayAccess(Inst, MemoryAccess::MUST_WRITE, DestPtrSCEV->getValue(),
+                 IntegerType::getInt8Ty(DestPtrVal->getContext()), false,
+                 {DestAccFunc, LengthVal}, {}, Inst.getValueOperand());
+
+  auto *MemTrans = dyn_cast<MemTransferInst>(MemIntr);
+  if (!MemTrans)
+    return true;
+
+  auto *SrcPtrVal = MemTrans->getSource();
+  assert(SrcPtrVal);
+
+  auto *SrcAccFunc = SE.getSCEVAtScope(SrcPtrVal, L);
+  assert(SrcAccFunc);
+  // Ignore accesses to "NULL".
+  // TODO: See the TODO above.
+  if (SrcAccFunc->isZero())
+    return true;
+
+  auto *SrcPtrSCEV = dyn_cast<SCEVUnknown>(SE.getPointerBase(SrcAccFunc));
+  assert(SrcPtrSCEV);
+  SrcAccFunc = SE.getMinusSCEV(SrcAccFunc, SrcPtrSCEV);
+  addArrayAccess(Inst, MemoryAccess::READ, SrcPtrSCEV->getValue(),
+                 IntegerType::getInt8Ty(SrcPtrVal->getContext()), false,
+                 {SrcAccFunc, LengthVal}, {}, Inst.getValueOperand());
+
+  return true;
+}
+
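+// Derive accesses for call instructions from their mod/ref behavior: calls
+// that only read memory are collected in GlobalReads and modeled once all
+// arrays are known; calls that only access argument pointees become accesses
+// to their pointer arguments.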
+bool ScopBuilder::buildAccessCallInst(MemAccInst Inst, Loop *L) {
+  auto *CI = dyn_cast_or_null<CallInst>(Inst);
+
+  if (CI == nullptr)
+    return false;
+
+  if (CI->doesNotAccessMemory() || isIgnoredIntrinsic(CI))
+    return true;
+
+  bool ReadOnly = false;
+  auto *AF = SE.getConstant(IntegerType::getInt64Ty(CI->getContext()), 0);
+  auto *CalledFunction = CI->getCalledFunction();
+  switch (AA.getModRefBehavior(CalledFunction)) {
+  case llvm::FMRB_UnknownModRefBehavior:
+    llvm_unreachable("Unknown mod ref behaviour cannot be represented.");
+  case llvm::FMRB_DoesNotAccessMemory:
+    return true;
+  case llvm::FMRB_OnlyReadsMemory:
+    GlobalReads.push_back(CI);
+    return true;
+  case llvm::FMRB_OnlyReadsArgumentPointees:
+    ReadOnly = true;
+  // Fall through
+  case llvm::FMRB_OnlyAccessesArgumentPointees:
+    auto AccType = ReadOnly ? MemoryAccess::READ : MemoryAccess::MAY_WRITE;
+    for (const auto &Arg : CI->arg_operands()) {
+      if (!Arg->getType()->isPointerTy())
+        continue;
+
+      auto *ArgSCEV = SE.getSCEVAtScope(Arg, L);
+      if (ArgSCEV->isZero())
+        continue;
+
+      auto *ArgBasePtr = cast<SCEVUnknown>(SE.getPointerBase(ArgSCEV));
+      addArrayAccess(Inst, AccType, ArgBasePtr->getValue(),
+                     ArgBasePtr->getType(), false, {AF}, {}, CI);
+    }
+    return true;
+  }
+
+  return true;
+}
+
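+// Model the access as single-dimensional, using the access function relative
+// to the base pointer as the only subscript. This is the fallback when no
+// multi-dimensional view could be derived.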
+void ScopBuilder::buildAccessSingleDim(MemAccInst Inst, Loop *L) {
+  Value *Address = Inst.getPointerOperand();
+  Value *Val = Inst.getValueOperand();
+  Type *ElementType = Val->getType();
+  enum MemoryAccess::AccessType AccType =
+      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;
+
+  const SCEV *AccessFunction = SE.getSCEVAtScope(Address, L);
+  const SCEVUnknown *BasePointer =
+      dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));
+
+  assert(BasePointer && "Could not find base pointer");
+  AccessFunction = SE.getMinusSCEV(AccessFunction, BasePointer);
+
+  // Check if the access depends on a loop contained in a non-affine subregion.
+  bool isVariantInNonAffineLoop = false;
+  SetVector<const Loop *> Loops;
+  auto &BoxedLoops = scop->getBoxedLoops();
+  findLoops(AccessFunction, Loops);
+  for (const Loop *L : Loops)
+    if (BoxedLoops.count(L))
+      isVariantInNonAffineLoop = true;
+
+  InvariantLoadsSetTy AccessILS;
+  bool IsAffine =
+      !isVariantInNonAffineLoop &&
+      isAffineExpr(&scop->getRegion(), L, AccessFunction, SE, &AccessILS);
+
+  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();
+  for (LoadInst *LInst : AccessILS)
+    if (!ScopRIL.count(LInst))
+      IsAffine = false;
+
+  if (!IsAffine && AccType == MemoryAccess::MUST_WRITE)
+    AccType = MemoryAccess::MAY_WRITE;
+
+  addArrayAccess(Inst, AccType, BasePointer->getValue(), ElementType, IsAffine,
+                 {AccessFunction}, {}, Val);
+}
+
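+// Dispatch to the matching access builder: memory intrinsics and call
+// instructions are handled specially; for ordinary loads and stores we try
+// the multi-dimensional forms first and fall back to a single-dimensional
+// access.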
+void ScopBuilder::buildMemoryAccess(MemAccInst Inst, Loop *L) {
+
+  if (buildAccessMemIntrinsic(Inst, L))
+    return;
+
+  if (buildAccessCallInst(Inst, L))
+    return;
+
+  if (buildAccessMultiDimFixed(Inst, L))
+    return;
+
+  if (buildAccessMultiDimParam(Inst, L))
+    return;
+
+  buildAccessSingleDim(Inst, L);
+}
+
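+// Walk the region tree and build access functions for every basic block; all
+// blocks of a non-affine subregion are attributed to that subregion.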
+void ScopBuilder::buildAccessFunctions(Region &SR) {
+
+  if (scop->isNonAffineSubRegion(&SR)) {
+    for (BasicBlock *BB : SR.blocks())
+      buildAccessFunctions(*BB, &SR);
+    return;
+  }
+
+  for (auto I = SR.element_begin(), E = SR.element_end(); I != E; ++I)
+    if (I->isSubRegion())
+      buildAccessFunctions(*I->getNodeAs<Region>());
+    else
+      buildAccessFunctions(*I->getNodeAs<BasicBlock>());
+}
+
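+// Create one ScopStmt per basic block, or a single region statement for an
+// entire non-affine subregion.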
+void ScopBuilder::buildStmts(Region &SR) {
+
+  if (scop->isNonAffineSubRegion(&SR)) {
+    scop->addScopStmt(nullptr, &SR);
+    return;
+  }
+
+  for (auto I = SR.element_begin(), E = SR.element_end(); I != E; ++I)
+    if (I->isSubRegion())
+      buildStmts(*I->getNodeAs<Region>());
+    else
+      scop->addScopStmt(I->getNodeAs<BasicBlock>(), nullptr);
+}
+
+void ScopBuilder::buildAccessFunctions(BasicBlock &BB,
+                                       Region *NonAffineSubRegion,
+                                       bool IsExitBlock) {
+  // We do not build access functions for error blocks, as they may contain
+  // instructions we cannot model.
+  if (isErrorBlock(BB, scop->getRegion(), LI, DT) && !IsExitBlock)
+    return;
+
+  Loop *L = LI.getLoopFor(&BB);
+
+  for (Instruction &Inst : BB) {
+    PHINode *PHI = dyn_cast<PHINode>(&Inst);
+    if (PHI)
+      buildPHIAccesses(PHI, NonAffineSubRegion, IsExitBlock);
+
+    // For the exit block we stop modeling after the last PHI node.
+    if (!PHI && IsExitBlock)
+      break;
+
+    if (auto MemInst = MemAccInst::dyn_cast(Inst))
+      buildMemoryAccess(MemInst, L);
+
+    if (isIgnoredIntrinsic(&Inst))
+      continue;
+
+    // PHI nodes have already been modeled above and TerminatorInsts that are
+    // not part of a non-affine subregion are fully modeled and regenerated
+    // from the polyhedral domains. Hence, they do not need to be modeled as
+    // explicit data dependences.
+    if (!PHI && (!isa<TerminatorInst>(&Inst) || NonAffineSubRegion))
+      buildScalarDependences(&Inst);
+
+    if (!IsExitBlock)
+      buildEscapingDependences(&Inst);
+  }
+}
+
+MemoryAccess *ScopBuilder::addMemoryAccess(
+    BasicBlock *BB, Instruction *Inst, MemoryAccess::AccessType AccType,
+    Value *BaseAddress, Type *ElementType, bool Affine, Value *AccessValue,
+    ArrayRef<const SCEV *> Subscripts, ArrayRef<const SCEV *> Sizes,
+    ScopArrayInfo::MemoryKind Kind) {
+  ScopStmt *Stmt = scop->getStmtFor(BB);
+
+  // Do not create a memory access for anything not in the SCoP. It would be
+  // ignored anyway.
+  if (!Stmt)
+    return nullptr;
+
+  AccFuncSetType &AccList = scop->getOrCreateAccessFunctions(BB);
+  Value *BaseAddr = BaseAddress;
+  std::string BaseName = getIslCompatibleName("MemRef_", BaseAddr, "");
+
+  bool isKnownMustAccess = false;
+
+  // Accesses in single-basic-block statements are always executed.
+  if (Stmt->isBlockStmt())
+    isKnownMustAccess = true;
+
+  if (Stmt->isRegionStmt()) {
+    // Accesses that dominate the exit block of a non-affine region are always
+    // executed. In non-affine regions there may exist accesses that do not
+    // dominate the exit. MK_Value writes always dominate the exit; MK_PHI
+    // writes do so only if there is at most one PHI_WRITE in the non-affine
+    // region.
+    if (DT.dominates(BB, Stmt->getRegion()->getExit()))
+      isKnownMustAccess = true;
+  }
+
+  // Non-affine PHI writes do not "happen" at a particular instruction, but
+  // after exiting the statement. Therefore they are guaranteed to execute
+  // and overwrite the old value.
+  if (Kind == ScopArrayInfo::MK_PHI || Kind == ScopArrayInfo::MK_ExitPHI)
+    isKnownMustAccess = true;
+
+  if (!isKnownMustAccess && AccType == MemoryAccess::MUST_WRITE)
+    AccType = MemoryAccess::MAY_WRITE;
+
+  AccList.emplace_back(Stmt, Inst, AccType, BaseAddress, ElementType, Affine,
+                       Subscripts, Sizes, AccessValue, Kind, BaseName);
+  Stmt->addAccess(&AccList.back());
+  return &AccList.back();
+}
+
+void ScopBuilder::addArrayAccess(
+    MemAccInst MemAccInst, MemoryAccess::AccessType AccType, Value *BaseAddress,
+    Type *ElementType, bool IsAffine, ArrayRef<const SCEV *> Subscripts,
+    ArrayRef<const SCEV *> Sizes, Value *AccessValue) {
+  ArrayBasePointers.insert(BaseAddress);
+  addMemoryAccess(MemAccInst->getParent(), MemAccInst, AccType, BaseAddress,
+                  ElementType, IsAffine, AccessValue, Subscripts, Sizes,
+                  ScopArrayInfo::MK_Array);
+}
+
+void ScopBuilder::ensureValueWrite(Instruction *Inst) {
+  ScopStmt *Stmt = scop->getStmtFor(Inst);
+
+  // Inst not defined within this SCoP.
+  if (!Stmt)
+    return;
+
+  // Do not process further if the instruction is already written.
+  if (Stmt->lookupValueWriteOf(Inst))
+    return;
+
+  addMemoryAccess(Inst->getParent(), Inst, MemoryAccess::MUST_WRITE, Inst,
+                  Inst->getType(), true, Inst, ArrayRef<const SCEV *>(),
+                  ArrayRef<const SCEV *>(), ScopArrayInfo::MK_Value);
+}
+
+void ScopBuilder::ensureValueRead(Value *V, BasicBlock *UserBB) {
+
+  // There cannot be an "access" for literal constants. BasicBlock references
+  // (jump destinations) also never change.
+  if ((isa<Constant>(V) && !isa<GlobalVariable>(V)) || isa<BasicBlock>(V))
+    return;
+
+  // If the value can be synthesized and the user is in the region, we do
+  // not need to add a value dependence.
+  auto *Scope = LI.getLoopFor(UserBB);
+  if (canSynthesize(V, *scop, &LI, &SE, Scope))
+    return;
+
+  // Do not build scalar dependences for required invariant loads as we will
+  // hoist them later on anyway or drop the SCoP if we cannot.
+  auto &ScopRIL = scop->getRequiredInvariantLoads();
+  if (ScopRIL.count(dyn_cast<LoadInst>(V)))
+    return;
+
+  // Determine the ScopStmt containing the value's definition and use. There is
+  // no defining ScopStmt if the value is a function argument, a global value,
+  // or defined outside the SCoP.
+  Instruction *ValueInst = dyn_cast<Instruction>(V);
+  ScopStmt *ValueStmt = ValueInst ? scop->getStmtFor(ValueInst) : nullptr;
+
+  ScopStmt *UserStmt = scop->getStmtFor(UserBB);
+
+  // We do not model uses outside the scop.
+  if (!UserStmt)
+    return;
+
+  // Add MemoryAccess for invariant values only if requested.
+  if (!ModelReadOnlyScalars && !ValueStmt)
+    return;
+
+  // Ignore use-def chains within the same ScopStmt.
+  if (ValueStmt == UserStmt)
+    return;
+
+  // Do not create another MemoryAccess for reloading the value if one already
+  // exists.
+  if (UserStmt->lookupValueReadOf(V))
+    return;
+
+  // For exit PHIs use the MK_ExitPHI MemoryKind not MK_Value.
+  ScopArrayInfo::MemoryKind Kind = ScopArrayInfo::MK_Value;
+  if (!ValueStmt && isa<PHINode>(V))
+    Kind = ScopArrayInfo::MK_ExitPHI;
+
+  addMemoryAccess(UserBB, nullptr, MemoryAccess::READ, V, V->getType(), true, V,
+                  ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(), Kind);
+  if (ValueInst)
+    ensureValueWrite(ValueInst);
+}
+
+void ScopBuilder::ensurePHIWrite(PHINode *PHI, BasicBlock *IncomingBlock,
+                                 Value *IncomingValue, bool IsExitBlock) {
+  // As the incoming block might turn out to be an error statement, ensure we
+  // will create an exit PHI SAI object. It is needed during code generation
+  // and would be created later anyway.
+  if (IsExitBlock)
+    scop->getOrCreateScopArrayInfo(PHI, PHI->getType(), {},
+                                   ScopArrayInfo::MK_ExitPHI);
+
+  ScopStmt *IncomingStmt = scop->getStmtFor(IncomingBlock);
+  if (!IncomingStmt)
+    return;
+
+  // Make sure the incoming value is available in the incoming block. This
+  // must be done before the check for multiple PHI writes because each of
+  // multiple exiting edges from a subregion can provide the effective written
+  // value of the subregion. As such, all of them must be made available in
+  // the subregion statement.
+  ensureValueRead(IncomingValue, IncomingBlock);
+
+  // Do not add more than one MemoryAccess per PHINode and ScopStmt.
+  if (MemoryAccess *Acc = IncomingStmt->lookupPHIWriteOf(PHI)) {
+    assert(Acc->getAccessInstruction() == PHI);
+    Acc->addIncoming(IncomingBlock, IncomingValue);
+    return;
+  }
+
+  MemoryAccess *Acc = addMemoryAccess(
+      IncomingStmt->getEntryBlock(), PHI, MemoryAccess::MUST_WRITE, PHI,
+      PHI->getType(), true, PHI, ArrayRef<const SCEV *>(),
+      ArrayRef<const SCEV *>(),
+      IsExitBlock ? ScopArrayInfo::MK_ExitPHI : ScopArrayInfo::MK_PHI);
+  assert(Acc);
+  Acc->addIncoming(IncomingBlock, IncomingValue);
+}
+
+void ScopBuilder::addPHIReadAccess(PHINode *PHI) {
+  addMemoryAccess(PHI->getParent(), PHI, MemoryAccess::READ, PHI,
+                  PHI->getType(), true, PHI, ArrayRef<const SCEV *>(),
+                  ArrayRef<const SCEV *>(), ScopArrayInfo::MK_PHI);
+}
+
+void ScopBuilder::buildScop(Region &R, AssumptionCache &AC) {
+  scop.reset(new Scop(R, SE, LI, *SD.getDetectionContext(&R)));
+
+  buildStmts(R);
+  buildAccessFunctions(R);
+
+  // In case the region does not have a single exit edge, we will later
+  // (during code generation) split the exit block. This will move potential
+  // PHI nodes from the current exit block into the new region exiting block.
+  // Hence, PHI nodes that are at this point not part of the region will be
+  // afterwards. To handle these PHI nodes later, we model their operands now
+  // as scalar accesses. Note that we do not model anything in the exit block
+  // if we have an exiting block in the region, as there will not be any
+  // splitting later.
+  if (!scop->hasSingleExitEdge())
+    buildAccessFunctions(*R.getExit(), nullptr,
+                         /* IsExitBlock */ true);
+
+  // Create memory accesses for global reads since all arrays are now known.
+  auto *AF = SE.getConstant(IntegerType::getInt64Ty(SE.getContext()), 0);
+  for (auto *GlobalRead : GlobalReads)
+    for (auto *BP : ArrayBasePointers)
+      addArrayAccess(MemAccInst(GlobalRead), MemoryAccess::READ, BP,
+                     BP->getType(), false, {AF}, {}, GlobalRead);
+
+  scop->init(AA, AC, DT, LI);
+}
+
+ScopBuilder::ScopBuilder(Region *R, AssumptionCache &AC, AliasAnalysis &AA,
+                         const DataLayout &DL, DominatorTree &DT, LoopInfo &LI,
+                         ScopDetection &SD, ScalarEvolution &SE)
+    : AA(AA), DL(DL), DT(DT), LI(LI), SD(SD), SE(SE) {
+
+  Function *F = R->getEntry()->getParent();
+
+  DebugLoc Beg, End;
+  getDebugLocations(getBBPairForRegion(R), Beg, End);
+  std::string Msg = "SCoP begins here.";
+  emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE, *F, Beg, Msg);
+
+  buildScop(*R, AC);
+
+  DEBUG(scop->print(dbgs()));
+
+  if (!scop->hasFeasibleRuntimeContext()) {
+    Msg = "SCoP ends here but was dismissed.";
+    scop.reset();
+  } else {
+    Msg = "SCoP ends here.";
+    ++ScopFound;
+    if (scop->getMaxLoopDepth() > 0)
+      ++RichScopFound;
+  }
+
+  emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE, *F, End, Msg);
+}

Modified: polly/trunk/lib/Analysis/ScopInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/Analysis/ScopInfo.cpp?rev=273982&r1=273981&r2=273982&view=diff
==============================================================================
--- polly/trunk/lib/Analysis/ScopInfo.cpp (original)
+++ polly/trunk/lib/Analysis/ScopInfo.cpp Mon Jun 27 20:37:20 2016
@@ -1,4 +1,4 @@
-//===--------- ScopInfo.cpp  - Create Scops from LLVM IR ------------------===//
+//===--------- ScopInfo.cpp ----------------------------------------------===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -60,9 +60,6 @@ using namespace polly;
 
 #define DEBUG_TYPE "polly-scops"
 
-STATISTIC(ScopFound, "Number of valid Scops");
-STATISTIC(RichScopFound, "Number of Scops containing a loop");
-
 // The maximal number of basic sets we allow to be created during domain
 // construction. More complex scops will result in very high compile time and
 // are also unlikely to result in good code.
@@ -73,11 +70,6 @@ static cl::opt<bool> PollyRemarksMinimal
     cl::desc("Do not emit remarks about assumptions that are known"),
     cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::cat(PollyCategory));
 
-static cl::opt<bool> ModelReadOnlyScalars(
-    "polly-analyze-read-only-scalars",
-    cl::desc("Model read-only scalar values in the scop description"),
-    cl::Hidden, cl::ZeroOrMore, cl::init(true), cl::cat(PollyCategory));
-
 // Multiplicative reductions can be disabled separately as these kinds of
 // operations can overflow easily. Additive reductions and bit operations
 // are in contrast pretty stable.
@@ -4185,641 +4177,6 @@ int Scop::getRelativeLoopDepth(const Loo
   return L->getLoopDepth() - OuterLoop->getLoopDepth();
 }
 
-void ScopBuilder::buildPHIAccesses(PHINode *PHI, Region *NonAffineSubRegion,
-                                   bool IsExitBlock) {
-
-  // PHI nodes in the region's exit block, i.e. when IsExitBlock is true, are
-  // not modeled as ordinary PHI nodes because they are not part of the
-  // region. However, we model their operands in the predecessor blocks that
-  // are part of the region as regular scalar accesses.
-
-  // If we can synthesize a PHI we can skip it, but only if it is inside the
-  // region. If it is not, it can only be in the exit block of the region, in
-  // which case we model the operands but not the PHI itself.
-  auto *Scope = LI.getLoopFor(PHI->getParent());
-  if (!IsExitBlock && canSynthesize(PHI, *scop, &LI, &SE, Scope))
-    return;
-
-  // PHI nodes are modeled as if they had been demoted prior to the SCoP
-  // detection. Hence, the PHI is a load of a new memory location in which the
-  // incoming value was written at the end of the incoming basic block.
-  bool OnlyNonAffineSubRegionOperands = true;
-  for (unsigned u = 0; u < PHI->getNumIncomingValues(); u++) {
-    Value *Op = PHI->getIncomingValue(u);
-    BasicBlock *OpBB = PHI->getIncomingBlock(u);
-
-    // Do not build scalar dependences inside a non-affine subregion.
-    if (NonAffineSubRegion && NonAffineSubRegion->contains(OpBB))
-      continue;
-
-    OnlyNonAffineSubRegionOperands = false;
-    ensurePHIWrite(PHI, OpBB, Op, IsExitBlock);
-  }
-
-  if (!OnlyNonAffineSubRegionOperands && !IsExitBlock) {
-    addPHIReadAccess(PHI);
-  }
-}
-
-void ScopBuilder::buildScalarDependences(Instruction *Inst) {
-  assert(!isa<PHINode>(Inst));
-
-  // Pull-in required operands.
-  for (Use &Op : Inst->operands())
-    ensureValueRead(Op.get(), Inst->getParent());
-}
-
-void ScopBuilder::buildEscapingDependences(Instruction *Inst) {
-  // Check for uses of this instruction outside the scop. Because we do not
-  // iterate over such instructions and therefore did not "ensure" the
-  // existence of a write, we must determine such uses here.
-  for (Use &U : Inst->uses()) {
-    Instruction *UI = dyn_cast<Instruction>(U.getUser());
-    if (!UI)
-      continue;
-
-    BasicBlock *UseParent = getUseBlock(U);
-    BasicBlock *UserParent = UI->getParent();
-
-    // An escaping value is either used by an instruction not within the scop,
-    // or (when the scop region's exit needs to be simplified) by a PHI in the
-    // scop's exit block. This is because region simplification before code
-    // generation inserts new basic blocks before the PHI such that its incoming
-    // blocks are not in the scop anymore.
-    if (!scop->contains(UseParent) ||
-        (isa<PHINode>(UI) && scop->isExit(UserParent) &&
-         scop->hasSingleExitEdge())) {
-      // At least one escaping use found.
-      ensureValueWrite(Inst);
-      break;
-    }
-  }
-}
-
-bool ScopBuilder::buildAccessMultiDimFixed(MemAccInst Inst, Loop *L) {
-  Value *Val = Inst.getValueOperand();
-  Type *ElementType = Val->getType();
-  Value *Address = Inst.getPointerOperand();
-  const SCEV *AccessFunction = SE.getSCEVAtScope(Address, L);
-  const SCEVUnknown *BasePointer =
-      dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));
-  enum MemoryAccess::AccessType AccType =
-      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;
-
-  if (auto *BitCast = dyn_cast<BitCastInst>(Address)) {
-    auto *Src = BitCast->getOperand(0);
-    auto *SrcTy = Src->getType();
-    auto *DstTy = BitCast->getType();
-    // Do not try to delinearize non-sized (opaque) pointers.
-    if ((SrcTy->isPointerTy() && !SrcTy->getPointerElementType()->isSized()) ||
-        (DstTy->isPointerTy() && !DstTy->getPointerElementType()->isSized())) {
-      return false;
-    }
-    if (SrcTy->isPointerTy() && DstTy->isPointerTy() &&
-        DL.getTypeAllocSize(SrcTy->getPointerElementType()) ==
-            DL.getTypeAllocSize(DstTy->getPointerElementType()))
-      Address = Src;
-  }
-
-  auto *GEP = dyn_cast<GetElementPtrInst>(Address);
-  if (!GEP)
-    return false;
-
-  std::vector<const SCEV *> Subscripts;
-  std::vector<int> Sizes;
-  std::tie(Subscripts, Sizes) = getIndexExpressionsFromGEP(GEP, SE);
-  auto *BasePtr = GEP->getOperand(0);
-
-  if (auto *BasePtrCast = dyn_cast<BitCastInst>(BasePtr))
-    BasePtr = BasePtrCast->getOperand(0);
-
-  // Check for identical base pointers to ensure that we do not miss index
-  // offsets that have been added before this GEP is applied.
-  if (BasePtr != BasePointer->getValue())
-    return false;
-
-  std::vector<const SCEV *> SizesSCEV;
-
-  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();
-  for (auto *Subscript : Subscripts) {
-    InvariantLoadsSetTy AccessILS;
-    if (!isAffineExpr(&scop->getRegion(), L, Subscript, SE, &AccessILS))
-      return false;
-
-    for (LoadInst *LInst : AccessILS)
-      if (!ScopRIL.count(LInst))
-        return false;
-  }
-
-  if (Sizes.empty())
-    return false;
-
-  for (auto V : Sizes)
-    SizesSCEV.push_back(SE.getSCEV(
-        ConstantInt::get(IntegerType::getInt64Ty(BasePtr->getContext()), V)));
-
-  addArrayAccess(Inst, AccType, BasePointer->getValue(), ElementType, true,
-                 Subscripts, SizesSCEV, Val);
-  return true;
-}
-
-bool ScopBuilder::buildAccessMultiDimParam(MemAccInst Inst, Loop *L) {
-  if (!PollyDelinearize)
-    return false;
-
-  Value *Address = Inst.getPointerOperand();
-  Value *Val = Inst.getValueOperand();
-  Type *ElementType = Val->getType();
-  unsigned ElementSize = DL.getTypeAllocSize(ElementType);
-  enum MemoryAccess::AccessType AccType =
-      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;
-
-  const SCEV *AccessFunction = SE.getSCEVAtScope(Address, L);
-  const SCEVUnknown *BasePointer =
-      dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));
-
-  assert(BasePointer && "Could not find base pointer");
-
-  auto &InsnToMemAcc = scop->getInsnToMemAccMap();
-  auto AccItr = InsnToMemAcc.find(Inst);
-  if (AccItr == InsnToMemAcc.end())
-    return false;
-
-  std::vector<const SCEV *> Sizes(
-      AccItr->second.Shape->DelinearizedSizes.begin(),
-      AccItr->second.Shape->DelinearizedSizes.end());
-  // Remove the element size. This information is already provided by the
-  // ElementSize parameter. In case the element size of this access and the
-  // element size used for delinearization differ, the delinearization is
-  // incorrect. Hence, we invalidate the scop.
-  //
-  // TODO: Handle delinearization with differing element sizes.
-  auto DelinearizedSize =
-      cast<SCEVConstant>(Sizes.back())->getAPInt().getSExtValue();
-  Sizes.pop_back();
-  if (ElementSize != DelinearizedSize)
-    scop->invalidate(DELINEARIZATION, Inst->getDebugLoc());
-
-  addArrayAccess(Inst, AccType, BasePointer->getValue(), ElementType, true,
-                 AccItr->second.DelinearizedSubscripts, Sizes, Val);
-  return true;
-}
-
-bool ScopBuilder::buildAccessMemIntrinsic(MemAccInst Inst, Loop *L) {
-  auto *MemIntr = dyn_cast_or_null<MemIntrinsic>(Inst);
-
-  if (MemIntr == nullptr)
-    return false;
-
-  auto *LengthVal = SE.getSCEVAtScope(MemIntr->getLength(), L);
-  assert(LengthVal);
-
-  // Check if the length val is actually affine or if we overapproximate it.
-  InvariantLoadsSetTy AccessILS;
-  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();
-  bool LengthIsAffine =
-      isAffineExpr(&scop->getRegion(), L, LengthVal, SE, &AccessILS);
-  for (LoadInst *LInst : AccessILS)
-    if (!ScopRIL.count(LInst))
-      LengthIsAffine = false;
-  if (!LengthIsAffine)
-    LengthVal = nullptr;
-
-  auto *DestPtrVal = MemIntr->getDest();
-  assert(DestPtrVal);
-
-  auto *DestAccFunc = SE.getSCEVAtScope(DestPtrVal, L);
-  assert(DestAccFunc);
-  // Ignore accesses to "NULL".
-  // TODO: We could use this to optimize the region further, e.g., intersect
-  //       the context with
-  //          isl_set_complement(isl_set_params(getDomain()))
-  //       as we know it would be undefined to execute this instruction anyway.
-  if (DestAccFunc->isZero())
-    return true;
-
-  auto *DestPtrSCEV = dyn_cast<SCEVUnknown>(SE.getPointerBase(DestAccFunc));
-  assert(DestPtrSCEV);
-  DestAccFunc = SE.getMinusSCEV(DestAccFunc, DestPtrSCEV);
-  addArrayAccess(Inst, MemoryAccess::MUST_WRITE, DestPtrSCEV->getValue(),
-                 IntegerType::getInt8Ty(DestPtrVal->getContext()), false,
-                 {DestAccFunc, LengthVal}, {}, Inst.getValueOperand());
-
-  auto *MemTrans = dyn_cast<MemTransferInst>(MemIntr);
-  if (!MemTrans)
-    return true;
-
-  auto *SrcPtrVal = MemTrans->getSource();
-  assert(SrcPtrVal);
-
-  auto *SrcAccFunc = SE.getSCEVAtScope(SrcPtrVal, L);
-  assert(SrcAccFunc);
-  // Ignore accesses to "NULL".
-  // TODO: See the TODO above.
-  if (SrcAccFunc->isZero())
-    return true;
-
-  auto *SrcPtrSCEV = dyn_cast<SCEVUnknown>(SE.getPointerBase(SrcAccFunc));
-  assert(SrcPtrSCEV);
-  SrcAccFunc = SE.getMinusSCEV(SrcAccFunc, SrcPtrSCEV);
-  addArrayAccess(Inst, MemoryAccess::READ, SrcPtrSCEV->getValue(),
-                 IntegerType::getInt8Ty(SrcPtrVal->getContext()), false,
-                 {SrcAccFunc, LengthVal}, {}, Inst.getValueOperand());
-
-  return true;
-}
-
-bool ScopBuilder::buildAccessCallInst(MemAccInst Inst, Loop *L) {
-  auto *CI = dyn_cast_or_null<CallInst>(Inst);
-
-  if (CI == nullptr)
-    return false;
-
-  if (CI->doesNotAccessMemory() || isIgnoredIntrinsic(CI))
-    return true;
-
-  bool ReadOnly = false;
-  auto *AF = SE.getConstant(IntegerType::getInt64Ty(CI->getContext()), 0);
-  auto *CalledFunction = CI->getCalledFunction();
-  switch (AA.getModRefBehavior(CalledFunction)) {
-  case llvm::FMRB_UnknownModRefBehavior:
-    llvm_unreachable("Unknown mod ref behaviour cannot be represented.");
-  case llvm::FMRB_DoesNotAccessMemory:
-    return true;
-  case llvm::FMRB_OnlyReadsMemory:
-    GlobalReads.push_back(CI);
-    return true;
-  case llvm::FMRB_OnlyReadsArgumentPointees:
-    ReadOnly = true;
-  // Fall through
-  case llvm::FMRB_OnlyAccessesArgumentPointees:
-    auto AccType = ReadOnly ? MemoryAccess::READ : MemoryAccess::MAY_WRITE;
-    for (const auto &Arg : CI->arg_operands()) {
-      if (!Arg->getType()->isPointerTy())
-        continue;
-
-      auto *ArgSCEV = SE.getSCEVAtScope(Arg, L);
-      if (ArgSCEV->isZero())
-        continue;
-
-      auto *ArgBasePtr = cast<SCEVUnknown>(SE.getPointerBase(ArgSCEV));
-      addArrayAccess(Inst, AccType, ArgBasePtr->getValue(),
-                     ArgBasePtr->getType(), false, {AF}, {}, CI);
-    }
-    return true;
-  }
-
-  return true;
-}
-
-void ScopBuilder::buildAccessSingleDim(MemAccInst Inst, Loop *L) {
-  Value *Address = Inst.getPointerOperand();
-  Value *Val = Inst.getValueOperand();
-  Type *ElementType = Val->getType();
-  enum MemoryAccess::AccessType AccType =
-      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;
-
-  const SCEV *AccessFunction = SE.getSCEVAtScope(Address, L);
-  const SCEVUnknown *BasePointer =
-      dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));
-
-  assert(BasePointer && "Could not find base pointer");
-  AccessFunction = SE.getMinusSCEV(AccessFunction, BasePointer);
-
-  // Check if the access depends on a loop contained in a non-affine subregion.
-  bool isVariantInNonAffineLoop = false;
-  SetVector<const Loop *> Loops;
-  auto &BoxedLoops = scop->getBoxedLoops();
-  findLoops(AccessFunction, Loops);
-  for (const Loop *L : Loops)
-    if (BoxedLoops.count(L))
-      isVariantInNonAffineLoop = true;
-
-  InvariantLoadsSetTy AccessILS;
-  bool IsAffine =
-      !isVariantInNonAffineLoop &&
-      isAffineExpr(&scop->getRegion(), L, AccessFunction, SE, &AccessILS);
-
-  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();
-  for (LoadInst *LInst : AccessILS)
-    if (!ScopRIL.count(LInst))
-      IsAffine = false;
-
-  if (!IsAffine && AccType == MemoryAccess::MUST_WRITE)
-    AccType = MemoryAccess::MAY_WRITE;
-
-  addArrayAccess(Inst, AccType, BasePointer->getValue(), ElementType, IsAffine,
-                 {AccessFunction}, {}, Val);
-}
-
-void ScopBuilder::buildMemoryAccess(MemAccInst Inst, Loop *L) {
-
-  if (buildAccessMemIntrinsic(Inst, L))
-    return;
-
-  if (buildAccessCallInst(Inst, L))
-    return;
-
-  if (buildAccessMultiDimFixed(Inst, L))
-    return;
-
-  if (buildAccessMultiDimParam(Inst, L))
-    return;
-
-  buildAccessSingleDim(Inst, L);
-}
-
-void ScopBuilder::buildAccessFunctions(Region &SR) {
-
-  if (scop->isNonAffineSubRegion(&SR)) {
-    for (BasicBlock *BB : SR.blocks())
-      buildAccessFunctions(*BB, &SR);
-    return;
-  }
-
-  for (auto I = SR.element_begin(), E = SR.element_end(); I != E; ++I)
-    if (I->isSubRegion())
-      buildAccessFunctions(*I->getNodeAs<Region>());
-    else
-      buildAccessFunctions(*I->getNodeAs<BasicBlock>());
-}
-
-void ScopBuilder::buildStmts(Region &SR) {
-
-  if (scop->isNonAffineSubRegion(&SR)) {
-    scop->addScopStmt(nullptr, &SR);
-    return;
-  }
-
-  for (auto I = SR.element_begin(), E = SR.element_end(); I != E; ++I)
-    if (I->isSubRegion())
-      buildStmts(*I->getNodeAs<Region>());
-    else
-      scop->addScopStmt(I->getNodeAs<BasicBlock>(), nullptr);
-}
-
-void ScopBuilder::buildAccessFunctions(BasicBlock &BB,
-                                       Region *NonAffineSubRegion,
-                                       bool IsExitBlock) {
-  // We do not build access functions for error blocks, as they may contain
-  // instructions we cannot model.
-  if (isErrorBlock(BB, scop->getRegion(), LI, DT) && !IsExitBlock)
-    return;
-
-  Loop *L = LI.getLoopFor(&BB);
-
-  for (Instruction &Inst : BB) {
-    PHINode *PHI = dyn_cast<PHINode>(&Inst);
-    if (PHI)
-      buildPHIAccesses(PHI, NonAffineSubRegion, IsExitBlock);
-
-    // For the exit block we stop modeling after the last PHI node.
-    if (!PHI && IsExitBlock)
-      break;
-
-    if (auto MemInst = MemAccInst::dyn_cast(Inst))
-      buildMemoryAccess(MemInst, L);
-
-    if (isIgnoredIntrinsic(&Inst))
-      continue;
-
-    // PHI nodes have already been modeled above and TerminatorInsts that are
-    // not part of a non-affine subregion are fully modeled and regenerated
-    // from the polyhedral domains. Hence, they do not need to be modeled as
-    // explicit data dependences.
-    if (!PHI && (!isa<TerminatorInst>(&Inst) || NonAffineSubRegion))
-      buildScalarDependences(&Inst);
-
-    if (!IsExitBlock)
-      buildEscapingDependences(&Inst);
-  }
-}
-
-MemoryAccess *ScopBuilder::addMemoryAccess(
-    BasicBlock *BB, Instruction *Inst, MemoryAccess::AccessType AccType,
-    Value *BaseAddress, Type *ElementType, bool Affine, Value *AccessValue,
-    ArrayRef<const SCEV *> Subscripts, ArrayRef<const SCEV *> Sizes,
-    ScopArrayInfo::MemoryKind Kind) {
-  ScopStmt *Stmt = scop->getStmtFor(BB);
-
-  // Do not create a memory access for anything not in the SCoP. It would be
-  // ignored anyway.
-  if (!Stmt)
-    return nullptr;
-
-  AccFuncSetType &AccList = scop->getOrCreateAccessFunctions(BB);
-  Value *BaseAddr = BaseAddress;
-  std::string BaseName = getIslCompatibleName("MemRef_", BaseAddr, "");
-
-  bool isKnownMustAccess = false;
-
-  // Accesses in single-basic-block statements are always executed.
-  if (Stmt->isBlockStmt())
-    isKnownMustAccess = true;
-
-  if (Stmt->isRegionStmt()) {
-    // Accesses that dominate the exit block of a non-affine region are always
-    // executed. In non-affine regions there may exist accesses that do not
-    // dominate the exit. MK_Value writes always dominate the exit; MK_PHI
-    // writes do so only if there is at most one PHI_WRITE in the non-affine
-    // region.
-    if (DT.dominates(BB, Stmt->getRegion()->getExit()))
-      isKnownMustAccess = true;
-  }
-
-  // Non-affine PHI writes do not "happen" at a particular instruction, but
-  // after exiting the statement. Therefore they are guaranteed to execute
-  // and overwrite the old value.
-  if (Kind == ScopArrayInfo::MK_PHI || Kind == ScopArrayInfo::MK_ExitPHI)
-    isKnownMustAccess = true;
-
-  if (!isKnownMustAccess && AccType == MemoryAccess::MUST_WRITE)
-    AccType = MemoryAccess::MAY_WRITE;
-
-  AccList.emplace_back(Stmt, Inst, AccType, BaseAddress, ElementType, Affine,
-                       Subscripts, Sizes, AccessValue, Kind, BaseName);
-  Stmt->addAccess(&AccList.back());
-  return &AccList.back();
-}
-
-void ScopBuilder::addArrayAccess(
-    MemAccInst MemAccInst, MemoryAccess::AccessType AccType, Value *BaseAddress,
-    Type *ElementType, bool IsAffine, ArrayRef<const SCEV *> Subscripts,
-    ArrayRef<const SCEV *> Sizes, Value *AccessValue) {
-  ArrayBasePointers.insert(BaseAddress);
-  addMemoryAccess(MemAccInst->getParent(), MemAccInst, AccType, BaseAddress,
-                  ElementType, IsAffine, AccessValue, Subscripts, Sizes,
-                  ScopArrayInfo::MK_Array);
-}
-
-void ScopBuilder::ensureValueWrite(Instruction *Inst) {
-  ScopStmt *Stmt = scop->getStmtFor(Inst);
-
-  // Inst not defined within this SCoP.
-  if (!Stmt)
-    return;
-
-  // Do not process further if the instruction is already written.
-  if (Stmt->lookupValueWriteOf(Inst))
-    return;
-
-  addMemoryAccess(Inst->getParent(), Inst, MemoryAccess::MUST_WRITE, Inst,
-                  Inst->getType(), true, Inst, ArrayRef<const SCEV *>(),
-                  ArrayRef<const SCEV *>(), ScopArrayInfo::MK_Value);
-}
-
-void ScopBuilder::ensureValueRead(Value *V, BasicBlock *UserBB) {
-
-  // There cannot be an "access" for literal constants. BasicBlock references
-  // (jump destinations) also never change.
-  if ((isa<Constant>(V) && !isa<GlobalVariable>(V)) || isa<BasicBlock>(V))
-    return;
-
-  // If the value can be synthesized and the user is in the region, we do
-  // not need to add a value dependence.
-  auto *Scope = LI.getLoopFor(UserBB);
-  if (canSynthesize(V, *scop, &LI, &SE, Scope))
-    return;
-
-  // Do not build scalar dependences for required invariant loads as we will
-  // hoist them later on anyway or drop the SCoP if we cannot.
-  auto &ScopRIL = scop->getRequiredInvariantLoads();
-  if (ScopRIL.count(dyn_cast<LoadInst>(V)))
-    return;
-
-  // Determine the ScopStmt containing the value's definition and use. There is
-  // no defining ScopStmt if the value is a function argument, a global value,
-  // or defined outside the SCoP.
-  Instruction *ValueInst = dyn_cast<Instruction>(V);
-  ScopStmt *ValueStmt = ValueInst ? scop->getStmtFor(ValueInst) : nullptr;
-
-  ScopStmt *UserStmt = scop->getStmtFor(UserBB);
-
-  // We do not model uses outside the scop.
-  if (!UserStmt)
-    return;
-
-  // Add MemoryAccess for invariant values only if requested.
-  if (!ModelReadOnlyScalars && !ValueStmt)
-    return;
-
-  // Ignore use-def chains within the same ScopStmt.
-  if (ValueStmt == UserStmt)
-    return;
-
-  // Do not create another MemoryAccess for reloading the value if one already
-  // exists.
-  if (UserStmt->lookupValueReadOf(V))
-    return;
-
-  // For exit PHIs use the MK_ExitPHI MemoryKind not MK_Value.
-  ScopArrayInfo::MemoryKind Kind = ScopArrayInfo::MK_Value;
-  if (!ValueStmt && isa<PHINode>(V))
-    Kind = ScopArrayInfo::MK_ExitPHI;
-
-  addMemoryAccess(UserBB, nullptr, MemoryAccess::READ, V, V->getType(), true, V,
-                  ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(), Kind);
-  if (ValueInst)
-    ensureValueWrite(ValueInst);
-}
-
-void ScopBuilder::ensurePHIWrite(PHINode *PHI, BasicBlock *IncomingBlock,
-                                 Value *IncomingValue, bool IsExitBlock) {
-  // As the incoming block might turn out to be an error statement, ensure we
-  // will create an exit PHI SAI object. It is needed during code generation
-  // and would be created later anyway.
-  if (IsExitBlock)
-    scop->getOrCreateScopArrayInfo(PHI, PHI->getType(), {},
-                                   ScopArrayInfo::MK_ExitPHI);
-
-  ScopStmt *IncomingStmt = scop->getStmtFor(IncomingBlock);
-  if (!IncomingStmt)
-    return;
-
-  // Make sure the incoming value is available in the incoming block. This
-  // must be done before the check for multiple PHI writes because each of
-  // multiple exiting edges from a subregion can provide the effective written
-  // value of the subregion. As such, all of them must be made available in
-  // the subregion statement.
-  ensureValueRead(IncomingValue, IncomingBlock);
-
-  // Do not add more than one MemoryAccess per PHINode and ScopStmt.
-  if (MemoryAccess *Acc = IncomingStmt->lookupPHIWriteOf(PHI)) {
-    assert(Acc->getAccessInstruction() == PHI);
-    Acc->addIncoming(IncomingBlock, IncomingValue);
-    return;
-  }
-
-  MemoryAccess *Acc = addMemoryAccess(
-      IncomingStmt->getEntryBlock(), PHI, MemoryAccess::MUST_WRITE, PHI,
-      PHI->getType(), true, PHI, ArrayRef<const SCEV *>(),
-      ArrayRef<const SCEV *>(),
-      IsExitBlock ? ScopArrayInfo::MK_ExitPHI : ScopArrayInfo::MK_PHI);
-  assert(Acc);
-  Acc->addIncoming(IncomingBlock, IncomingValue);
-}
-
-void ScopBuilder::addPHIReadAccess(PHINode *PHI) {
-  addMemoryAccess(PHI->getParent(), PHI, MemoryAccess::READ, PHI,
-                  PHI->getType(), true, PHI, ArrayRef<const SCEV *>(),
-                  ArrayRef<const SCEV *>(), ScopArrayInfo::MK_PHI);
-}
-
-void ScopBuilder::buildScop(Region &R, AssumptionCache &AC) {
-  scop.reset(new Scop(R, SE, LI, *SD.getDetectionContext(&R)));
-
-  buildStmts(R);
-  buildAccessFunctions(R);
-
-  // In case the region does not have a single exit edge, we will later
-  // (during code generation) split the exit block. This will move potential
-  // PHI nodes from the current exit block into the new region exiting block.
-  // Hence, PHI nodes that are at this point not part of the region will be
-  // afterwards. To handle these PHI nodes later, we model their operands now
-  // as scalar accesses. Note that we do not model anything in the exit block
-  // if we have an exiting block in the region, as there will not be any
-  // splitting later.
-  if (!scop->hasSingleExitEdge())
-    buildAccessFunctions(*R.getExit(), nullptr,
-                         /* IsExitBlock */ true);
-
-  // Create memory accesses for global reads since all arrays are now known.
-  auto *AF = SE.getConstant(IntegerType::getInt64Ty(SE.getContext()), 0);
-  for (auto *GlobalRead : GlobalReads)
-    for (auto *BP : ArrayBasePointers)
-      addArrayAccess(MemAccInst(GlobalRead), MemoryAccess::READ, BP,
-                     BP->getType(), false, {AF}, {}, GlobalRead);
-
-  scop->init(AA, AC, DT, LI);
-}
-
-ScopBuilder::ScopBuilder(Region *R, AssumptionCache &AC, AliasAnalysis &AA,
-                         const DataLayout &DL, DominatorTree &DT, LoopInfo &LI,
-                         ScopDetection &SD, ScalarEvolution &SE)
-    : AA(AA), DL(DL), DT(DT), LI(LI), SD(SD), SE(SE) {
-
-  Function *F = R->getEntry()->getParent();
-
-  DebugLoc Beg, End;
-  getDebugLocations(getBBPairForRegion(R), Beg, End);
-  std::string Msg = "SCoP begins here.";
-  emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE, *F, Beg, Msg);
-
-  buildScop(*R, AC);
-
-  DEBUG(scop->print(dbgs()));
-
-  if (!scop->hasFeasibleRuntimeContext()) {
-    Msg = "SCoP ends here but was dismissed.";
-    scop.reset();
-  } else {
-    Msg = "SCoP ends here.";
-    ++ScopFound;
-    if (scop->getMaxLoopDepth() > 0)
-      ++RichScopFound;
-  }
-
-  emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE, *F, End, Msg);
-}
-
 //===----------------------------------------------------------------------===//
 void ScopInfoRegionPass::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.addRequired<LoopInfoWrapperPass>();

Modified: polly/trunk/lib/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/polly/trunk/lib/CMakeLists.txt?rev=273982&r1=273981&r2=273982&view=diff
==============================================================================
--- polly/trunk/lib/CMakeLists.txt (original)
+++ polly/trunk/lib/CMakeLists.txt Mon Jun 27 20:37:20 2016
@@ -29,6 +29,7 @@ add_polly_library(Polly
   Analysis/ScopDetection.cpp
   Analysis/ScopDetectionDiagnostic.cpp
   Analysis/ScopInfo.cpp
+  Analysis/ScopBuilder.cpp
   Analysis/ScopGraphPrinter.cpp
   Analysis/ScopPass.cpp
   CodeGen/BlockGenerators.cpp



