[llvm] r293579 - NVPTX: Move InferAddressSpaces to generic code

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 30 17:10:59 PST 2017


Author: arsenm
Date: Mon Jan 30 19:10:58 2017
New Revision: 293579

URL: http://llvm.org/viewvc/llvm-project?rev=293579&view=rev
Log:
NVPTX: Move InferAddressSpaces to generic code

Added:
    llvm/trunk/lib/Transforms/Scalar/InferAddressSpaces.cpp
      - copied, changed from r293577, llvm/trunk/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp
Removed:
    llvm/trunk/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp
Modified:
    llvm/trunk/include/llvm/InitializePasses.h
    llvm/trunk/include/llvm/Transforms/Scalar.h
    llvm/trunk/lib/Target/NVPTX/CMakeLists.txt
    llvm/trunk/lib/Target/NVPTX/NVPTX.h
    llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp
    llvm/trunk/lib/Transforms/Scalar/CMakeLists.txt
    llvm/trunk/lib/Transforms/Scalar/Scalar.cpp
    llvm/trunk/test/CodeGen/NVPTX/access-non-generic.ll
    llvm/trunk/test/CodeGen/NVPTX/lower-alloca.ll

Modified: llvm/trunk/include/llvm/InitializePasses.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/InitializePasses.h?rev=293579&r1=293578&r2=293579&view=diff
==============================================================================
--- llvm/trunk/include/llvm/InitializePasses.h (original)
+++ llvm/trunk/include/llvm/InitializePasses.h Mon Jan 30 19:10:58 2017
@@ -156,6 +156,7 @@ void initializeIfConverterPass(PassRegis
 void initializeImplicitNullChecksPass(PassRegistry&);
 void initializeIndVarSimplifyLegacyPassPass(PassRegistry&);
 void initializeInductiveRangeCheckEliminationPass(PassRegistry&);
+void initializeInferAddressSpacesPass(PassRegistry&);
 void initializeInferFunctionAttrsLegacyPassPass(PassRegistry&);
 void initializeInlineCostAnalysisPass(PassRegistry&);
 void initializeInstCountPass(PassRegistry&);

Modified: llvm/trunk/include/llvm/Transforms/Scalar.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Transforms/Scalar.h?rev=293579&r1=293578&r2=293579&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Transforms/Scalar.h (original)
+++ llvm/trunk/include/llvm/Transforms/Scalar.h Mon Jan 30 19:10:58 2017
@@ -412,6 +412,15 @@ Pass *createCorrelatedValuePropagationPa
 
 //===----------------------------------------------------------------------===//
 //
+// InferAddressSpaces - Modify users of addrspacecast instructions with values
+// in the source address space if using the destination address space is slower
+// on the target.
+//
+FunctionPass *createInferAddressSpacesPass();
+extern char &InferAddressSpacesID;
+
+//===----------------------------------------------------------------------===//
+//
 // InstructionSimplifier - Remove redundant instructions.
 //
 FunctionPass *createInstructionSimplifierPass();

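The declaration above is the hook other backends (and out-of-tree targets) can call now that the pass is generic. Below is a minimal, hypothetical sketch of wiring it into a legacy pass pipeline; the helper name and surrounding boilerplate are assumptions for illustration, not part of this commit, and simply mirror the NVPTXTargetMachine.cpp change further down.

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Scalar.h"

// Hypothetical helper (not from this patch): schedule the now-generic pass in
// a legacy pipeline, the way NVPTXTargetMachine.cpp below swaps
// createNVPTXInferAddressSpacesPass() for createInferAddressSpacesPass().
static void addInferAddressSpaces(llvm::legacy::PassManagerBase &PM) {
  // Only does real work on targets whose TargetTransformInfo reports a flat
  // address space; otherwise the pass returns without changing the IR.
  PM.add(llvm::createInferAddressSpacesPass());
}
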
Modified: llvm/trunk/lib/Target/NVPTX/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/CMakeLists.txt?rev=293579&r1=293578&r2=293579&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/NVPTX/CMakeLists.txt Mon Jan 30 19:10:58 2017
@@ -17,7 +17,6 @@ set(NVPTXCodeGen_sources
   NVPTXISelDAGToDAG.cpp
   NVPTXISelLowering.cpp
   NVPTXImageOptimizer.cpp
-  NVPTXInferAddressSpaces.cpp
   NVPTXInstrInfo.cpp
   NVPTXLowerAggrCopies.cpp
   NVPTXLowerArgs.cpp

Modified: llvm/trunk/lib/Target/NVPTX/NVPTX.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTX.h?rev=293579&r1=293578&r2=293579&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTX.h (original)
+++ llvm/trunk/lib/Target/NVPTX/NVPTX.h Mon Jan 30 19:10:58 2017
@@ -45,7 +45,6 @@ FunctionPass *createNVPTXISelDag(NVPTXTa
                                  llvm::CodeGenOpt::Level OptLevel);
 ModulePass *createNVPTXAssignValidGlobalNamesPass();
 ModulePass *createGenericToNVVMPass();
-FunctionPass *createNVPTXInferAddressSpacesPass();
 FunctionPass *createNVVMIntrRangePass(unsigned int SmVersion);
 FunctionPass *createNVVMReflectPass();
 MachineFunctionPass *createNVPTXPrologEpilogPass();

Removed: llvm/trunk/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp?rev=293578&view=auto
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp (original)
+++ llvm/trunk/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp (removed)
@@ -1,609 +0,0 @@
-//===-- NVPTXInferAddressSpace.cpp - ---------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// CUDA C/C++ includes memory space designation as variable type qualifiers (such
-// as __global__ and __shared__). Knowing the space of a memory access allows
-// CUDA compilers to emit faster PTX loads and stores. For example, a load from
-// shared memory can be translated to `ld.shared` which is roughly 10% faster
-// than a generic `ld` on an NVIDIA Tesla K40c.
-//
-// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
-// compilers must infer the memory space of an address expression from
-// type-qualified variables.
-//
-// LLVM IR uses non-zero (so-called) specific address spaces to represent memory
-// spaces (e.g. addrspace(3) means shared memory). The Clang frontend
-// places only type-qualified variables in specific address spaces, and then
-// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
-// (the so-called generic address space) for other instructions to use.
-//
-// For example, Clang translates the following CUDA code
-//   __shared__ float a[10];
-//   float v = a[i];
-// to
-//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
-//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
-//   %v = load float, float* %1 ; emits ld.f32
-// @a is in addrspace(3) since it's type-qualified, but its use from %1 is
-// redirected to %0 (the generic version of @a).
-//
-// The optimization implemented in this file propagates specific address spaces
-// from type-qualified variable declarations to its users. For example, it
-// optimizes the above IR to
-//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
-//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
-// propagating the addrspace(3) from @a to %1. As a result, the NVPTX
-// codegen is able to emit ld.shared.f32 for %v.
-//
-// Address space inference works in two steps. First, it uses a data-flow
-// analysis to infer as many generic pointers as possible to point to only one
-// specific address space. In the above example, it can prove that %1 only
-// points to addrspace(3). This algorithm was published in
-//   CUDA: Compiling and optimizing for a GPU platform
-//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
-//   ICCS 2012
-//
-// Then, address space inference replaces all refinable generic pointers with
-// equivalent specific pointers.
-//
-// The major challenge of implementing this optimization is handling PHINodes,
-// which may create loops in the data flow graph. This brings two complications.
-//
-// First, the data flow analysis in Step 1 needs to be circular. For example,
-//     %generic.input = addrspacecast float addrspace(3)* %input to float*
-//   loop:
-//     %y = phi [ %generic.input, %y2 ]
-//     %y2 = getelementptr %y, 1
-//     %v = load %y2
-//     br ..., label %loop, ...
-// proving %y specific requires proving both %generic.input and %y2 specific,
-// but proving %y2 specific circles back to %y. To address this complication,
-// the data flow analysis operates on a lattice:
-//   uninitialized > specific address spaces > generic.
-// All address expressions (our implementation only considers phi, bitcast,
-// addrspacecast, and getelementptr) start with the uninitialized address space.
-// The monotone transfer function moves the address space of a pointer down a
-// lattice path from uninitialized to specific and then to generic. A join
-// operation of two different specific address spaces pushes the expression down
-// to the generic address space. The analysis completes once it reaches a fixed
-// point.
-//
-// Second, IR rewriting in Step 2 also needs to be circular. For example,
-// converting %y to addrspace(3) requires the compiler to know the converted
-// %y2, but converting %y2 needs the converted %y. To address this complication,
-// we break these cycles using "undef" placeholders. When converting an
-// instruction `I` to a new address space, if its operand `Op` is not converted
-// yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
-// For instance, our algorithm first converts %y to
-//   %y' = phi float addrspace(3)* [ %input, undef ]
-// Then, it converts %y2 to
-//   %y2' = getelementptr %y', 1
-// Finally, it fixes the undef in %y' so that
-//   %y' = phi float addrspace(3)* [ %input, %y2' ]
-//
-//===----------------------------------------------------------------------===//
-
-#include "NVPTX.h"
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/SetVector.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/InstIterator.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Operator.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Transforms/Utils/ValueMapper.h"
-
-#define DEBUG_TYPE "nvptx-infer-addrspace"
-
-using namespace llvm;
-
-namespace {
-static const unsigned UnknownAddressSpace = ~0u;
-
-using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;
-
-/// \brief NVPTXInferAddressSpaces
-class NVPTXInferAddressSpaces: public FunctionPass {
-  /// Target-specific address space whose uses should be replaced if
-  /// possible.
-  unsigned FlatAddrSpace;
-
-public:
-  static char ID;
-
-  NVPTXInferAddressSpaces() : FunctionPass(ID) {}
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.setPreservesCFG();
-    AU.addRequired<TargetTransformInfoWrapperPass>();
-  }
-
-  bool runOnFunction(Function &F) override;
-
-private:
-  // Returns the new address space of V if updated; otherwise, returns None.
-  Optional<unsigned>
-  updateAddressSpace(const Value &V,
-                     const ValueToAddrSpaceMapTy &InferredAddrSpace) const;
-
-  // Tries to infer the specific address space of each address expression in
-  // Postorder.
-  void inferAddressSpaces(const std::vector<Value *> &Postorder,
-                          ValueToAddrSpaceMapTy *InferredAddrSpace) const;
-
-  // Changes the flat address expressions in function F to point to specific
-  // address spaces if InferredAddrSpace says so. Postorder is the postorder of
-  // all flat expressions in the use-def graph of function F.
-  bool
-  rewriteWithNewAddressSpaces(const std::vector<Value *> &Postorder,
-                              const ValueToAddrSpaceMapTy &InferredAddrSpace,
-                              Function *F) const;
-
-  void appendsFlatAddressExpressionToPostorderStack(
-    Value *V, std::vector<std::pair<Value *, bool>> *PostorderStack,
-    DenseSet<Value *> *Visited) const;
-
-  std::vector<Value *> collectFlatAddressExpressions(Function &F) const;
-  Value *cloneValueWithNewAddressSpace(
-    Value *V, unsigned NewAddrSpace,
-    const ValueToValueMapTy &ValueWithNewAddrSpace,
-    SmallVectorImpl<const Use *> *UndefUsesToFix) const;
-  unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) const;
-};
-} // end anonymous namespace
-
-char NVPTXInferAddressSpaces::ID = 0;
-
-namespace llvm {
-void initializeNVPTXInferAddressSpacesPass(PassRegistry &);
-}
-INITIALIZE_PASS(NVPTXInferAddressSpaces, "nvptx-infer-addrspace",
-                "Infer address spaces",
-                false, false)
-
-// Returns true if V is an address expression.
-// TODO: Currently, we consider only phi, bitcast, addrspacecast, and
-// getelementptr operators.
-static bool isAddressExpression(const Value &V) {
-  if (!isa<Operator>(V))
-    return false;
-
-  switch (cast<Operator>(V).getOpcode()) {
-  case Instruction::PHI:
-  case Instruction::BitCast:
-  case Instruction::AddrSpaceCast:
-  case Instruction::GetElementPtr:
-    return true;
-  default:
-    return false;
-  }
-}
-
-// Returns the pointer operands of V.
-//
-// Precondition: V is an address expression.
-static SmallVector<Value *, 2> getPointerOperands(const Value &V) {
-  assert(isAddressExpression(V));
-  const Operator& Op = cast<Operator>(V);
-  switch (Op.getOpcode()) {
-  case Instruction::PHI: {
-    auto IncomingValues = cast<PHINode>(Op).incoming_values();
-    return SmallVector<Value *, 2>(IncomingValues.begin(),
-                                   IncomingValues.end());
-  }
-  case Instruction::BitCast:
-  case Instruction::AddrSpaceCast:
-  case Instruction::GetElementPtr:
-    return {Op.getOperand(0)};
-  default:
-    llvm_unreachable("Unexpected instruction type.");
-  }
-}
-
-// If V is an unvisited flat address expression, appends V to PostorderStack
-// and marks it as visited.
-void NVPTXInferAddressSpaces::appendsFlatAddressExpressionToPostorderStack(
-    Value *V, std::vector<std::pair<Value *, bool>> *PostorderStack,
-    DenseSet<Value *> *Visited) const {
-  assert(V->getType()->isPointerTy());
-  if (isAddressExpression(*V) &&
-      V->getType()->getPointerAddressSpace() == FlatAddrSpace) {
-    if (Visited->insert(V).second)
-      PostorderStack->push_back(std::make_pair(V, false));
-  }
-}
-
-// Returns all flat address expressions in function F. The elements are ordered
-// in postorder.
-std::vector<Value *>
-NVPTXInferAddressSpaces::collectFlatAddressExpressions(Function &F) const {
-  // This function implements a non-recursive postorder traversal of a partial
-  // use-def graph of function F.
-  std::vector<std::pair<Value*, bool>> PostorderStack;
-  // The set of visited expressions.
-  DenseSet<Value*> Visited;
-  // We only explore address expressions that are reachable from loads and
-  // stores for now because we aim at generating faster loads and stores.
-  for (Instruction &I : instructions(F)) {
-    if (isa<LoadInst>(I)) {
-      appendsFlatAddressExpressionToPostorderStack(
-          I.getOperand(0), &PostorderStack, &Visited);
-    } else if (isa<StoreInst>(I)) {
-      appendsFlatAddressExpressionToPostorderStack(
-          I.getOperand(1), &PostorderStack, &Visited);
-    }
-  }
-
-  std::vector<Value *> Postorder; // The resultant postorder.
-  while (!PostorderStack.empty()) {
-    // If the operands of the expression on the top are already explored,
-    // adds that expression to the resultant postorder.
-    if (PostorderStack.back().second) {
-      Postorder.push_back(PostorderStack.back().first);
-      PostorderStack.pop_back();
-      continue;
-    }
-    // Otherwise, adds its operands to the stack and explores them.
-    PostorderStack.back().second = true;
-    for (Value *PtrOperand : getPointerOperands(*PostorderStack.back().first)) {
-      appendsFlatAddressExpressionToPostorderStack(
-          PtrOperand, &PostorderStack, &Visited);
-    }
-  }
-  return Postorder;
-}
-
-// A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
-// of OperandUse.get() in the new address space. If the clone is not ready yet,
-// returns an undef in the new address space as a placeholder.
-static Value *operandWithNewAddressSpaceOrCreateUndef(
-    const Use &OperandUse, unsigned NewAddrSpace,
-    const ValueToValueMapTy &ValueWithNewAddrSpace,
-    SmallVectorImpl<const Use *> *UndefUsesToFix) {
-  Value *Operand = OperandUse.get();
-  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
-    return NewOperand;
-
-  UndefUsesToFix->push_back(&OperandUse);
-  return UndefValue::get(
-      Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace));
-}
-
-// Returns a clone of `I` with its operands converted to those specified in
-// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
-// operand whose address space needs to be modified might not exist in
-// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
-// adds that operand use to UndefUsesToFix so that caller can fix them later.
-//
-// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
-// from a pointer whose type already matches. Therefore, this function returns a
-// Value* instead of an Instruction*.
-static Value *cloneInstructionWithNewAddressSpace(
-    Instruction *I, unsigned NewAddrSpace,
-    const ValueToValueMapTy &ValueWithNewAddrSpace,
-    SmallVectorImpl<const Use *> *UndefUsesToFix) {
-  Type *NewPtrType =
-      I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);
-
-  if (I->getOpcode() == Instruction::AddrSpaceCast) {
-    Value *Src = I->getOperand(0);
-    // Because `I` is flat, the source address space must be specific.
-    // Therefore, the inferred address space must be the source space, according
-    // to our algorithm.
-    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
-    if (Src->getType() != NewPtrType)
-      return new BitCastInst(Src, NewPtrType);
-    return Src;
-  }
-
-  // Computes the converted pointer operands.
-  SmallVector<Value *, 4> NewPointerOperands;
-  for (const Use &OperandUse : I->operands()) {
-    if (!OperandUse.get()->getType()->isPointerTy())
-      NewPointerOperands.push_back(nullptr);
-    else
-      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
-          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
-  }
-
-  switch (I->getOpcode()) {
-  case Instruction::BitCast:
-    return new BitCastInst(NewPointerOperands[0], NewPtrType);
-  case Instruction::PHI: {
-    assert(I->getType()->isPointerTy());
-    PHINode *PHI = cast<PHINode>(I);
-    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
-    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
-      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
-      NewPHI->addIncoming(NewPointerOperands[OperandNo],
-                          PHI->getIncomingBlock(Index));
-    }
-    return NewPHI;
-  }
-  case Instruction::GetElementPtr: {
-    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
-    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
-        GEP->getSourceElementType(), NewPointerOperands[0],
-        SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
-    NewGEP->setIsInBounds(GEP->isInBounds());
-    return NewGEP;
-  }
-  default:
-    llvm_unreachable("Unexpected opcode");
-  }
-}
-
-// Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
-// constant expression `CE` with its operands replaced as specified in
-// ValueWithNewAddrSpace.
-static Value *cloneConstantExprWithNewAddressSpace(
-    ConstantExpr *CE, unsigned NewAddrSpace,
-    const ValueToValueMapTy &ValueWithNewAddrSpace) {
-  Type *TargetType =
-      CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);
-
-  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
-    // Because CE is flat, the source address space must be specific.
-    // Therefore, the inferred address space must be the source space according
-    // to our algorithm.
-    assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
-           NewAddrSpace);
-    return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
-  }
-
-  // Computes the operands of the new constant expression.
-  SmallVector<Constant *, 4> NewOperands;
-  for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
-    Constant *Operand = CE->getOperand(Index);
-    // If the address space of `Operand` needs to be modified, the new operand
-    // with the new address space should already be in ValueWithNewAddrSpace
-    // because (1) the constant expressions we consider (i.e. addrspacecast,
-    // bitcast, and getelementptr) do not incur cycles in the data flow graph
-    // and (2) this function is called on constant expressions in postorder.
-    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
-      NewOperands.push_back(cast<Constant>(NewOperand));
-    } else {
-      // Otherwise, reuses the old operand.
-      NewOperands.push_back(Operand);
-    }
-  }
-
-  if (CE->getOpcode() == Instruction::GetElementPtr) {
-    // Needs to specify the source type while constructing a getelementptr
-    // constant expression.
-    return CE->getWithOperands(
-        NewOperands, TargetType, /*OnlyIfReduced=*/false,
-        NewOperands[0]->getType()->getPointerElementType());
-  }
-
-  return CE->getWithOperands(NewOperands, TargetType);
-}
-
-// Returns a clone of the value `V`, with its operands replaced as specified in
-// ValueWithNewAddrSpace. This function is called on every flat address
-// expression whose address space needs to be modified, in postorder.
-//
-// See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
-Value *NVPTXInferAddressSpaces::cloneValueWithNewAddressSpace(
-  Value *V, unsigned NewAddrSpace,
-  const ValueToValueMapTy &ValueWithNewAddrSpace,
-  SmallVectorImpl<const Use *> *UndefUsesToFix) const {
-  // All values in Postorder are flat address expressions.
-  assert(isAddressExpression(*V) &&
-         V->getType()->getPointerAddressSpace() == FlatAddrSpace);
-
-  if (Instruction *I = dyn_cast<Instruction>(V)) {
-    Value *NewV = cloneInstructionWithNewAddressSpace(
-        I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
-    if (Instruction *NewI = dyn_cast<Instruction>(NewV)) {
-      if (NewI->getParent() == nullptr) {
-        NewI->insertBefore(I);
-        NewI->takeName(I);
-      }
-    }
-    return NewV;
-  }
-
-  return cloneConstantExprWithNewAddressSpace(
-      cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace);
-}
-
-// Defines the join operation on the address space lattice (see the file header
-// comments).
-unsigned NVPTXInferAddressSpaces::joinAddressSpaces(unsigned AS1,
-                                                    unsigned AS2) const {
-  if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace)
-    return FlatAddrSpace;
-
-  if (AS1 == UnknownAddressSpace)
-    return AS2;
-  if (AS2 == UnknownAddressSpace)
-    return AS1;
-
-  // The join of two different specific address spaces is flat.
-  return (AS1 == AS2) ? AS1 : FlatAddrSpace;
-}
-
-bool NVPTXInferAddressSpaces::runOnFunction(Function &F) {
-  if (skipFunction(F))
-    return false;
-
-  const TargetTransformInfo &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
-  FlatAddrSpace = TTI.getFlatAddressSpace();
-  if (FlatAddrSpace == UnknownAddressSpace)
-    return false;
-
-  // Collects all flat address expressions in postorder.
-  std::vector<Value *> Postorder = collectFlatAddressExpressions(F);
-
-  // Runs a data-flow analysis to refine the address spaces of every expression
-  // in Postorder.
-  ValueToAddrSpaceMapTy InferredAddrSpace;
-  inferAddressSpaces(Postorder, &InferredAddrSpace);
-
-  // Changes the address spaces of the flat address expressions that are inferred
-  // to point to a specific address space.
-  return rewriteWithNewAddressSpaces(Postorder, InferredAddrSpace, &F);
-}
-
-void NVPTXInferAddressSpaces::inferAddressSpaces(
-    const std::vector<Value *> &Postorder,
-    ValueToAddrSpaceMapTy *InferredAddrSpace) const {
-  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
-  // Initially, all expressions are in the uninitialized address space.
-  for (Value *V : Postorder)
-    (*InferredAddrSpace)[V] = UnknownAddressSpace;
-
-  while (!Worklist.empty()) {
-    Value* V = Worklist.pop_back_val();
-
-    // Tries to update the address space of the stack top according to the
-    // address spaces of its operands.
-    DEBUG(dbgs() << "Updating the address space of\n  " << *V << '\n');
-    Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace);
-    if (!NewAS.hasValue())
-      continue;
-    // If any updates are made, grabs its users to the worklist because
-    // their address spaces can also be possibly updated.
-    DEBUG(dbgs() << "  to " << NewAS.getValue() << '\n');
-    (*InferredAddrSpace)[V] = NewAS.getValue();
-
-    for (Value *User : V->users()) {
-      // Skip if User is already in the worklist.
-      if (Worklist.count(User))
-        continue;
-
-      auto Pos = InferredAddrSpace->find(User);
-      // Our algorithm only updates the address spaces of flat address
-      // expressions, which are those in InferredAddrSpace.
-      if (Pos == InferredAddrSpace->end())
-        continue;
-
-      // Function updateAddressSpace moves the address space down a lattice
-      // path. Therefore, nothing to do if User is already inferred as flat
-      // (the bottom element in the lattice).
-      if (Pos->second == FlatAddrSpace)
-        continue;
-
-      Worklist.insert(User);
-    }
-  }
-}
-
-Optional<unsigned> NVPTXInferAddressSpaces::updateAddressSpace(
-    const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) const {
-  assert(InferredAddrSpace.count(&V));
-
-  // The new inferred address space equals the join of the address spaces
-  // of all its pointer operands.
-  unsigned NewAS = UnknownAddressSpace;
-  for (Value *PtrOperand : getPointerOperands(V)) {
-    unsigned OperandAS;
-    if (InferredAddrSpace.count(PtrOperand))
-      OperandAS = InferredAddrSpace.lookup(PtrOperand);
-    else
-      OperandAS = PtrOperand->getType()->getPointerAddressSpace();
-    NewAS = joinAddressSpaces(NewAS, OperandAS);
-    // join(flat, *) = flat. So we can break if NewAS is already generic.
-    if (NewAS == FlatAddrSpace)
-      break;
-  }
-
-  unsigned OldAS = InferredAddrSpace.lookup(&V);
-  assert(OldAS != FlatAddrSpace);
-  if (OldAS == NewAS)
-    return None;
-  return NewAS;
-}
-
-bool NVPTXInferAddressSpaces::rewriteWithNewAddressSpaces(
-    const std::vector<Value *> &Postorder,
-    const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
-  // For each address expression to be modified, creates a clone of it with its
-  // pointer operands converted to the new address space. Since the pointer
-  // operands are converted, the clone is naturally in the new address space by
-  // construction.
-  ValueToValueMapTy ValueWithNewAddrSpace;
-  SmallVector<const Use *, 32> UndefUsesToFix;
-  for (Value* V : Postorder) {
-    unsigned NewAddrSpace = InferredAddrSpace.lookup(V);
-    if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
-      ValueWithNewAddrSpace[V] = cloneValueWithNewAddressSpace(
-          V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
-    }
-  }
-
-  if (ValueWithNewAddrSpace.empty())
-    return false;
-
-  // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
-  for (const Use* UndefUse : UndefUsesToFix) {
-    User *V = UndefUse->getUser();
-    User *NewV = cast<User>(ValueWithNewAddrSpace.lookup(V));
-    unsigned OperandNo = UndefUse->getOperandNo();
-    assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
-    NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
-  }
-
-  // Replaces the uses of the old address expressions with the new ones.
-  for (Value *V : Postorder) {
-    Value *NewV = ValueWithNewAddrSpace.lookup(V);
-    if (NewV == nullptr)
-      continue;
-
-    SmallVector<Use *, 4> Uses;
-    for (Use &U : V->uses())
-      Uses.push_back(&U);
-
-    DEBUG(dbgs() << "Replacing the uses of " << *V
-                 << "\n  with\n  " << *NewV << '\n');
-
-    for (Use *U : Uses) {
-      if (isa<LoadInst>(U->getUser()) ||
-          (isa<StoreInst>(U->getUser()) &&
-           U->getOperandNo() == StoreInst::getPointerOperandIndex())) {
-        // If V is used as the pointer operand of a load/store, sets the pointer
-        // operand to NewV. This replacement does not change the element type,
-        // so the resultant load/store is still valid.
-        U->set(NewV);
-      } else if (isa<Instruction>(U->getUser())) {
-        // Otherwise, replaces the use with generic(NewV).
-        // TODO: Some optimization opportunities are missed. For example, in
-        //   %0 = icmp eq float* %p, %q
-        // if both p and q are inferred to be shared, we can rewrite %0 as
-        //   %0 = icmp eq float addrspace(3)* %new_p, %new_q
-        // instead of currently
-        //   %generic_p = addrspacecast float addrspace(3)* %new_p to float*
-        //   %generic_q = addrspacecast float addrspace(3)* %new_q to float*
-        //   %0 = icmp eq float* %generic_p, %generic_q
-        if (Instruction *I = dyn_cast<Instruction>(V)) {
-          BasicBlock::iterator InsertPos = std::next(I->getIterator());
-          while (isa<PHINode>(InsertPos))
-            ++InsertPos;
-          U->set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
-        } else {
-          U->set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
-                                                V->getType()));
-        }
-      }
-    }
-    if (V->use_empty())
-      RecursivelyDeleteTriviallyDeadInstructions(V);
-  }
-
-  return true;
-}
-
-FunctionPass *llvm::createNVPTXInferAddressSpacesPass() {
-  return new NVPTXInferAddressSpaces();
-}

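The header comment of the removed file (kept verbatim in its new location) describes the inference lattice, uninitialized > specific address spaces > flat/generic, and its join operation, which joinAddressSpaces above implements. As a quick illustration, here is a standalone sketch of that rule outside of LLVM; the concrete address-space numbers (0 for generic, 3 for shared, 1 as another example space) follow the NVPTX convention mentioned in the comment and are otherwise assumptions.

// Standalone sketch of the lattice join described in the header comment
// above; illustrative only, not code from this commit.
#include <cassert>
#include <cstdio>

static const unsigned UnknownAddressSpace = ~0u; // "uninitialized", lattice top
static const unsigned FlatAddrSpace = 0;         // generic/flat, lattice bottom

// join moves a value down the lattice: unknown joins to the other operand,
// equal specific spaces stay put, and differing specific spaces fall to flat.
static unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) {
  if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace)
    return FlatAddrSpace;
  if (AS1 == UnknownAddressSpace)
    return AS2;
  if (AS2 == UnknownAddressSpace)
    return AS1;
  return (AS1 == AS2) ? AS1 : FlatAddrSpace;
}

int main() {
  const unsigned Shared = 3, Global = 1; // example specific spaces
  assert(joinAddressSpaces(UnknownAddressSpace, Shared) == Shared);
  assert(joinAddressSpaces(Shared, Shared) == Shared);
  assert(joinAddressSpaces(Shared, Global) == FlatAddrSpace); // conflict -> flat
  std::puts("lattice join behaves as described in the header comment");
  return 0;
}
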
Modified: llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp?rev=293579&r1=293578&r2=293579&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp Mon Jan 30 19:10:58 2017
@@ -51,7 +51,6 @@ void initializeNVVMReflectPass(PassRegis
 void initializeGenericToNVVMPass(PassRegistry&);
 void initializeNVPTXAllocaHoistingPass(PassRegistry &);
 void initializeNVPTXAssignValidGlobalNamesPass(PassRegistry&);
-void initializeNVPTXInferAddressSpacesPass(PassRegistry &);
 void initializeNVPTXLowerAggrCopiesPass(PassRegistry &);
 void initializeNVPTXLowerArgsPass(PassRegistry &);
 void initializeNVPTXLowerAllocaPass(PassRegistry &);
@@ -71,7 +70,6 @@ extern "C" void LLVMInitializeNVPTXTarge
   initializeGenericToNVVMPass(PR);
   initializeNVPTXAllocaHoistingPass(PR);
   initializeNVPTXAssignValidGlobalNamesPass(PR);
-  initializeNVPTXInferAddressSpacesPass(PR);
   initializeNVPTXLowerArgsPass(PR);
   initializeNVPTXLowerAllocaPass(PR);
   initializeNVPTXLowerAggrCopiesPass(PR);
@@ -195,7 +193,7 @@ void NVPTXPassConfig::addAddressSpaceInf
   // be eliminated by SROA.
   addPass(createSROAPass());
   addPass(createNVPTXLowerAllocaPass());
-  addPass(createNVPTXInferAddressSpacesPass());
+  addPass(createInferAddressSpacesPass());
 }
 
 void NVPTXPassConfig::addStraightLineScalarOptimizationPasses() {

Modified: llvm/trunk/lib/Transforms/Scalar/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/CMakeLists.txt?rev=293579&r1=293578&r2=293579&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/CMakeLists.txt (original)
+++ llvm/trunk/lib/Transforms/Scalar/CMakeLists.txt Mon Jan 30 19:10:58 2017
@@ -16,6 +16,7 @@ add_llvm_library(LLVMScalarOpts
   IVUsersPrinter.cpp
   InductiveRangeCheckElimination.cpp
   IndVarSimplify.cpp
+  InferAddressSpaces.cpp
   JumpThreading.cpp
   LICM.cpp
   LoopAccessAnalysisPrinter.cpp

Copied: llvm/trunk/lib/Transforms/Scalar/InferAddressSpaces.cpp (from r293577, llvm/trunk/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/InferAddressSpaces.cpp?p2=llvm/trunk/lib/Transforms/Scalar/InferAddressSpaces.cpp&p1=llvm/trunk/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp&r1=293577&r2=293579&rev=293579&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/InferAddressSpaces.cpp Mon Jan 30 19:10:58 2017
@@ -89,7 +89,7 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "NVPTX.h"
+#include "llvm/Transforms/Scalar.h"
 #include "llvm/ADT/DenseSet.h"
 #include "llvm/ADT/Optional.h"
 #include "llvm/ADT/SetVector.h"
@@ -103,7 +103,7 @@
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Utils/ValueMapper.h"
 
-#define DEBUG_TYPE "nvptx-infer-addrspace"
+#define DEBUG_TYPE "infer-address-spaces"
 
 using namespace llvm;
 
@@ -112,8 +112,8 @@ static const unsigned UnknownAddressSpac
 
 using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;
 
-/// \brief NVPTXInferAddressSpaces
-class NVPTXInferAddressSpaces: public FunctionPass {
+/// \brief InferAddressSpaces
+class InferAddressSpaces: public FunctionPass {
   /// Target-specific address space whose uses should be replaced if
   /// possible.
   unsigned FlatAddrSpace;
@@ -121,7 +121,7 @@ class NVPTXInferAddressSpaces: public Fu
 public:
   static char ID;
 
-  NVPTXInferAddressSpaces() : FunctionPass(ID) {}
+  InferAddressSpaces() : FunctionPass(ID) {}
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.setPreservesCFG();
@@ -162,13 +162,13 @@ private:
 };
 } // end anonymous namespace
 
-char NVPTXInferAddressSpaces::ID = 0;
+char InferAddressSpaces::ID = 0;
 
 namespace llvm {
-void initializeNVPTXInferAddressSpacesPass(PassRegistry &);
+void initializeInferAddressSpacesPass(PassRegistry &);
 }
-INITIALIZE_PASS(NVPTXInferAddressSpaces, "nvptx-infer-addrspace",
-                "Infer address spaces",
+
+INITIALIZE_PASS(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
                 false, false)
 
 // Returns true if V is an address expression.
@@ -212,9 +212,9 @@ static SmallVector<Value *, 2> getPointe
 
 // If V is an unvisited flat address expression, appends V to PostorderStack
 // and marks it as visited.
-void NVPTXInferAddressSpaces::appendsFlatAddressExpressionToPostorderStack(
-    Value *V, std::vector<std::pair<Value *, bool>> *PostorderStack,
-    DenseSet<Value *> *Visited) const {
+void InferAddressSpaces::appendsFlatAddressExpressionToPostorderStack(
+  Value *V, std::vector<std::pair<Value *, bool>> *PostorderStack,
+  DenseSet<Value *> *Visited) const {
   assert(V->getType()->isPointerTy());
   if (isAddressExpression(*V) &&
       V->getType()->getPointerAddressSpace() == FlatAddrSpace) {
@@ -226,7 +226,7 @@ void NVPTXInferAddressSpaces::appendsFla
 // Returns all flat address expressions in function F. The elements are ordered
 // in postorder.
 std::vector<Value *>
-NVPTXInferAddressSpaces::collectFlatAddressExpressions(Function &F) const {
+InferAddressSpaces::collectFlatAddressExpressions(Function &F) const {
   // This function implements a non-recursive postorder traversal of a partial
   // use-def graph of function F.
   std::vector<std::pair<Value*, bool>> PostorderStack;
@@ -237,10 +237,10 @@ NVPTXInferAddressSpaces::collectFlatAddr
   for (Instruction &I : instructions(F)) {
     if (isa<LoadInst>(I)) {
       appendsFlatAddressExpressionToPostorderStack(
-          I.getOperand(0), &PostorderStack, &Visited);
+        I.getOperand(0), &PostorderStack, &Visited);
     } else if (isa<StoreInst>(I)) {
       appendsFlatAddressExpressionToPostorderStack(
-          I.getOperand(1), &PostorderStack, &Visited);
+        I.getOperand(1), &PostorderStack, &Visited);
     }
   }
 
@@ -257,7 +257,7 @@ NVPTXInferAddressSpaces::collectFlatAddr
     PostorderStack.back().second = true;
     for (Value *PtrOperand : getPointerOperands(*PostorderStack.back().first)) {
       appendsFlatAddressExpressionToPostorderStack(
-          PtrOperand, &PostorderStack, &Visited);
+        PtrOperand, &PostorderStack, &Visited);
     }
   }
   return Postorder;
@@ -267,16 +267,16 @@ NVPTXInferAddressSpaces::collectFlatAddr
 // of OperandUse.get() in the new address space. If the clone is not ready yet,
 // returns an undef in the new address space as a placeholder.
 static Value *operandWithNewAddressSpaceOrCreateUndef(
-    const Use &OperandUse, unsigned NewAddrSpace,
-    const ValueToValueMapTy &ValueWithNewAddrSpace,
-    SmallVectorImpl<const Use *> *UndefUsesToFix) {
+  const Use &OperandUse, unsigned NewAddrSpace,
+  const ValueToValueMapTy &ValueWithNewAddrSpace,
+  SmallVectorImpl<const Use *> *UndefUsesToFix) {
   Value *Operand = OperandUse.get();
   if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
     return NewOperand;
 
   UndefUsesToFix->push_back(&OperandUse);
   return UndefValue::get(
-      Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace));
+    Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace));
 }
 
 // Returns a clone of `I` with its operands converted to those specified in
@@ -289,11 +289,11 @@ static Value *operandWithNewAddressSpace
 // from a pointer whose type already matches. Therefore, this function returns a
 // Value* instead of an Instruction*.
 static Value *cloneInstructionWithNewAddressSpace(
-    Instruction *I, unsigned NewAddrSpace,
-    const ValueToValueMapTy &ValueWithNewAddrSpace,
-    SmallVectorImpl<const Use *> *UndefUsesToFix) {
+  Instruction *I, unsigned NewAddrSpace,
+  const ValueToValueMapTy &ValueWithNewAddrSpace,
+  SmallVectorImpl<const Use *> *UndefUsesToFix) {
   Type *NewPtrType =
-      I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);
+    I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);
 
   if (I->getOpcode() == Instruction::AddrSpaceCast) {
     Value *Src = I->getOperand(0);
@@ -313,7 +313,7 @@ static Value *cloneInstructionWithNewAdd
       NewPointerOperands.push_back(nullptr);
     else
       NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
-          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
+                                     OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
   }
 
   switch (I->getOpcode()) {
@@ -333,8 +333,8 @@ static Value *cloneInstructionWithNewAdd
   case Instruction::GetElementPtr: {
     GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
     GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
-        GEP->getSourceElementType(), NewPointerOperands[0],
-        SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
+      GEP->getSourceElementType(), NewPointerOperands[0],
+      SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
     NewGEP->setIsInBounds(GEP->isInBounds());
     return NewGEP;
   }
@@ -347,10 +347,10 @@ static Value *cloneInstructionWithNewAdd
 // constant expression `CE` with its operands replaced as specified in
 // ValueWithNewAddrSpace.
 static Value *cloneConstantExprWithNewAddressSpace(
-    ConstantExpr *CE, unsigned NewAddrSpace,
-    const ValueToValueMapTy &ValueWithNewAddrSpace) {
+  ConstantExpr *CE, unsigned NewAddrSpace,
+  const ValueToValueMapTy &ValueWithNewAddrSpace) {
   Type *TargetType =
-      CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);
+    CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);
 
   if (CE->getOpcode() == Instruction::AddrSpaceCast) {
     // Because CE is flat, the source address space must be specific.
@@ -382,8 +382,8 @@ static Value *cloneConstantExprWithNewAd
     // Needs to specify the source type while constructing a getelementptr
     // constant expression.
     return CE->getWithOperands(
-        NewOperands, TargetType, /*OnlyIfReduced=*/false,
-        NewOperands[0]->getType()->getPointerElementType());
+      NewOperands, TargetType, /*OnlyIfReduced=*/false,
+      NewOperands[0]->getType()->getPointerElementType());
   }
 
   return CE->getWithOperands(NewOperands, TargetType);
@@ -394,7 +394,7 @@ static Value *cloneConstantExprWithNewAd
 // expression whose address space needs to be modified, in postorder.
 //
 // See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
-Value *NVPTXInferAddressSpaces::cloneValueWithNewAddressSpace(
+Value *InferAddressSpaces::cloneValueWithNewAddressSpace(
   Value *V, unsigned NewAddrSpace,
   const ValueToValueMapTy &ValueWithNewAddrSpace,
   SmallVectorImpl<const Use *> *UndefUsesToFix) const {
@@ -404,7 +404,7 @@ Value *NVPTXInferAddressSpaces::cloneVal
 
   if (Instruction *I = dyn_cast<Instruction>(V)) {
     Value *NewV = cloneInstructionWithNewAddressSpace(
-        I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
+      I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
     if (Instruction *NewI = dyn_cast<Instruction>(NewV)) {
       if (NewI->getParent() == nullptr) {
         NewI->insertBefore(I);
@@ -415,13 +415,13 @@ Value *NVPTXInferAddressSpaces::cloneVal
   }
 
   return cloneConstantExprWithNewAddressSpace(
-      cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace);
+    cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace);
 }
 
 // Defines the join operation on the address space lattice (see the file header
 // comments).
-unsigned NVPTXInferAddressSpaces::joinAddressSpaces(unsigned AS1,
-                                                    unsigned AS2) const {
+unsigned InferAddressSpaces::joinAddressSpaces(unsigned AS1,
+                                               unsigned AS2) const {
   if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace)
     return FlatAddrSpace;
 
@@ -434,7 +434,7 @@ unsigned NVPTXInferAddressSpaces::joinAd
   return (AS1 == AS2) ? AS1 : FlatAddrSpace;
 }
 
-bool NVPTXInferAddressSpaces::runOnFunction(Function &F) {
+bool InferAddressSpaces::runOnFunction(Function &F) {
   if (skipFunction(F))
     return false;
 
@@ -456,9 +456,9 @@ bool NVPTXInferAddressSpaces::runOnFunct
   return rewriteWithNewAddressSpaces(Postorder, InferredAddrSpace, &F);
 }
 
-void NVPTXInferAddressSpaces::inferAddressSpaces(
-    const std::vector<Value *> &Postorder,
-    ValueToAddrSpaceMapTy *InferredAddrSpace) const {
+void InferAddressSpaces::inferAddressSpaces(
+  const std::vector<Value *> &Postorder,
+  ValueToAddrSpaceMapTy *InferredAddrSpace) const {
   SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
   // Initially, all expressions are in the uninitialized address space.
   for (Value *V : Postorder)
@@ -490,8 +490,8 @@ void NVPTXInferAddressSpaces::inferAddre
         continue;
 
       // Function updateAddressSpace moves the address space down a lattice
-      // path. Therefore, nothing to do if User is already inferred as flat
-      // (the bottom element in the lattice).
+      // path. Therefore, nothing to do if User is already inferred as flat (the
+      // bottom element in the lattice).
       if (Pos->second == FlatAddrSpace)
         continue;
 
@@ -500,8 +500,8 @@ void NVPTXInferAddressSpaces::inferAddre
   }
 }
 
-Optional<unsigned> NVPTXInferAddressSpaces::updateAddressSpace(
-    const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) const {
+Optional<unsigned> InferAddressSpaces::updateAddressSpace(
+  const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) const {
   assert(InferredAddrSpace.count(&V));
 
   // The new inferred address space equals the join of the address spaces
@@ -514,7 +514,8 @@ Optional<unsigned> NVPTXInferAddressSpac
     else
       OperandAS = PtrOperand->getType()->getPointerAddressSpace();
     NewAS = joinAddressSpaces(NewAS, OperandAS);
-    // join(flat, *) = flat. So we can break if NewAS is already generic.
+
+    // join(flat, *) = flat. So we can break if NewAS is already flat.
     if (NewAS == FlatAddrSpace)
       break;
   }
@@ -526,9 +527,9 @@ Optional<unsigned> NVPTXInferAddressSpac
   return NewAS;
 }
 
-bool NVPTXInferAddressSpaces::rewriteWithNewAddressSpaces(
-    const std::vector<Value *> &Postorder,
-    const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
+bool InferAddressSpaces::rewriteWithNewAddressSpaces(
+  const std::vector<Value *> &Postorder,
+  const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
   // For each address expression to be modified, creates a clone of it with its
   // pointer operands converted to the new address space. Since the pointer
   // operands are converted, the clone is naturally in the new address space by
@@ -539,7 +540,7 @@ bool NVPTXInferAddressSpaces::rewriteWit
     unsigned NewAddrSpace = InferredAddrSpace.lookup(V);
     if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
       ValueWithNewAddrSpace[V] = cloneValueWithNewAddressSpace(
-          V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
+        V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
     }
   }
 
@@ -577,15 +578,15 @@ bool NVPTXInferAddressSpaces::rewriteWit
         // so the resultant load/store is still valid.
         U->set(NewV);
       } else if (isa<Instruction>(U->getUser())) {
-        // Otherwise, replaces the use with generic(NewV).
+        // Otherwise, replaces the use with flat(NewV).
         // TODO: Some optimization opportunities are missed. For example, in
         //   %0 = icmp eq float* %p, %q
         // if both p and q are inferred to be shared, we can rewrite %0 as
         //   %0 = icmp eq float addrspace(3)* %new_p, %new_q
         // instead of currently
-        //   %generic_p = addrspacecast float addrspace(3)* %new_p to float*
-        //   %generic_q = addrspacecast float addrspace(3)* %new_q to float*
-        //   %0 = icmp eq float* %generic_p, %generic_q
+        //   %flat_p = addrspacecast float addrspace(3)* %new_p to float*
+        //   %flat_q = addrspacecast float addrspace(3)* %new_q to float*
+        //   %0 = icmp eq float* %flat_p, %flat_q
         if (Instruction *I = dyn_cast<Instruction>(V)) {
           BasicBlock::iterator InsertPos = std::next(I->getIterator());
           while (isa<PHINode>(InsertPos))
@@ -604,6 +605,6 @@ bool NVPTXInferAddressSpaces::rewriteWit
   return true;
 }
 
-FunctionPass *llvm::createNVPTXInferAddressSpacesPass() {
-  return new NVPTXInferAddressSpaces();
+FunctionPass *llvm::createInferAddressSpacesPass() {
+  return new InferAddressSpaces();
 }

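With the code in lib/Transforms/Scalar, the only target-specific input left is TargetTransformInfo::getFlatAddressSpace(), queried in runOnFunction (see the listing above); when it returns the unknown sentinel the pass bails out immediately. A rough sketch of driving the relocated pass from a standalone tool follows; the wiring and the function name are assumptions for illustration, though the individual calls are ordinary LLVM APIs of this era.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Scalar.h"

// Hypothetical driver (not from this patch). TM should be a target machine
// whose TTI reports a flat address space (e.g. NVPTX); with a default TTI the
// flat address space stays unknown and the pass makes no changes.
static bool runInferAddressSpaces(llvm::Module &M, llvm::TargetMachine &TM) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createTargetTransformInfoWrapperPass(TM.getTargetIRAnalysis()));
  PM.add(llvm::createInferAddressSpacesPass());
  return PM.run(M);
}
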
Modified: llvm/trunk/lib/Transforms/Scalar/Scalar.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/Scalar.cpp?rev=293579&r1=293578&r2=293579&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/Scalar.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/Scalar.cpp Mon Jan 30 19:10:58 2017
@@ -50,6 +50,7 @@ void llvm::initializeScalarOpts(PassRegi
   initializeFlattenCFGPassPass(Registry);
   initializeInductiveRangeCheckEliminationPass(Registry);
   initializeIndVarSimplifyLegacyPassPass(Registry);
+  initializeInferAddressSpacesPass(Registry);
   initializeJumpThreadingPass(Registry);
   initializeLegacyLICMPassPass(Registry);
   initializeLegacyLoopSinkPassPass(Registry);

Modified: llvm/trunk/test/CodeGen/NVPTX/access-non-generic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/NVPTX/access-non-generic.ll?rev=293579&r1=293578&r2=293579&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/NVPTX/access-non-generic.ll (original)
+++ llvm/trunk/test/CodeGen/NVPTX/access-non-generic.ll Mon Jan 30 19:10:58 2017
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix PTX
 ; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix PTX
-; RUN: opt -mtriple=nvptx-- < %s -S -nvptx-infer-addrspace | FileCheck %s --check-prefix IR
-; RUN: opt -mtriple=nvptx64-- < %s -S -nvptx-infer-addrspace | FileCheck %s --check-prefix IR
+; RUN: opt -mtriple=nvptx-- < %s -S -infer-address-spaces | FileCheck %s --check-prefix IR
+; RUN: opt -mtriple=nvptx64-- < %s -S -infer-address-spaces | FileCheck %s --check-prefix IR
 
 @array = internal addrspace(3) global [10 x float] zeroinitializer, align 4
 @scalar = internal addrspace(3) global float 0.000000e+00, align 4

Modified: llvm/trunk/test/CodeGen/NVPTX/lower-alloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/NVPTX/lower-alloca.ll?rev=293579&r1=293578&r2=293579&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/NVPTX/lower-alloca.ll (original)
+++ llvm/trunk/test/CodeGen/NVPTX/lower-alloca.ll Mon Jan 30 19:10:58 2017
@@ -1,4 +1,4 @@
-; RUN: opt < %s -S -nvptx-lower-alloca -nvptx-infer-addrspace | FileCheck %s
+; RUN: opt < %s -S -nvptx-lower-alloca -infer-address-spaces | FileCheck %s
 ; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 | FileCheck %s --check-prefix PTX
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"



