[llvm] r263916 - [NVPTX] Adds a new address space inference pass.

Jingyue Wu via llvm-commits llvm-commits at lists.llvm.org
Sun Mar 20 13:59:21 PDT 2016


Author: jingyue
Date: Sun Mar 20 15:59:20 2016
New Revision: 263916

URL: http://llvm.org/viewvc/llvm-project?rev=263916&view=rev
Log:
[NVPTX] Adds a new address space inference pass.

Summary:
The old address space inference pass (NVPTXFavorNonGenericAddrSpaces) is unable
to convert the address space of a pointer induction variable. This patch adds a
new pass called NVPTXInferAddressSpaces that overcomes that limitation using a
fixed-point data-flow analysis (see the file header comments for details).
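
For illustration, here is a minimal sketch (adapted from the @loop test added
in this patch) of the pattern the old pass cannot handle: a pointer induction
variable, i.e. a phi over a getelementptr inside a loop.

  entry:
    %p = addrspacecast [10 x float] addrspace(3)* @array to float*
    br label %loop
  loop:
    %i = phi float* [ %p, %entry ], [ %i2, %loop ]
    %v = load float, float* %i
    %i2 = getelementptr float, float* %i, i64 1
    ...

The new pass infers that %i and %i2 can only point to addrspace(3), so the
load can be emitted as ld.shared.f32.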

The new pass is experimental and not enabled by default. Users can turn
it on by setting the -nvptx-use-infer-addrspace flag of llc.
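
For example, mirroring the RUN lines added to
test/CodeGen/NVPTX/access-non-generic.ll, the new pass can be exercised with
either of the following (foo.ll stands in for any input module):

  llc < foo.ll -march=nvptx64 -mcpu=sm_20 -nvptx-use-infer-addrspace
  opt < foo.ll -S -nvptx-infer-addrspace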

Reviewers: jholewinski, tra, jlebar

Subscribers: jholewinski, llvm-commits

Differential Revision: http://reviews.llvm.org/D17965

Added:
    llvm/trunk/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp
Modified:
    llvm/trunk/lib/Target/NVPTX/CMakeLists.txt
    llvm/trunk/lib/Target/NVPTX/NVPTX.h
    llvm/trunk/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp
    llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp
    llvm/trunk/test/CodeGen/NVPTX/access-non-generic.ll

Modified: llvm/trunk/lib/Target/NVPTX/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/CMakeLists.txt?rev=263916&r1=263915&r2=263916&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/NVPTX/CMakeLists.txt Sun Mar 20 15:59:20 2016
@@ -18,6 +18,7 @@ set(NVPTXCodeGen_sources
   NVPTXISelDAGToDAG.cpp
   NVPTXISelLowering.cpp
   NVPTXImageOptimizer.cpp
+  NVPTXInferAddressSpaces.cpp
   NVPTXInstrInfo.cpp
   NVPTXLowerAggrCopies.cpp
   NVPTXLowerKernelArgs.cpp

Modified: llvm/trunk/lib/Target/NVPTX/NVPTX.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTX.h?rev=263916&r1=263915&r2=263916&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTX.h (original)
+++ llvm/trunk/lib/Target/NVPTX/NVPTX.h Sun Mar 20 15:59:20 2016
@@ -46,6 +46,7 @@ FunctionPass *createNVPTXISelDag(NVPTXTa
 ModulePass *createNVPTXAssignValidGlobalNamesPass();
 ModulePass *createGenericToNVVMPass();
 FunctionPass *createNVPTXFavorNonGenericAddrSpacesPass();
+FunctionPass *createNVPTXInferAddressSpacesPass();
 ModulePass *createNVVMReflectPass();
 ModulePass *createNVVMReflectPass(const StringMap<int>& Mapping);
 MachineFunctionPass *createNVPTXPrologEpilogPass();

Modified: llvm/trunk/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp?rev=263916&r1=263915&r2=263916&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp (original)
+++ llvm/trunk/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp Sun Mar 20 15:59:20 2016
@@ -7,6 +7,9 @@
 //
 //===----------------------------------------------------------------------===//
 //
+// FIXME: This pass is deprecated in favor of NVPTXInferAddressSpaces, which
+// uses a new algorithm that handles pointer induction variables.
+//
 // When a load/store accesses the generic address space, checks whether the
 // address is casted from a non-generic address space. If so, remove this
 // addrspacecast because accessing non-generic address spaces is typically
@@ -164,8 +167,8 @@ Value *NVPTXFavorNonGenericAddrSpaces::h
         GEP->getSourceElementType(), Cast->getOperand(0), Indices,
         "", GEPI);
     NewGEP->setIsInBounds(GEP->isInBounds());
+    NewGEP->takeName(GEP);
     NewASC = new AddrSpaceCastInst(NewGEP, GEP->getType(), "", GEPI);
-    NewASC->takeName(GEP);
     // Without RAUWing GEP, the compiler would visit GEP again and emit
     // redundant instructions. This is exercised in test @rauw in
     // access-non-generic.ll.

Added: llvm/trunk/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp?rev=263916&view=auto
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp (added)
+++ llvm/trunk/lib/Target/NVPTX/NVPTXInferAddressSpaces.cpp Sun Mar 20 15:59:20 2016
@@ -0,0 +1,584 @@
+//===-- NVPTXInferAddressSpaces.cpp - Infer address spaces ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// CUDA C/C++ includes memory space designation as variable type qualifiers (such
+// as __device__ and __shared__). Knowing the space of a memory access allows
+// CUDA compilers to emit faster PTX loads and stores. For example, a load from
+// shared memory can be translated to `ld.shared` which is roughly 10% faster
+// than a generic `ld` on an NVIDIA Tesla K40c.
+//
+// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
+// compilers must infer the memory space of an address expression from
+// type-qualified variables.
+//
+// LLVM IR uses non-zero (so-called specific) address spaces to represent memory
+// spaces (e.g. addrspace(3) means shared memory). The Clang frontend
+// places only type-qualified variables in specific address spaces, and then
+// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
+// (the so-called generic address space) for other instructions to use.
+//
+// For example, Clang translates the following CUDA code
+//   __shared__ float a[10];
+//   float v = a[i];
+// to
+//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
+//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
+//   %v = load float, float* %1 ; emits ld.f32
+// @a is in addrspace(3) since it's type-qualified, but the access in %1 goes
+// through %0 (the generic version of @a).
+//
+// The optimization implemented in this file propagates specific address spaces
+// from type-qualified variable declarations to their users. For example, it
+// optimizes the above IR to
+//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
+//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
+// propagating the addrspace(3) from @a to %1. As a result, the NVPTX
+// codegen is able to emit ld.shared.f32 for %v.
+//
+// Address space inference works in two steps. First, it uses a data-flow
+// analysis to infer as many generic pointers as possible to point to only one
+// specific address space. In the above example, it can prove that %1 only
+// points to addrspace(3). This algorithm was published in
+//   CUDA: Compiling and optimizing for a GPU platform
+//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
+//   ICCS 2012
+//
+// Then, address space inference replaces all refinable generic pointers with
+// equivalent specific pointers.
+//
+// The major challenge of implementing this optimization is handling PHINodes,
+// which may create loops in the data flow graph. This brings two complications.
+//
+// First, the data flow analysis in Step 1 needs to be circular. For example,
+//     %generic.input = addrspacecast float addrspace(3)* %input to float*
+//   loop:
+//     %y = phi [ %generic.input, %y2 ]
+//     %y2 = getelementptr %y, 1
+//     %v = load %y2
+//     br ..., label %loop, ...
+// proving %y specific requires proving both %generic.input and %y2 specific,
+// but proving %y2 specific circles back to %y. To address this complication,
+// the data flow analysis operates on a lattice:
+//   uninitialized > specific address spaces > generic.
+// All address expressions (our implementation only considers phi, bitcast,
+// addrspacecast, and getelementptr) start with the uninitialized address space.
+// The monotone transfer function moves the address space of a pointer down a
+// lattice path from uninitialized to specific and then to generic. A join
+// operation of two different specific address spaces pushes the expression down
+// to the generic address space. The analysis completes once it reaches a fixed
+// point.
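+//
+// For example, a sketch of the join operation (as implemented by
+// joinAddressSpaces below), writing shared for addrspace(3) and global for
+// addrspace(1):
+//   join(uninitialized, shared) = shared
+//   join(shared, shared)        = shared
+//   join(shared, global)        = generic
+//   join(generic, <any>)        = generic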
+//
+// Second, IR rewriting in Step 2 also needs to be circular. For example,
+// converting %y to addrspace(3) requires the compiler to know the converted
+// %y2, but converting %y2 needs the converted %y. To address this complication,
+// we break these cycles using "undef" placeholders. When converting an
+// instruction `I` to a new address space, if its operand `Op` is not converted
+// yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
+// For instance, our algorithm first converts %y to
+//   %y' = phi float addrspace(3)* [ %input, undef ]
+// Then, it converts %y2 to
+//   %y2' = getelementptr %y', 1
+// Finally, it fixes the undef in %y' so that
+//   %y' = phi float addrspace(3)* [ %input, %y2' ]
+//
+// TODO: This pass is experimental and not enabled by default. Users can turn it
+// on by setting the -nvptx-use-infer-addrspace flag of llc. We plan to replace
+// NVPTXFavorNonGenericAddrSpaces with this pass shortly.
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "nvptx-infer-addrspace"
+
+#include "NVPTX.h"
+#include "MCTargetDesc/NVPTXBaseInfo.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+
+using namespace llvm;
+
+namespace {
+const unsigned ADDRESS_SPACE_UNINITIALIZED = (unsigned)-1;
+
+using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;
+
+/// \brief Infers specific address spaces for generic address expressions.
+class NVPTXInferAddressSpaces: public FunctionPass {
+public:
+  static char ID;
+
+  NVPTXInferAddressSpaces() : FunctionPass(ID) {}
+
+  bool runOnFunction(Function &F) override;
+
+private:
+  // Returns the new address space of V if updated; otherwise, returns None.
+  Optional<unsigned>
+  updateAddressSpace(const Value &V,
+                     const ValueToAddrSpaceMapTy &InferredAddrSpace);
+
+  // Tries to infer the specific address space of each address expression in
+  // Postorder.
+  void inferAddressSpaces(const std::vector<Value *> &Postorder,
+                          ValueToAddrSpaceMapTy *InferredAddrSpace);
+
+  // Changes the generic address expressions in function F to point to specific
+  // address spaces if InferredAddrSpace says so. Postorder is the postorder of
+  // all generic address expressions in the use-def graph of function F.
+  bool
+  rewriteWithNewAddressSpaces(const std::vector<Value *> &Postorder,
+                              const ValueToAddrSpaceMapTy &InferredAddrSpace,
+                              Function *F);
+};
+} // end anonymous namespace
+
+char NVPTXInferAddressSpaces::ID = 0;
+
+namespace llvm {
+void initializeNVPTXInferAddressSpacesPass(PassRegistry &);
+}
+INITIALIZE_PASS(NVPTXInferAddressSpaces, "nvptx-infer-addrspace",
+                "Infer address spaces",
+                false, false)
+
+// Returns true if V is an address expression.
+// TODO: Currently, we consider only phi, bitcast, addrspacecast, and
+// getelementptr operators.
+static bool isAddressExpression(const Value &V) {
+  if (!isa<Operator>(V))
+    return false;
+
+  switch (cast<Operator>(V).getOpcode()) {
+  case Instruction::PHI:
+  case Instruction::BitCast:
+  case Instruction::AddrSpaceCast:
+  case Instruction::GetElementPtr:
+    return true;
+  default:
+    return false;
+  }
+}
+
+// Returns the pointer operands of V.
+//
+// Precondition: V is an address expression.
+static SmallVector<Value *, 2> getPointerOperands(const Value &V) {
+  assert(isAddressExpression(V));
+  const Operator& Op = cast<Operator>(V);
+  switch (Op.getOpcode()) {
+  case Instruction::PHI: {
+    auto IncomingValues = cast<PHINode>(Op).incoming_values();
+    return SmallVector<Value *, 2>(IncomingValues.begin(),
+                                   IncomingValues.end());
+  }
+  case Instruction::BitCast:
+  case Instruction::AddrSpaceCast:
+  case Instruction::GetElementPtr:
+    return {Op.getOperand(0)};
+  default:
+    llvm_unreachable("Unexpected instruction type.");
+  }
+}
+
+// If V is an unvisited generic address expression, appends V to PostorderStack
+// and marks it as visited.
+static void appendsGenericAddressExpressionToPostorderStack(
+    Value *V, std::vector<std::pair<Value *, bool>> *PostorderStack,
+    DenseSet<Value *> *Visited) {
+  assert(V->getType()->isPointerTy());
+  if (isAddressExpression(*V) &&
+      V->getType()->getPointerAddressSpace() ==
+          AddressSpace::ADDRESS_SPACE_GENERIC) {
+    if (Visited->insert(V).second)
+      PostorderStack->push_back(std::make_pair(V, false));
+  }
+}
+
+// Returns all generic address expressions in function F. The elements are
+// ordered in postorder.
+static std::vector<Value *> collectGenericAddressExpressions(Function &F) {
+  // This function implements a non-recursive postorder traversal of a partial
+  // use-def graph of function F.
+  std::vector<std::pair<Value*, bool>> PostorderStack;
+  // The set of visited expressions.
+  DenseSet<Value*> Visited;
+  // We only explore address expressions that are reachable from loads and
+  // stores for now because we aim at generating faster loads and stores.
+  for (Instruction &I : instructions(F)) {
+    if (isa<LoadInst>(I)) {
+      appendsGenericAddressExpressionToPostorderStack(
+          I.getOperand(0), &PostorderStack, &Visited);
+    } else if (isa<StoreInst>(I)) {
+      appendsGenericAddressExpressionToPostorderStack(
+          I.getOperand(1), &PostorderStack, &Visited);
+    }
+  }
+
+  std::vector<Value *> Postorder; // The resultant postorder.
+  while (!PostorderStack.empty()) {
+    // If the operands of the expression on the top are already explored,
+    // adds that expression to the resultant postorder.
+    if (PostorderStack.back().second) {
+      Postorder.push_back(PostorderStack.back().first);
+      PostorderStack.pop_back();
+      continue;
+    }
+    // Otherwise, adds its operands to the stack and explores them.
+    PostorderStack.back().second = true;
+    for (Value *PtrOperand : getPointerOperands(*PostorderStack.back().first)) {
+      appendsGenericAddressExpressionToPostorderStack(
+          PtrOperand, &PostorderStack, &Visited);
+    }
+  }
+  return Postorder;
+}
+
+// A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
+// of OperandUse.get() in the new address space. If the clone is not ready yet,
+// returns an undef in the new address space as a placeholder.
+static Value *operandWithNewAddressSpaceOrCreateUndef(
+    const Use &OperandUse, unsigned NewAddrSpace,
+    const ValueToValueMapTy &ValueWithNewAddrSpace,
+    SmallVectorImpl<const Use *> *UndefUsesToFix) {
+  Value *Operand = OperandUse.get();
+  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
+    return NewOperand;
+
+  UndefUsesToFix->push_back(&OperandUse);
+  return UndefValue::get(
+      Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace));
+}
+
+// Returns a clone of `I` with its operands converted to those specified in
+// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
+// operand whose address space needs to be modified might not exist in
+// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
+// adds that operand use to UndefUsesToFix so that the caller can fix them later.
+//
+// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
+// from a pointer whose type already matches. Therefore, this function returns a
+// Value* instead of an Instruction*.
+static Value *cloneInstructionWithNewAddressSpace(
+    Instruction *I, unsigned NewAddrSpace,
+    const ValueToValueMapTy &ValueWithNewAddrSpace,
+    SmallVectorImpl<const Use *> *UndefUsesToFix) {
+  Type *NewPtrType =
+      I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);
+
+  if (I->getOpcode() == Instruction::AddrSpaceCast) {
+    Value *Src = I->getOperand(0);
+    // Because `I` is generic, the source address space must be specific.
+    // Therefore, the inferred address space must be the source space, according
+    // to our algorithm.
+    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
+    if (Src->getType() != NewPtrType)
+      return new BitCastInst(Src, NewPtrType);
+    return Src;
+  }
+
+  // Computes the converted pointer operands.
+  SmallVector<Value *, 4> NewPointerOperands;
+  for (const Use &OperandUse : I->operands()) {
+    if (!OperandUse.get()->getType()->isPointerTy())
+      NewPointerOperands.push_back(nullptr);
+    else
+      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
+          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
+  }
+
+  switch (I->getOpcode()) {
+  case Instruction::BitCast:
+    return new BitCastInst(NewPointerOperands[0], NewPtrType);
+  case Instruction::PHI: {
+    assert(I->getType()->isPointerTy());
+    PHINode *PHI = cast<PHINode>(I);
+    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
+    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
+      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
+      NewPHI->addIncoming(NewPointerOperands[OperandNo],
+                          PHI->getIncomingBlock(Index));
+    }
+    return NewPHI;
+  }
+  case Instruction::GetElementPtr: {
+    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
+    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
+        GEP->getSourceElementType(), NewPointerOperands[0],
+        SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
+    NewGEP->setIsInBounds(GEP->isInBounds());
+    return NewGEP;
+  }
+  default:
+    llvm_unreachable("Unexpected opcode");
+  }
+}
+
+// Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
+// constant expression `CE` with its operands replaced as specified in
+// ValueWithNewAddrSpace.
+static Value *cloneConstantExprWithNewAddressSpace(
+    ConstantExpr *CE, unsigned NewAddrSpace,
+    const ValueToValueMapTy &ValueWithNewAddrSpace) {
+  Type *TargetType =
+      CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);
+
+  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
+    // Because CE is generic, the source address space must be specific.
+    // Therefore, the inferred address space must be the source space according
+    // to our algorithm.
+    assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
+           NewAddrSpace);
+    return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
+  }
+
+  // Computes the operands of the new constant expression.
+  SmallVector<Constant *, 4> NewOperands;
+  for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
+    Constant *Operand = CE->getOperand(Index);
+    // If the address space of `Operand` needs to be modified, the new operand
+    // with the new address space should already be in ValueWithNewAddrSpace
+    // because (1) the constant expressions we consider (i.e. addrspacecast,
+    // bitcast, and getelementptr) do not incur cycles in the data flow graph
+    // and (2) this function is called on constant expressions in postorder.
+    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
+      NewOperands.push_back(cast<Constant>(NewOperand));
+    } else {
+      // Otherwise, reuses the old operand.
+      NewOperands.push_back(Operand);
+    }
+  }
+
+  if (CE->getOpcode() == Instruction::GetElementPtr) {
+    // Needs to specify the source type while constructing a getelementptr
+    // constant expression.
+    return CE->getWithOperands(
+        NewOperands, TargetType, /*OnlyIfReduced=*/false,
+        NewOperands[0]->getType()->getPointerElementType());
+  }
+
+  return CE->getWithOperands(NewOperands, TargetType);
+}
+
+// Returns a clone of the value `V`, with its operands replaced as specified in
+// ValueWithNewAddrSpace. This function is called on every generic address
+// expression whose address space needs to be modified, in postorder.
+//
+// See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
+static Value *
+cloneValueWithNewAddressSpace(Value *V, unsigned NewAddrSpace,
+                              const ValueToValueMapTy &ValueWithNewAddrSpace,
+                              SmallVectorImpl<const Use *> *UndefUsesToFix) {
+  // All values in Postorder are generic address expressions.
+  assert(isAddressExpression(*V) &&
+         V->getType()->getPointerAddressSpace() ==
+             AddressSpace::ADDRESS_SPACE_GENERIC);
+
+  if (Instruction *I = dyn_cast<Instruction>(V)) {
+    Value *NewV = cloneInstructionWithNewAddressSpace(
+        I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
+    if (Instruction *NewI = dyn_cast<Instruction>(NewV)) {
+      if (NewI->getParent() == nullptr) {
+        NewI->insertBefore(I);
+        NewI->takeName(I);
+      }
+    }
+    return NewV;
+  }
+
+  return cloneConstantExprWithNewAddressSpace(
+      cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace);
+}
+
+// Defines the join operation on the address space lattice (see the file header
+// comments).
+static unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) {
+  if (AS1 == AddressSpace::ADDRESS_SPACE_GENERIC ||
+      AS2 == AddressSpace::ADDRESS_SPACE_GENERIC)
+    return AddressSpace::ADDRESS_SPACE_GENERIC;
+
+  if (AS1 == ADDRESS_SPACE_UNINITIALIZED)
+    return AS2;
+  if (AS2 == ADDRESS_SPACE_UNINITIALIZED)
+    return AS1;
+
+  // The join of two different specific address spaces is generic.
+  return AS1 == AS2 ? AS1 : (unsigned)AddressSpace::ADDRESS_SPACE_GENERIC;
+}
+
+bool NVPTXInferAddressSpaces::runOnFunction(Function &F) {
+  // Collects all generic address expressions in postorder.
+  std::vector<Value *> Postorder = collectGenericAddressExpressions(F);
+
+  // Runs a data-flow analysis to refine the address space of every expression
+  // in Postorder.
+  ValueToAddrSpaceMapTy InferredAddrSpace;
+  inferAddressSpaces(Postorder, &InferredAddrSpace);
+
+  // Changes the address spaces of the generic address expressions that are
+  // inferred to point to a specific address space.
+  return rewriteWithNewAddressSpaces(Postorder, InferredAddrSpace, &F);
+}
+
+void NVPTXInferAddressSpaces::inferAddressSpaces(
+    const std::vector<Value *> &Postorder,
+    ValueToAddrSpaceMapTy *InferredAddrSpace) {
+  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
+  // Initially, all expressions are in the uninitialized address space.
+  for (Value *V : Postorder)
+    (*InferredAddrSpace)[V] = ADDRESS_SPACE_UNINITIALIZED;
+
+  while (!Worklist.empty()) {
+    Value* V = Worklist.pop_back_val();
+
+    // Tries to update the address space of V according to the
+    // address spaces of its operands.
+    DEBUG(dbgs() << "Updating the address space of\n"
+                 << "  " << *V << "\n");
+    Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace);
+    if (!NewAS.hasValue())
+      continue;
+    // If any update is made, adds the users of V to the worklist because
+    // their address spaces may also need to be updated.
+    DEBUG(dbgs() << "  to " << NewAS.getValue() << "\n");
+    (*InferredAddrSpace)[V] = NewAS.getValue();
+
+    for (Value *User : V->users()) {
+      // Skip if User is already in the worklist.
+      if (Worklist.count(User))
+        continue;
+
+      auto Pos = InferredAddrSpace->find(User);
+      // Our algorithm only updates the address spaces of generic address
+      // expressions, which are those in InferredAddrSpace.
+      if (Pos == InferredAddrSpace->end())
+        continue;
+
+      // Function updateAddressSpace moves the address space down a lattice
+      // path. Therefore, there is nothing to do if User is already inferred as
+      // generic (the bottom element in the lattice).
+      if (Pos->second == AddressSpace::ADDRESS_SPACE_GENERIC)
+        continue;
+
+      Worklist.insert(User);
+    }
+  }
+}
+
+Optional<unsigned> NVPTXInferAddressSpaces::updateAddressSpace(
+    const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) {
+  assert(InferredAddrSpace.count(&V));
+
+  // The new inferred address space equals the join of the address spaces
+  // of all its pointer operands.
+  unsigned NewAS = ADDRESS_SPACE_UNINITIALIZED;
+  for (Value *PtrOperand : getPointerOperands(V)) {
+    unsigned OperandAS;
+    if (InferredAddrSpace.count(PtrOperand))
+      OperandAS = InferredAddrSpace.lookup(PtrOperand);
+    else
+      OperandAS = PtrOperand->getType()->getPointerAddressSpace();
+    NewAS = joinAddressSpaces(NewAS, OperandAS);
+    // join(generic, *) = generic. So we can break if NewAS is already generic.
+    if (NewAS == AddressSpace::ADDRESS_SPACE_GENERIC)
+      break;
+  }
+
+  unsigned OldAS = InferredAddrSpace.lookup(&V);
+  assert(OldAS != AddressSpace::ADDRESS_SPACE_GENERIC);
+  if (OldAS == NewAS)
+    return None;
+  return NewAS;
+}
+
+bool NVPTXInferAddressSpaces::rewriteWithNewAddressSpaces(
+    const std::vector<Value *> &Postorder,
+    const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) {
+  // For each address expression to be modified, creates a clone of it with its
+  // pointer operands converted to the new address space. Since the pointer
+  // operands are converted, the clone is naturally in the new address space by
+  // construction.
+  ValueToValueMapTy ValueWithNewAddrSpace;
+  SmallVector<const Use *, 32> UndefUsesToFix;
+  for (Value* V : Postorder) {
+    unsigned NewAddrSpace = InferredAddrSpace.lookup(V);
+    if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
+      ValueWithNewAddrSpace[V] = cloneValueWithNewAddressSpace(
+          V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
+    }
+  }
+
+  if (ValueWithNewAddrSpace.empty())
+    return false;
+
+  // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
+  for (const Use* UndefUse : UndefUsesToFix) {
+    User *V = UndefUse->getUser();
+    User *NewV = cast<User>(ValueWithNewAddrSpace.lookup(V));
+    unsigned OperandNo = UndefUse->getOperandNo();
+    assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
+    NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
+  }
+
+  // Replaces the uses of the old address expressions with the new ones.
+  for (Value *V : Postorder) {
+    Value *NewV = ValueWithNewAddrSpace.lookup(V);
+    if (NewV == nullptr)
+      continue;
+
+    SmallVector<Use *, 4> Uses;
+    for (Use &U : V->uses())
+      Uses.push_back(&U);
+    DEBUG(dbgs() << "Replacing the uses of " << *V << "\n  to\n  " << *NewV
+                 << "\n");
+    for (Use *U : Uses) {
+      if (isa<LoadInst>(U->getUser()) ||
+          (isa<StoreInst>(U->getUser()) && U->getOperandNo() == 1)) {
+        // If V is used as the pointer operand of a load/store, sets the pointer
+        // operand to NewV. This replacement does not change the element type,
+        // so the resultant load/store is still valid.
+        U->set(NewV);
+      } else if (isa<Instruction>(U->getUser())) {
+        // Otherwise, replaces the use with generic(NewV).
+        // TODO: Some optimization opportunities are missed. For example, in
+        //   %0 = icmp eq float* %p, %q
+        // if both p and q are inferred to be shared, we can rewrite %0 as
+        //   %0 = icmp eq float addrspace(3)* %new_p, %new_q
+        // instead of currently
+        //   %generic_p = addrspacecast float addrspace(3)* %new_p to float*
+        //   %generic_q = addrspacecast float addrspace(3)* %new_q to float*
+        //   %0 = icmp eq float* %generic_p, %generic_q
+        if (Instruction *I = dyn_cast<Instruction>(V)) {
+          BasicBlock::iterator InsertPos = std::next(I->getIterator());
+          while (isa<PHINode>(InsertPos))
+            ++InsertPos;
+          U->set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
+        } else {
+          U->set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
+                                                V->getType()));
+        }
+      }
+    }
+    if (V->use_empty())
+      RecursivelyDeleteTriviallyDeadInstructions(V);
+  }
+
+  return true;
+}
+
+FunctionPass *llvm::createNVPTXInferAddressSpacesPass() {
+  return new NVPTXInferAddressSpaces();
+}

Modified: llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp?rev=263916&r1=263915&r2=263916&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp Sun Mar 20 15:59:20 2016
@@ -48,12 +48,18 @@
 
 using namespace llvm;
 
+static cl::opt<bool> UseInferAddressSpaces(
+    "nvptx-use-infer-addrspace", cl::init(false), cl::Hidden,
+    cl::desc("Optimize address spaces using NVPTXInferAddressSpaces instead of "
+             "NVPTXFavorNonGenericAddrSpaces"));
+
 namespace llvm {
 void initializeNVVMReflectPass(PassRegistry&);
 void initializeGenericToNVVMPass(PassRegistry&);
 void initializeNVPTXAllocaHoistingPass(PassRegistry &);
 void initializeNVPTXAssignValidGlobalNamesPass(PassRegistry&);
 void initializeNVPTXFavorNonGenericAddrSpacesPass(PassRegistry &);
+void initializeNVPTXInferAddressSpacesPass(PassRegistry &);
 void initializeNVPTXLowerAggrCopiesPass(PassRegistry &);
 void initializeNVPTXLowerKernelArgsPass(PassRegistry &);
 void initializeNVPTXLowerAllocaPass(PassRegistry &);
@@ -72,6 +78,7 @@ extern "C" void LLVMInitializeNVPTXTarge
   initializeNVPTXAllocaHoistingPass(PR);
   initializeNVPTXAssignValidGlobalNamesPass(PR);
   initializeNVPTXFavorNonGenericAddrSpacesPass(PR);
+  initializeNVPTXInferAddressSpacesPass(PR);
   initializeNVPTXLowerKernelArgsPass(PR);
   initializeNVPTXLowerAllocaPass(PR);
   initializeNVPTXLowerAggrCopiesPass(PR);
@@ -149,7 +156,7 @@ private:
   void addEarlyCSEOrGVNPass();
 
   // Add passes that propagate special memory spaces.
-  void addMemorySpaceInferencePasses();
+  void addAddressSpaceInferencePasses();
 
   // Add passes that perform straight-line scalar optimizations.
   void addStraightLineScalarOptimizationPasses();
@@ -173,17 +180,21 @@ void NVPTXPassConfig::addEarlyCSEOrGVNPa
     addPass(createEarlyCSEPass());
 }
 
-void NVPTXPassConfig::addMemorySpaceInferencePasses() {
+void NVPTXPassConfig::addAddressSpaceInferencePasses() {
   addPass(createNVPTXLowerKernelArgsPass(&getNVPTXTargetMachine()));
   // NVPTXLowerKernelArgs emits alloca for byval parameters which can often
   // be eliminated by SROA.
   addPass(createSROAPass());
   addPass(createNVPTXLowerAllocaPass());
-  addPass(createNVPTXFavorNonGenericAddrSpacesPass());
-  // FavorNonGenericAddrSpaces shortcuts unnecessary addrspacecasts, and leave
-  // them unused. We could remove dead code in an ad-hoc manner, but that
-  // requires manual work and might be error-prone.
-  addPass(createDeadCodeEliminationPass());
+  if (UseInferAddressSpaces) {
+    addPass(createNVPTXInferAddressSpacesPass());
+  } else {
+    addPass(createNVPTXFavorNonGenericAddrSpacesPass());
+    // FavorNonGenericAddrSpaces shortcuts unnecessary addrspacecasts and leaves
+    // them unused. We could remove dead code in an ad-hoc manner, but that
+    // requires manual work and might be error-prone.
+    addPass(createDeadCodeEliminationPass());
+  }
 }
 
 void NVPTXPassConfig::addStraightLineScalarOptimizationPasses() {
@@ -220,7 +231,7 @@ void NVPTXPassConfig::addIRPasses() {
   addPass(createGenericToNVVMPass());
 
   if (getOptLevel() != CodeGenOpt::None) {
-    addMemorySpaceInferencePasses();
+    addAddressSpaceInferencePasses();
     addStraightLineScalarOptimizationPasses();
   }
 

Modified: llvm/trunk/test/CodeGen/NVPTX/access-non-generic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/NVPTX/access-non-generic.ll?rev=263916&r1=263915&r2=263916&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/NVPTX/access-non-generic.ll (original)
+++ llvm/trunk/test/CodeGen/NVPTX/access-non-generic.ll Sun Mar 20 15:59:20 2016
@@ -1,9 +1,18 @@
 ; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix PTX
 ; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix PTX
+; RUN: llc < %s  -march=nvptx64 -mcpu=sm_20 -nvptx-use-infer-addrspace | FileCheck %s --check-prefix PTX
 ; RUN: opt < %s -S -nvptx-favor-non-generic -dce | FileCheck %s --check-prefix IR
+; RUN: opt < %s -S -nvptx-infer-addrspace | FileCheck %s --check-prefix IR --check-prefix IR-WITH-LOOP
 
 @array = internal addrspace(3) global [10 x float] zeroinitializer, align 4
 @scalar = internal addrspace(3) global float 0.000000e+00, align 4
+@generic_scalar = internal global float 0.000000e+00, align 4
+
+define float @ld_from_shared() {
+  %1 = addrspacecast float* @generic_scalar to float addrspace(3)*
+  %2 = load float, float addrspace(3)* %1
+  ret float %2
+}
 
 ; Verifies nvptx-favor-non-generic correctly optimizes generic address space
 ; usage to non-generic address space usage for the patterns we claim to handle:
@@ -13,12 +22,13 @@
 ; 4. store gep cast
 ; gep and cast can be an instruction or a constant expression. This function
 ; tries all possible combinations.
-define float @ld_st_shared_f32(i32 %i, float %v) {
+define void @ld_st_shared_f32(i32 %i, float %v) {
 ; IR-LABEL: @ld_st_shared_f32
 ; IR-NOT: addrspacecast
 ; PTX-LABEL: ld_st_shared_f32(
   ; load cast
   %1 = load float, float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
+  call void @use(float %1)
 ; PTX: ld.shared.f32 %f{{[0-9]+}}, [scalar];
   ; store cast
   store float %v, float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
@@ -30,6 +40,7 @@ define float @ld_st_shared_f32(i32 %i, f
   ; cast; load
   %2 = addrspacecast float addrspace(3)* @scalar to float*
   %3 = load float, float* %2, align 4
+  call void @use(float %3)
 ; PTX: ld.shared.f32 %f{{[0-9]+}}, [scalar];
   ; cast; store
   store float %v, float* %2, align 4
@@ -39,6 +50,7 @@ define float @ld_st_shared_f32(i32 %i, f
 
   ; load gep cast
   %4 = load float, float* getelementptr inbounds ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
+  call void @use(float %4)
 ; PTX: ld.shared.f32 %f{{[0-9]+}}, [array+20];
   ; store gep cast
   store float %v, float* getelementptr inbounds ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
@@ -49,6 +61,7 @@ define float @ld_st_shared_f32(i32 %i, f
   ; gep cast; load
   %5 = getelementptr inbounds [10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5
   %6 = load float, float* %5, align 4
+  call void @use(float %6)
 ; PTX: ld.shared.f32 %f{{[0-9]+}}, [array+20];
   ; gep cast; store
   store float %v, float* %5, align 4
@@ -60,6 +73,7 @@ define float @ld_st_shared_f32(i32 %i, f
   %7 = addrspacecast [10 x float] addrspace(3)* @array to [10 x float]*
   %8 = getelementptr inbounds [10 x float], [10 x float]* %7, i32 0, i32 %i
   %9 = load float, float* %8, align 4
+  call void @use(float %9)
 ; PTX: ld.shared.f32 %f{{[0-9]+}}, [%{{(r|rl|rd)[0-9]+}}];
   ; cast; gep; store
   store float %v, float* %8, align 4
@@ -67,11 +81,7 @@ define float @ld_st_shared_f32(i32 %i, f
   call void @llvm.cuda.syncthreads()
 ; PTX: bar.sync 0;
 
-  %sum2 = fadd float %1, %3
-  %sum3 = fadd float %sum2, %4
-  %sum4 = fadd float %sum3, %6
-  %sum5 = fadd float %sum4, %9
-  ret float %sum5
+  ret void
 }
 
 ; When hoisting an addrspacecast between different pointer types, replace the
@@ -117,13 +127,62 @@ define void @rauw(float addrspace(1)* %i
   store float %v, float* %addr
   ret void
 ; IR-LABEL: @rauw(
-; IR-NEXT: %1 = getelementptr float, float addrspace(1)* %input, i64 10
-; IR-NEXT: %v = load float, float addrspace(1)* %1
-; IR-NEXT: store float %v, float addrspace(1)* %1
+; IR-NEXT: %addr = getelementptr float, float addrspace(1)* %input, i64 10
+; IR-NEXT: %v = load float, float addrspace(1)* %addr
+; IR-NEXT: store float %v, float addrspace(1)* %addr
 ; IR-NEXT: ret void
 }
 
+define void @loop() {
+; IR-WITH-LOOP-LABEL: @loop(
+entry:
+  %p = addrspacecast [10 x float] addrspace(3)* @array to float*
+  %end = getelementptr float, float* %p, i64 10
+  br label %loop
+
+loop:
+  %i = phi float* [ %p, %entry ], [ %i2, %loop ]
+; IR-WITH-LOOP: phi float addrspace(3)* [ %p, %entry ], [ %i2, %loop ]
+  %v = load float, float* %i
+; IR-WITH-LOOP: %v = load float, float addrspace(3)* %i
+  call void @use(float %v)
+  %i2 = getelementptr float, float* %i, i64 1
+; IR-WITH-LOOP: %i2 = getelementptr float, float addrspace(3)* %i, i64 1
+  %exit_cond = icmp eq float* %i2, %end
+  br i1 %exit_cond, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+@generic_end = external global float*
+
+define void @loop_with_generic_bound() {
+; IR-WITH-LOOP-LABEL: @loop_with_generic_bound(
+entry:
+  %p = addrspacecast [10 x float] addrspace(3)* @array to float*
+  %end = load float*, float** @generic_end
+  br label %loop
+
+loop:
+  %i = phi float* [ %p, %entry ], [ %i2, %loop ]
+; IR-WITH-LOOP: phi float addrspace(3)* [ %p, %entry ], [ %i2, %loop ]
+  %v = load float, float* %i
+; IR-WITH-LOOP: %v = load float, float addrspace(3)* %i
+  call void @use(float %v)
+  %i2 = getelementptr float, float* %i, i64 1
+; IR-WITH-LOOP: %i2 = getelementptr float, float addrspace(3)* %i, i64 1
+  %exit_cond = icmp eq float* %i2, %end
+; IR-WITH-LOOP: addrspacecast float addrspace(3)* %i2 to float*
+; IR-WITH-LOOP: icmp eq float* %{{[0-9]+}}, %end
+  br i1 %exit_cond, label %exit, label %loop
+
+exit:
+  ret void
+}
+
 declare void @llvm.cuda.syncthreads() #3
 
-attributes #3 = { noduplicate nounwind }
+declare void @use(float)
 
+attributes #3 = { noduplicate nounwind }



