[llvm] r207783 - Add an optimization that does CSE in a group of similar GEPs.

Eli Bendersky eliben at google.com
Thu May 1 12:27:30 PDT 2014


I've seen the bot failures now - working on it.


On Thu, May 1, 2014 at 11:38 AM, Eli Bendersky <eliben at google.com> wrote:

> Author: eliben
> Date: Thu May  1 13:38:36 2014
> New Revision: 207783
>
> URL: http://llvm.org/viewvc/llvm-project?rev=207783&view=rev
> Log:
> Add an optimization that does CSE in a group of similar GEPs.
>
> This optimization merges the common part of a group of GEPs, so we can compute
> each pointer address by adding a simple offset to the common part.
>
> The optimization is currently only enabled for the NVPTX backend, where it has
> a large payoff on some benchmarks.
>
> Review: http://reviews.llvm.org/D3462
>
> Patch by Jingyue Wu.
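
To make the payoff concrete before diving into the diff: at the source level, the
pass's effect is analogous to hoisting one shared base address out of a group of
neighboring array accesses. A minimal C++ sketch of that effect (illustrative
only, not part of the patch):

    // Before: each access recomputes the full address from x and y.
    float sum_before(float a[32][32], unsigned x, unsigned y) {
      return a[x][y] + a[x][y + 1] + a[x + 1][y] + a[x + 1][y + 1];
    }

    // After: one common base plus small constant offsets, which a backend
    // can fold into reg+immediate addressing.
    float sum_after(float a[32][32], unsigned x, unsigned y) {
      float *base = &a[x][y];      // the merged common part
      return base[0] + base[1]     // &a[x][y+1]   == base + 1
           + base[32] + base[33];  // &a[x+1][y]   == base + 32, etc.
    }
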
>
>
>
> Added:
>     llvm/trunk/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
>     llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/
>     llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/
>     llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
>     llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
>     llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
> Modified:
>     llvm/trunk/include/llvm/InitializePasses.h
>     llvm/trunk/include/llvm/LinkAllPasses.h
>     llvm/trunk/include/llvm/Transforms/Scalar.h
>     llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp
>     llvm/trunk/lib/Transforms/Scalar/Scalar.cpp
>
> Modified: llvm/trunk/include/llvm/InitializePasses.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/InitializePasses.h?rev=207783&r1=207782&r2=207783&view=diff
>
> ==============================================================================
> --- llvm/trunk/include/llvm/InitializePasses.h (original)
> +++ llvm/trunk/include/llvm/InitializePasses.h Thu May  1 13:38:36 2014
> @@ -238,6 +238,7 @@ void initializeSimpleInlinerPass(PassReg
>  void initializeRegisterCoalescerPass(PassRegistry&);
>  void initializeSingleLoopExtractorPass(PassRegistry&);
>  void initializeSinkingPass(PassRegistry&);
> +void initializeSeparateConstOffsetFromGEPPass(PassRegistry &);
>  void initializeSlotIndexesPass(PassRegistry&);
>  void initializeSpillPlacementPass(PassRegistry&);
>  void initializeStackProtectorPass(PassRegistry&);
>
> Modified: llvm/trunk/include/llvm/LinkAllPasses.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/LinkAllPasses.h?rev=207783&r1=207782&r2=207783&view=diff
>
> ==============================================================================
> --- llvm/trunk/include/llvm/LinkAllPasses.h (original)
> +++ llvm/trunk/include/llvm/LinkAllPasses.h Thu May  1 13:38:36 2014
> @@ -156,6 +156,7 @@ namespace {
>        (void) llvm::createBBVectorizePass();
>        (void) llvm::createPartiallyInlineLibCallsPass();
>        (void) llvm::createScalarizerPass();
> +      (void) llvm::createSeparateConstOffsetFromGEPPass();
>
>        (void)new llvm::IntervalPartition();
>        (void)new llvm::FindUsedTypes();
>
> Modified: llvm/trunk/include/llvm/Transforms/Scalar.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Transforms/Scalar.h?rev=207783&r1=207782&r2=207783&view=diff
>
> ==============================================================================
> --- llvm/trunk/include/llvm/Transforms/Scalar.h (original)
> +++ llvm/trunk/include/llvm/Transforms/Scalar.h Thu May  1 13:38:36 2014
> @@ -377,6 +377,12 @@ FunctionPass *createScalarizerPass();
>  // AddDiscriminators - Add DWARF path discriminators to the IR.
>  FunctionPass *createAddDiscriminatorsPass();
>
>
> +//===----------------------------------------------------------------------===//
> +//
> +// SeparateConstOffsetFromGEP - Split GEPs for better CSE
> +//
> +FunctionPass *createSeparateConstOffsetFromGEPPass();
> +
>  } // End llvm namespace
>
>  #endif
>
> Modified: llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp?rev=207783&r1=207782&r2=207783&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp (original)
> +++ llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp Thu May  1 13:38:36 2014
> @@ -147,10 +147,23 @@ void NVPTXPassConfig::addIRPasses() {
>    addPass(createNVPTXAssignValidGlobalNamesPass());
>    addPass(createGenericToNVVMPass());
>    addPass(createNVPTXFavorNonGenericAddrSpacesPass());
> -  // The FavorNonGenericAddrSpaces pass may remove instructions and leave some
> -  // values unused. Therefore, we run a DCE pass right afterwards. We could
> -  // remove unused values in an ad-hoc manner, but it requires manual work and
> -  // might be error-prone.
> +  addPass(createSeparateConstOffsetFromGEPPass());
> +  // The SeparateConstOffsetFromGEP pass creates variadic bases that can be used
> +  // by multiple GEPs. Run GVN or EarlyCSE to really reuse them. GVN generates
> +  // significantly better code than EarlyCSE for some of our benchmarks.
> +  if (getOptLevel() == CodeGenOpt::Aggressive)
> +    addPass(createGVNPass());
> +  else
> +    addPass(createEarlyCSEPass());
> +  // Both FavorNonGenericAddrSpaces and SeparateConstOffsetFromGEP may leave
> +  // some dead code.  We could remove dead code in an ad-hoc manner, but that
> +  // requires manual work and might be error-prone.
> +  //
> +  // The FavorNonGenericAddrSpaces pass shortcuts unnecessary addrspacecasts,
> +  // and leaves them unused.
> +  //
> +  // SeparateConstOffsetFromGEP rebuilds a new index from the old index, and the
> +  // old index and some of its intermediate results may become unused.
>    addPass(createDeadCodeEliminationPass());
>  }
>
>
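
A note on the GVN/EarlyCSE step added above: splitting makes every GEP in a
group rebuild the same variadic base, so it is the follow-up CSE pass that
actually merges them. A rough C++ analogy (my own illustration, not the pass's
code):

    // Each split GEP rebuilds an identical base expression.
    float load_two(float (*a)[32], long x, long y) {
      float *base0 = &a[x][y];  // base rebuilt for the first access
      float *base1 = &a[x][y];  // identical base rebuilt for the second
      // GVN/EarlyCSE proves base0 == base1 and keeps one computation;
      // the trailing DCE pass then sweeps up the leftovers.
      return base0[0] + base1[1];
    }
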
> Modified: llvm/trunk/lib/Transforms/Scalar/Scalar.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/Scalar.cpp?rev=207783&r1=207782&r2=207783&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Scalar/Scalar.cpp (original)
> +++ llvm/trunk/lib/Transforms/Scalar/Scalar.cpp Thu May  1 13:38:36 2014
> @@ -64,6 +64,7 @@ void llvm::initializeScalarOpts(PassRegi
>    initializeStructurizeCFGPass(Registry);
>    initializeSinkingPass(Registry);
>    initializeTailCallElimPass(Registry);
> +  initializeSeparateConstOffsetFromGEPPass(Registry);
>  }
>
>  void LLVMInitializeScalarOpts(LLVMPassRegistryRef R) {
>
> Added: llvm/trunk/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp?rev=207783&view=auto
>
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp (added)
> +++ llvm/trunk/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp Thu May  1 13:38:36 2014
> @@ -0,0 +1,583 @@
> +//===-- SeparateConstOffsetFromGEP.cpp - ------------------------*- C++ -*-===//
> +//
> +//                     The LLVM Compiler Infrastructure
> +//
> +// This file is distributed under the University of Illinois Open Source
> +// License. See LICENSE.TXT for details.
> +//
>
> +//===----------------------------------------------------------------------===//
> +//
> +// Loop unrolling may create many similar GEPs for array accesses.
> +// e.g., a 2-level loop
> +//
> +// float a[32][32]; // global variable
> +//
> +// for (int i = 0; i < 2; ++i) {
> +//   for (int j = 0; j < 2; ++j) {
> +//     ...
> +//     ... = a[x + i][y + j];
> +//     ...
> +//   }
> +// }
> +//
> +// will probably be unrolled to:
> +//
> +// gep %a, 0, %x, %y; load
> +// gep %a, 0, %x, %y + 1; load
> +// gep %a, 0, %x + 1, %y; load
> +// gep %a, 0, %x + 1, %y + 1; load
> +//
> +// LLVM's GVN does not use partial redundancy elimination yet, and is thus
> +// unable to reuse (gep %a, 0, %x, %y). As a result, this misoptimization incurs
> +// significant slowdown in targets with limited addressing modes. For instance,
> +// because the PTX target does not support the reg+reg addressing mode, the
> +// NVPTX backend emits PTX code that literally computes the pointer address of
> +// each GEP, wasting tons of registers. It emits the following PTX for the
> +// first load and similar PTX for other loads.
> +//
> +// mov.u32         %r1, %x;
> +// mov.u32         %r2, %y;
> +// mul.wide.u32    %rl2, %r1, 128;
> +// mov.u64         %rl3, a;
> +// add.s64         %rl4, %rl3, %rl2;
> +// mul.wide.u32    %rl5, %r2, 4;
> +// add.s64         %rl6, %rl4, %rl5;
> +// ld.global.f32   %f1, [%rl6];
> +//
> +// To reduce the register pressure, the optimization implemented in this file
> +// merges the common part of a group of GEPs, so we can compute each pointer
> +// address by adding a simple offset to the common part, saving many registers.
> +//
> +// It works by splitting each GEP into a variadic base and a constant offset.
> +// The variadic base can be computed once and reused by multiple GEPs, and the
> +// constant offsets can be nicely folded into the reg+immediate addressing mode
> +// (supported by most targets) without using any extra register.
> +//
> +// For instance, we transform the four GEPs and four loads in the above example
> +// into:
> +//
> +// base = gep a, 0, x, y
> +// load base
> +// load base + 1  * sizeof(float)
> +// load base + 32 * sizeof(float)
> +// load base + 33 * sizeof(float)
> +//
> +// Given the transformed IR, a backend that supports the reg+immediate
> +// addressing mode can easily fold the pointer arithmetic into the loads. For
> +// example, the NVPTX backend can easily fold the pointer arithmetic into the
> +// ld.global.f32 instructions, and the resultant PTX uses far fewer registers.
> +//
> +// mov.u32         %r1, %x;
> +// mov.u32         %r2, %y;
> +// mul.wide.u32    %rl2, %r1, 128;
> +// mov.u64         %rl3, a;
> +// add.s64         %rl4, %rl3, %rl2;
> +// mul.wide.u32    %rl5, %r2, 4;
> +// add.s64         %rl6, %rl4, %rl5;
> +// ld.global.f32   %f1, [%rl6]; // so far the same as unoptimized PTX
> +// ld.global.f32   %f2, [%rl6+4]; // much better
> +// ld.global.f32   %f3, [%rl6+128]; // much better
> +// ld.global.f32   %f4, [%rl6+132]; // much better
> +//
>
> +//===----------------------------------------------------------------------===//
> +
> +#include "llvm/Analysis/TargetTransformInfo.h"
> +#include "llvm/Analysis/ValueTracking.h"
> +#include "llvm/IR/Constants.h"
> +#include "llvm/IR/DataLayout.h"
> +#include "llvm/IR/Instructions.h"
> +#include "llvm/IR/LLVMContext.h"
> +#include "llvm/IR/Module.h"
> +#include "llvm/IR/Operator.h"
> +#include "llvm/Support/CommandLine.h"
> +#include "llvm/Support/raw_ostream.h"
> +#include "llvm/Transforms/Scalar.h"
> +
> +using namespace llvm;
> +
> +static cl::opt<bool> DisableSeparateConstOffsetFromGEP(
> +    "disable-separate-const-offset-from-gep", cl::init(false),
> +    cl::desc("Do not separate the constant offset from a GEP instruction"),
> +    cl::Hidden);
> +
> +namespace {
> +
> +/// \brief A helper class for separating a constant offset from a GEP index.
> +///
> +/// In real programs, a GEP index may be more complicated than a simple addition
> +/// of something and a constant integer which can be trivially split. For
> +/// example, to split ((a << 3) | 5) + b, we need to search deeper for the
> +/// constant offset, so that we can separate the index to (a << 3) + b and 5.
> +///
> +/// Therefore, this class looks into the expression that computes a given GEP
> +/// index, and tries to find a constant integer that can be hoisted to the
> +/// outermost level of the expression as an addition. Not every constant in an
> +/// expression can jump out. e.g., we cannot transform (b * (a + 5)) to (b * a +
> +/// 5); nor can we transform (3 * (a + 5)) to (3 * a + 5); however, in this case,
> +/// -instcombine probably already optimized (3 * (a + 5)) to (3 * a + 15).
> +class ConstantOffsetExtractor {
> + public:
> +  /// Extracts a constant offset from the given GEP index. It outputs the
> +  /// numeric value of the extracted constant offset (0 if failed), and a
> +  /// new index representing the remainder (equal to the original index minus
> +  /// the constant offset).
> +  /// \p Idx The given GEP index
> +  /// \p NewIdx The new index that replaces the original one (output)
> +  /// \p DL The datalayout of the module
> +  /// \p IP Calculating the new index requires new instructions. IP indicates
> +  /// where to insert them (typically right before the GEP).
> +  static int64_t Extract(Value *Idx, Value *&NewIdx, const DataLayout *DL,
> +                         Instruction *IP);
> +  /// Looks for a constant offset without extracting it. The meaning of the
> +  /// arguments and the return value are the same as Extract.
> +  static int64_t Find(Value *Idx, const DataLayout *DL);
> +
> + private:
> +  ConstantOffsetExtractor(const DataLayout *Layout, Instruction *InsertionPt)
> +      : DL(Layout), IP(InsertionPt) {}
> +  /// Searches the expression that computes V for a constant offset. If the
> +  /// search is successful, updates UserChain as a path from V to the constant
> +  /// offset.
> +  int64_t find(Value *V);
> +  /// A helper function to look into both operands of a binary operator U.
> +  /// \p IsSub Whether U is a sub operator. If so, we need to negate the
> +  /// constant offset at some point.
> +  int64_t findInEitherOperand(User *U, bool IsSub);
> +  /// After finding the constant offset and how it is reached from the GEP
> +  /// index, we build a new index which is a clone of the old one except the
> +  /// constant offset is removed. For example, given (a + (b + 5)) and knowing
> +  /// the constant offset is 5, this function returns (a + b).
> +  ///
> +  /// We cannot simply change the constant to zero because the expression that
> +  /// computes the index or its intermediate result may be used by others.
> +  Value *rebuildWithoutConstantOffset();
> +  // A helper function for rebuildWithoutConstantOffset that rebuilds the direct
> +  // user (U) of the constant offset (C).
> +  Value *rebuildLeafWithoutConstantOffset(User *U, Value *C);
> +  /// Returns a clone of U with the first occurrence of From replaced by To.
> +  Value *cloneAndReplace(User *U, Value *From, Value *To);
> +
> +  /// Returns true if LHS and RHS have no bits in common, i.e., LHS & RHS == 0.
> +  bool NoCommonBits(Value *LHS, Value *RHS) const;
> +  /// Computes which bits are known to be one or zero.
> +  /// \p KnownOne Mask of all bits that are known to be one.
> +  /// \p KnownZero Mask of all bits that are known to be zero.
> +  void ComputeKnownBits(Value *V, APInt &KnownOne, APInt &KnownZero) const;
> +  /// Finds the first use of Used in U. Returns -1 if not found.
> +  static unsigned FindFirstUse(User *U, Value *Used);
> +
> +  /// The path from the constant offset to the old GEP index. e.g., if the GEP
> +  /// index is "a * b + (c + 5)". After running function find, UserChain[0] will
> +  /// be the constant 5, UserChain[1] will be the subexpression "c + 5", and
> +  /// UserChain[2] will be the entire expression "a * b + (c + 5)".
> +  ///
> +  /// This path helps rebuildWithoutConstantOffset rebuild the new GEP index.
> +  SmallVector<User *, 8> UserChain;
> +  /// The data layout of the module. Used in ComputeKnownBits.
> +  const DataLayout *DL;
> +  Instruction *IP;  /// Insertion position of cloned instructions.
> +};
> +
> +/// \brief A pass that tries to split every GEP in the function into a variadic
> +/// base and a constant offset. It is a FunctionPass because searching for the
> +/// constant offset may inspect other basic blocks.
> +class SeparateConstOffsetFromGEP : public FunctionPass {
> + public:
> +  static char ID;
> +  SeparateConstOffsetFromGEP() : FunctionPass(ID) {
> +    initializeSeparateConstOffsetFromGEPPass(*PassRegistry::getPassRegistry());
> +  }
> +
> +  void getAnalysisUsage(AnalysisUsage &AU) const override {
> +    AU.addRequired<DataLayoutPass>();
> +    AU.addRequired<TargetTransformInfo>();
> +  }
> +  bool runOnFunction(Function &F) override;
> +
> + private:
> +  /// Tries to split the given GEP into a variadic base and a constant offset,
> +  /// and returns true if the splitting succeeds.
> +  bool splitGEP(GetElementPtrInst *GEP);
> +  /// Finds the constant offset within each index, and accumulates them. This
> +  /// function only inspects the GEP without changing it. The output
> +  /// NeedsExtraction indicates whether we can extract a non-zero constant
> +  /// offset from any index.
> +  int64_t accumulateByteOffset(GetElementPtrInst *GEP, const DataLayout *DL,
> +                               bool &NeedsExtraction);
> +};
> +}  // anonymous namespace
> +
> +char SeparateConstOffsetFromGEP::ID = 0;
> +INITIALIZE_PASS_BEGIN(
> +    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
> +    "Split GEPs to a variadic base and a constant offset for better CSE", false,
> +    false)
> +INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
> +INITIALIZE_PASS_DEPENDENCY(DataLayoutPass)
> +INITIALIZE_PASS_END(
> +    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
> +    "Split GEPs to a variadic base and a constant offset for better CSE", false,
> +    false)
> +
> +FunctionPass *llvm::createSeparateConstOffsetFromGEPPass() {
> +  return new SeparateConstOffsetFromGEP();
> +}
> +
> +int64_t ConstantOffsetExtractor::findInEitherOperand(User *U, bool IsSub) {
> +  assert(U->getNumOperands() == 2);
> +  int64_t ConstantOffset = find(U->getOperand(0));
> +  // If we found a constant offset in the left operand, stop and return that.
> +  // This shortcut might cause us to miss opportunities of combining the
> +  // constant offsets in both operands, e.g., (a + 4) + (b + 5) => (a + b) + 9.
> +  // However, such cases are probably already handled by -instcombine,
> +  // given this pass runs after the standard optimizations.
> +  if (ConstantOffset != 0) return ConstantOffset;
> +  ConstantOffset = find(U->getOperand(1));
> +  // If U is a sub operator, negate the constant offset found in the right
> +  // operand.
> +  return IsSub ? -ConstantOffset : ConstantOffset;
> +}
> +
> +int64_t ConstantOffsetExtractor::find(Value *V) {
> +  // TODO(jingyue): We can even trace into integer/pointer casts, such as
> +  // inttoptr, ptrtoint, bitcast, and addrspacecast. We choose to handle only
> +  // integers because it gives good enough results for our benchmarks.
> +  assert(V->getType()->isIntegerTy());
> +
> +  User *U = dyn_cast<User>(V);
> +  // We cannot do much with Values that are not a User, such as BasicBlock and
> +  // MDNode.
> +  if (U == nullptr) return 0;
> +
> +  int64_t ConstantOffset = 0;
> +  if (ConstantInt *CI = dyn_cast<ConstantInt>(U)) {
> +    // Hooray, we found it!
> +    ConstantOffset = CI->getSExtValue();
> +  } else if (Operator *O = dyn_cast<Operator>(U)) {
> +    // The GEP index may be more complicated than a simple addition of a
> +    // variable and a constant. Therefore, we trace into subexpressions for more
> +    // hoisting opportunities.
> +    switch (O->getOpcode()) {
> +      case Instruction::Add: {
> +        ConstantOffset = findInEitherOperand(U, false);
> +        break;
> +      }
> +      case Instruction::Sub: {
> +        ConstantOffset = findInEitherOperand(U, true);
> +        break;
> +      }
> +      case Instruction::Or: {
> +        // If LHS and RHS don't have common bits, (LHS | RHS) is equivalent to
> +        // (LHS + RHS).
> +        if (NoCommonBits(U->getOperand(0), U->getOperand(1)))
> +          ConstantOffset = findInEitherOperand(U, false);
> +        break;
> +      }
> +      case Instruction::SExt: {
> +        // For safety, we trace into sext only when its operand is marked
> +        // "nsw" because xxx.nsw guarantees no signed wrap. e.g., we can safely
> +        // transform "sext (add nsw a, 5)" into "add nsw (sext a), 5".
> +        if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0))) {
> +          if (BO->hasNoSignedWrap())
> +            ConstantOffset = find(U->getOperand(0));
> +        }
> +        break;
> +      }
> +      case Instruction::ZExt: {
> +        // Similarly, we trace into zext only when its operand is marked with
> +        // "nuw" because zext (add nuw a, b) == add nuw (zext a), (zext b).
> +        if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0))) {
> +          if (BO->hasNoUnsignedWrap())
> +            ConstantOffset = find(U->getOperand(0));
> +        }
> +        break;
> +      }
> +    }
> +  }
> +  // If we found a non-zero constant offset, add it to the path for future
> +  // transformation (rebuildWithoutConstantOffset). Zero is a valid constant
> +  // offset, but doesn't help this optimization.
> +  if (ConstantOffset != 0)
> +    UserChain.push_back(U);
> +  return ConstantOffset;
> +}
> +
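
The nsw/nuw guards in the SExt/ZExt cases of find() above are load-bearing. A
quick numeric check of the sext identity in plain C++ (my own example, assuming
the usual two's complement wrap-around for the counterexample):

    #include <cassert>
    #include <climits>
    #include <cstdint>

    int main() {
      // With no signed wrap, sign extension distributes over the add:
      // sext(a + 5) == sext(a) + 5.
      int32_t a = -7;
      assert((int64_t)(a + 5) == (int64_t)a + 5);
      // When the narrow add wraps, the identity breaks -- hence the nsw guard.
      int32_t wrapped = (int32_t)((uint32_t)INT32_MAX + 5u);
      assert((int64_t)wrapped != (int64_t)INT32_MAX + 5);
      return 0;
    }
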
> +unsigned ConstantOffsetExtractor::FindFirstUse(User *U, Value *Used) {
> +  for (unsigned I = 0, E = U->getNumOperands(); I < E; ++I) {
> +    if (U->getOperand(I) == Used)
> +      return I;
> +  }
> +  return -1;
> +}
> +
> +Value *ConstantOffsetExtractor::cloneAndReplace(User *U, Value *From,
> +                                                Value *To) {
> +  // Finds in U the first use of From. It is safe to ignore future occurrences
> +  // of From, because findInEitherOperand similarly stops searching the right
> +  // operand when the first operand has a non-zero constant offset.
> +  unsigned OpNo = FindFirstUse(U, From);
> +  assert(OpNo != (unsigned)-1 && "UserChain wasn't built correctly");
> +
> +  // ConstantOffsetExtractor::find only follows Operators (i.e., Instructions
> +  // and ConstantExprs). Therefore, U is either an Instruction or a
> +  // ConstantExpr.
> +  if (Instruction *I = dyn_cast<Instruction>(U)) {
> +    Instruction *Clone = I->clone();
> +    Clone->setOperand(OpNo, To);
> +    Clone->insertBefore(IP);
> +    return Clone;
> +  }
> +  // cast<Constant>(To) is safe because a ConstantExpr only uses Constants.
> +  return cast<ConstantExpr>(U)
> +      ->getWithOperandReplaced(OpNo, cast<Constant>(To));
> +}
> +
> +Value *ConstantOffsetExtractor::rebuildLeafWithoutConstantOffset(User *U,
> +                                                                 Value *C) {
> +  assert(U->getNumOperands() <= 2 &&
> +         "We didn't trace into any operator with more than 2 operands");
> +  // If U has only one operand which is the constant offset, removing the
> +  // constant offset leaves U as a null value.
> +  if (U->getNumOperands() == 1)
> +    return Constant::getNullValue(U->getType());
> +
> +  // U->getNumOperands() == 2
> +  unsigned OpNo = FindFirstUse(U, C); // U->getOperand(OpNo) == C
> +  assert(OpNo < 2 && "UserChain wasn't built correctly");
> +  Value *TheOther = U->getOperand(1 - OpNo); // The other operand of U
> +  // If U = C - X, removing C makes U = -X; otherwise U will simply be X.
> +  if (!isa<SubOperator>(U) || OpNo == 1)
> +    return TheOther;
> +  if (isa<ConstantExpr>(U))
> +    return ConstantExpr::getNeg(cast<Constant>(TheOther));
> +  return BinaryOperator::CreateNeg(TheOther, "", IP);
> +}
> +
> +Value *ConstantOffsetExtractor::rebuildWithoutConstantOffset() {
> +  assert(UserChain.size() > 0 && "you at least found a constant, right?");
> +  // Start with the constant and go up through UserChain, each time building a
> +  // clone of the subexpression but with the constant removed.
> +  // e.g., to build a clone of (a + (b + (c + 5))) but with the 5 removed, we
> +  // first build c, then (b + c), and finally (a + (b + c)).
> +  //
> +  // Fast path: if the GEP index is a constant, simply returns 0.
> +  if (UserChain.size() == 1)
> +    return ConstantInt::get(UserChain[0]->getType(), 0);
> +
> +  Value *Remainder =
> +      rebuildLeafWithoutConstantOffset(UserChain[1], UserChain[0]);
> +  for (size_t I = 2; I < UserChain.size(); ++I)
> +    Remainder = cloneAndReplace(UserChain[I], UserChain[I - 1], Remainder);
> +  return Remainder;
> +}
> +
> +int64_t ConstantOffsetExtractor::Extract(Value *Idx, Value *&NewIdx,
> +                                         const DataLayout *DL,
> +                                         Instruction *IP) {
> +  ConstantOffsetExtractor Extractor(DL, IP);
> +  // Find a non-zero constant offset first.
> +  int64_t ConstantOffset = Extractor.find(Idx);
> +  if (ConstantOffset == 0)
> +    return 0;
> +  // Then rebuild a new index with the constant removed.
> +  NewIdx = Extractor.rebuildWithoutConstantOffset();
> +  return ConstantOffset;
> +}
> +
> +int64_t ConstantOffsetExtractor::Find(Value *Idx, const DataLayout *DL) {
> +  return ConstantOffsetExtractor(DL, nullptr).find(Idx);
> +}
> +
> +void ConstantOffsetExtractor::ComputeKnownBits(Value *V, APInt &KnownOne,
> +                                               APInt &KnownZero) const {
> +  IntegerType *IT = cast<IntegerType>(V->getType());
> +  KnownOne = APInt(IT->getBitWidth(), 0);
> +  KnownZero = APInt(IT->getBitWidth(), 0);
> +  llvm::ComputeMaskedBits(V, KnownZero, KnownOne, DL, 0);
> +}
> +
> +bool ConstantOffsetExtractor::NoCommonBits(Value *LHS, Value *RHS) const {
> +  assert(LHS->getType() == RHS->getType() &&
> +         "LHS and RHS should have the same type");
> +  APInt LHSKnownOne, LHSKnownZero, RHSKnownOne, RHSKnownZero;
> +  ComputeKnownBits(LHS, LHSKnownOne, LHSKnownZero);
> +  ComputeKnownBits(RHS, RHSKnownOne, RHSKnownZero);
> +  return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
> +}
> +
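
The OR-as-ADD rewrite that NoCommonBits enables can be sanity-checked with the
class comment's own example, ((a << 3) | 5): a << 3 has zeros in its low three
bits and 5 fits entirely inside them. In plain C++ (my own check):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint64_t a : {0ull, 1ull, 9ull, 123456ull}) {
        uint64_t lhs = a << 3, rhs = 5;
        assert((lhs & rhs) == 0);          // no common bits...
        assert((lhs | rhs) == lhs + rhs);  // ...so the OR behaves like an ADD
      }
      return 0;
    }
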
> +int64_t SeparateConstOffsetFromGEP::accumulateByteOffset(
> +    GetElementPtrInst *GEP, const DataLayout *DL, bool &NeedsExtraction) {
> +  NeedsExtraction = false;
> +  int64_t AccumulativeByteOffset = 0;
> +  gep_type_iterator GTI = gep_type_begin(*GEP);
> +  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
> +    if (isa<SequentialType>(*GTI)) {
> +      // Tries to extract a constant offset from this GEP index.
> +      int64_t ConstantOffset =
> +          ConstantOffsetExtractor::Find(GEP->getOperand(I), DL);
> +      if (ConstantOffset != 0) {
> +        NeedsExtraction = true;
> +        // A GEP may have multiple indices.  We accumulate the extracted
> +        // constant offset to a byte offset, and later offset the remainder of
> +        // the original GEP with this byte offset.
> +        AccumulativeByteOffset +=
> +            ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
> +      }
> +    }
> +  }
> +  return AccumulativeByteOffset;
> +}
> +
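
Worked by hand, accumulateByteOffset sums constant-index times alloc-size per
GEP level. For a gep into [32 x [32 x float]] whose indices carry the constants
+1 (row) and +2 (element), that is 1 * 128 + 2 * 4 bytes (my own check; the
same 136 reappears in the sext_zext test below):

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t rowBytes  = 32 * sizeof(float);  // 128: one row of the array
      int64_t elemBytes = sizeof(float);       // 4
      assert(1 * rowBytes + 2 * elemBytes == 136);
      return 0;
    }
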
> +bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
> +  // Skip vector GEPs.
> +  if (GEP->getType()->isVectorTy())
> +    return false;
> +
> +  // The backend can already nicely handle the case where all indices are
> +  // constant.
> +  if (GEP->hasAllConstantIndices())
> +    return false;
> +
> +  bool Changed = false;
> +
> +  // Shortcuts integer casts. Eliminating these explicit casts can make
> +  // subsequent optimizations more obvious: ConstantOffsetExtractor needn't
> +  // trace into these casts.
> +  if (GEP->isInBounds()) {
> +    // Doing this to inbounds GEPs is safe because their indices are guaranteed
> +    // to be non-negative and in bounds.
> +    gep_type_iterator GTI = gep_type_begin(*GEP);
> +    for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
> +      if (isa<SequentialType>(*GTI)) {
> +        if (Operator *O = dyn_cast<Operator>(GEP->getOperand(I))) {
> +          if (O->getOpcode() == Instruction::SExt ||
> +              O->getOpcode() == Instruction::ZExt) {
> +            GEP->setOperand(I, O->getOperand(0));
> +            Changed = true;
> +          }
> +        }
> +      }
> +    }
> +  }
> +
> +  const DataLayout *DL = &getAnalysis<DataLayoutPass>().getDataLayout();
> +  bool NeedsExtraction;
> +  int64_t AccumulativeByteOffset =
> +      accumulateByteOffset(GEP, DL, NeedsExtraction);
> +
> +  if (!NeedsExtraction)
> +    return Changed;
> +  // Before really splitting the GEP, check whether the backend supports the
> +  // addressing mode we are about to produce. If not, this splitting probably
> +  // won't be beneficial.
> +  TargetTransformInfo &TTI = getAnalysis<TargetTransformInfo>();
> +  if (!TTI.isLegalAddressingMode(GEP->getType()->getElementType(),
> +                                 /*BaseGV=*/nullptr, AccumulativeByteOffset,
> +                                 /*HasBaseReg=*/true, /*Scale=*/0)) {
> +    return Changed;
> +  }
> +
> +  // Remove the constant offset in each GEP index. The resultant GEP computes
> +  // the variadic base.
> +  gep_type_iterator GTI = gep_type_begin(*GEP);
> +  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
> +    if (isa<SequentialType>(*GTI)) {
> +      Value *NewIdx = nullptr;
> +      // Tries to extract a constant offset from this GEP index.
> +      int64_t ConstantOffset =
> +          ConstantOffsetExtractor::Extract(GEP->getOperand(I), NewIdx, DL, GEP);
> +      if (ConstantOffset != 0) {
> +        assert(NewIdx && "ConstantOffset != 0 implies NewIdx is set");
> +        GEP->setOperand(I, NewIdx);
> +        // Clear the inbounds attribute because the new index may be off-bound.
> +        // e.g.,
> +        //
> +        // b = add i64 a, 5
> +        // addr = gep inbounds float* p, i64 b
> +        //
> +        // is transformed to:
> +        //
> +        // addr2 = gep float* p, i64 a
> +        // addr = gep float* addr2, i64 5
> +        //
> +        // If a is -4, although the old index b is in bounds, the new index a is
> +        // off-bound. http://llvm.org/docs/LangRef.html#id181 says "if the
> +        // inbounds keyword is not present, the offsets are added to the base
> +        // address with silently-wrapping two's complement arithmetic".
> +        // Therefore, the final code will be semantically equivalent.
> +        //
> +        // TODO(jingyue): do some range analysis to keep as many inbounds as
> +        // possible. GEPs with inbounds are more friendly to alias analysis.
> +        GEP->setIsInBounds(false);
> +        Changed = true;
> +      }
> +    }
> +  }
> +
> +  // Offsets the base with the accumulative byte offset.
> +  //
> +  //   %gep                        ; the base
> +  //   ... %gep ...
> +  //
> +  // => add the offset
> +  //
> +  //   %gep2                       ; clone of %gep
> +  //   %0       = ptrtoint %gep2
> +  //   %1       = add %0, <offset>
> +  //   %new.gep = inttoptr %1
> +  //   %gep                        ; will be removed
> +  //   ... %gep ...
> +  //
> +  // => replace all uses of %gep with %new.gep and remove %gep
> +  //
> +  //   %gep2                       ; clone of %gep
> +  //   %0       = ptrtoint %gep2
> +  //   %1       = add %0, <offset>
> +  //   %new.gep = inttoptr %1
> +  //   ... %new.gep ...
> +  //
> +  // TODO(jingyue): Emit a GEP instead of an "uglygep"
> +  // (http://llvm.org/docs/GetElementPtr.html#what-s-an-uglygep) to make the IR
> +  // prettier and more alias analysis friendly. One caveat: if the original GEP
> +  // ends with a StructType, we need to split the GEP at the last
> +  // SequentialType. For instance, consider the following IR:
> +  //
> +  //   %struct.S = type { float, double }
> +  //   @array = global [1024 x %struct.S]
> +  //   %p = getelementptr %array, 0, %i + 5, 1
> +  //
> +  // To separate the constant 5 from %p, we would need to split %p at the last
> +  // array index so that we have:
> +  //
> +  //   %addr = gep %array, 0, %i
> +  //   %p = gep %addr, 5, 1
> +  Instruction *NewGEP = GEP->clone();
> +  NewGEP->insertBefore(GEP);
> +  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
> +  Value *Addr = new PtrToIntInst(NewGEP, IntPtrTy, "", GEP);
> +  Addr = BinaryOperator::CreateAdd(
> +      Addr, ConstantInt::get(IntPtrTy, AccumulativeByteOffset, true), "", GEP);
> +  Addr = new IntToPtrInst(Addr, GEP->getType(), "", GEP);
> +
> +  GEP->replaceAllUsesWith(Addr);
> +  GEP->eraseFromParent();
> +
> +  return true;
> +}
> +
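
The ptrtoint/add/inttoptr sequence emitted at the end of splitGEP (the
"uglygep" the TODO wants to get rid of) is ordinary integer pointer math. A C++
rendering of the same idea (an analogy, not the pass's code):

    #include <cstdint>

    // Apply the accumulated byte offset to the variadic base the way the
    // emitted IR does: pointer -> integer -> add -> pointer.
    float *offset_base(float *variadic_base, int64_t byte_offset) {
      uintptr_t addr = reinterpret_cast<uintptr_t>(variadic_base); // ptrtoint
      addr += byte_offset;                                         // add
      return reinterpret_cast<float *>(addr);                      // inttoptr
    }
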
> +bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) {
> +  if (DisableSeparateConstOffsetFromGEP)
> +    return false;
> +
> +  bool Changed = false;
> +  for (Function::iterator B = F.begin(), BE = F.end(); B != BE; ++B) {
> +    for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ) {
> +      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I++)) {
> +        Changed |= splitGEP(GEP);
> +      }
> +      // No need to split GEP ConstantExprs because all their indices are
> +      // constant already.
> +    }
> +  }
> +  return Changed;
> +}
>
> Added: llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg?rev=207783&view=auto
>
> ==============================================================================
> --- llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg (added)
> +++ llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg Thu May  1 13:38:36 2014
> @@ -0,0 +1,4 @@
> +targets = set(config.root.targets_to_build.split())
> +if not 'NVPTX' in targets:
> +    config.unsupported = True
> +
>
> Added: llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll?rev=207783&view=auto
>
> ==============================================================================
> --- llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll (added)
> +++ llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll Thu May  1 13:38:36 2014
> @@ -0,0 +1,60 @@
> +; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix=PTX
> +; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix=PTX
> +; RUN: opt < %s -S -separate-const-offset-from-gep -gvn -dce | FileCheck %s --check-prefix=IR
> +
> +; Verifies the SeparateConstOffsetFromGEP pass.
> +; The following code computes
> +; *output = array[x][y] + array[x][y+1] + array[x+1][y] + array[x+1][y+1]
> +;
> +; We expect SeparateConstOffsetFromGEP to transform it to
> +;
> +; float *base = &a[x][y];
> +; *output = base[0] + base[1] + base[32] + base[33];
> +;
> +; so the backend can emit PTX that uses fewer virtual registers.
> +
> +target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
> +target triple = "nvptx64-unknown-unknown"
> +
> +@array = internal addrspace(3) constant [32 x [32 x float]] zeroinitializer, align 4
> +
> +define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) {
> +.preheader:
> +  %0 = zext i32 %y to i64
> +  %1 = zext i32 %x to i64
> +  %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
> +  %3 = addrspacecast float addrspace(3)* %2 to float*
> +  %4 = load float* %3, align 4
> +  %5 = fadd float %4, 0.000000e+00
> +  %6 = add i32 %y, 1
> +  %7 = zext i32 %6 to i64
> +  %8 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
> +  %9 = addrspacecast float addrspace(3)* %8 to float*
> +  %10 = load float* %9, align 4
> +  %11 = fadd float %5, %10
> +  %12 = add i32 %x, 1
> +  %13 = zext i32 %12 to i64
> +  %14 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
> +  %15 = addrspacecast float addrspace(3)* %14 to float*
> +  %16 = load float* %15, align 4
> +  %17 = fadd float %11, %16
> +  %18 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
> +  %19 = addrspacecast float addrspace(3)* %18 to float*
> +  %20 = load float* %19, align 4
> +  %21 = fadd float %17, %20
> +  store float %21, float* %output, align 4
> +  ret void
> +}
> +
> +; PTX-LABEL: sum_of_array(
> +; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rl|r)[0-9]+]]{{\]}}
> +; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
> +; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
> +; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
> +
> +; IR-LABEL: @sum_of_array(
> +; IR: [[BASE_PTR:%[0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i32 %x, i32 %y
> +; IR: [[BASE_INT:%[0-9]+]] = ptrtoint float addrspace(3)* [[BASE_PTR]] to i64
> +; IR: %5 = add i64 [[BASE_INT]], 4
> +; IR: %10 = add i64 [[BASE_INT]], 128
> +; IR: %15 = add i64 [[BASE_INT]], 132
>
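
For reference, the IR in this test plausibly comes from C++ source along these
lines (my reconstruction from the test's comment; only the IR is authoritative,
and the real array lives in addrspace(3), i.e. CUDA shared memory):

    static float array_[32][32];  // stand-in for the addrspace(3) global

    void sum_of_array(int x, int y, float *output) {
      *output = array_[x][y] + array_[x][y + 1] +
                array_[x + 1][y] + array_[x + 1][y + 1];
    }
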
> Added: llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll?rev=207783&view=auto
>
> ==============================================================================
> --- llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll (added)
> +++ llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll Thu May  1 13:38:36 2014
> @@ -0,0 +1,101 @@
> +; RUN: opt < %s -separate-const-offset-from-gep -dce -S | FileCheck %s
> +
> +; Several unit tests for -separate-const-offset-from-gep. The transformation
> +; heavily relies on TargetTransformInfo, so we put these tests under
> +; target-specific folders.
> +
> +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
> +; target triple is necessary; otherwise TargetTransformInfo rejects any
> +; addressing mode.
> +target triple = "nvptx64-unknown-unknown"
> +
> +%struct.S = type { float, double }
> +
> +@struct_array = global [1024 x %struct.S] zeroinitializer, align 16
> +@float_2d_array = global [32 x [32 x float]] zeroinitializer, align 4
> +
> +; We should not extract any struct field indices, because fields in a struct
> +; may have different types.
> +define double* @struct(i32 %i) {
> +entry:
> +  %add = add nsw i32 %i, 5
> +  %idxprom = sext i32 %add to i64
> +  %p = getelementptr inbounds [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
> +  ret double* %p
> +}
> +; CHECK-LABEL: @struct
> +; CHECK: getelementptr [1024 x %struct.S]* @struct_array, i64 0, i32 %i, i32 1
> +
> +; We should be able to trace into sext/zext if it's directly used as a GEP
> +; index.
> +define float* @sext_zext(i32 %i, i32 %j) {
> +entry:
> +  %i1 = add i32 %i, 1
> +  %j2 = add i32 %j, 2
> +  %i1.ext = sext i32 %i1 to i64
> +  %j2.ext = zext i32 %j2 to i64
> +  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i1.ext, i64 %j2.ext
> +  ret float* %p
> +}
> +; CHECK-LABEL: @sext_zext
> +; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i32 %i, i32 %j
> +; CHECK: add i64 %{{[0-9]+}}, 136
> +
> +; We should be able to trace into sext/zext if it can be distributed to both
> +; operands, e.g., sext (add nsw a, b) == add nsw (sext a), (sext b)
> +define float* @ext_add_no_overflow(i64 %a, i32 %b, i64 %c, i32 %d) {
> +  %b1 = add nsw i32 %b, 1
> +  %b2 = sext i32 %b1 to i64
> +  %i = add i64 %a, %b2
> +  %d1 = add nuw i32 %d, 1
> +  %d2 = zext i32 %d1 to i64
> +  %j = add i64 %c, %d2
> +  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
> +  ret float* %p
> +}
> +; CHECK-LABEL: @ext_add_no_overflow
> +; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[0-9]+}}, i64 %{{[0-9]+}}
> +; CHECK: [[BASE_INT:%[0-9]+]] = ptrtoint float* [[BASE_PTR]] to i64
> +; CHECK: add i64 [[BASE_INT]], 132
> +
> +; We should treat "or" with no common bits (%k) as "add", and leave "or" with
> +; potentially common bits (%l) as is.
> +define float* @or(i64 %i) {
> +entry:
> +  %j = shl i64 %i, 2
> +  %k = or i64 %j, 3 ; no common bits
> +  %l = or i64 %j, 4 ; potentially common bits
> +  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %k, i64 %l
> +  ret float* %p
> +}
> +; CHECK-LABEL: @or
> +; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %j, i64 %l
> +; CHECK: add i64 %{{[0-9]+}}, 384
> +
> +; The subexpression (b + 5) is used in both "i = a + (b + 5)" and "*out = b +
> +; 5". When extracting the constant offset 5, make sure "*out = b + 5" isn't
> +; affected.
> +define float* @expr(i64 %a, i64 %b, i64* %out) {
> +entry:
> +  %b5 = add i64 %b, 5
> +  %i = add i64 %b5, %a
> +  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 0
> +  store i64 %b5, i64* %out
> +  ret float* %p
> +}
> +; CHECK-LABEL: @expr
> +; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %0, i64 0
> +; CHECK: add i64 %{{[0-9]+}}, 640
> +; CHECK: store i64 %b5, i64* %out
> +
> +; Verifies we handle "sub" correctly.
> +define float* @sub(i64 %i, i64 %j) {
> +  %i2 = sub i64 %i, 5 ; i - 5
> +  %j2 = sub i64 5, %j ; 5 - j
> +  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i2, i64 %j2
> +  ret float* %p
> +}
> +; CHECK-LABEL: @sub
> +; CHECK: %[[j2:[0-9]+]] = sub i64 0, %j
> +; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
> +; CHECK: add i64 %{{[0-9]+}}, -620
>
>
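
The -620 in the last CHECK line checks out by hand: in %i2 = i - 5 the constant
is in the right operand of the sub, so it is negated and contributes -5 * 128
bytes at the row level; in %j2 = 5 - j the constant is the left operand, keeps
its sign, and contributes +5 * 4 bytes (my own check):

    #include <cassert>

    int main() {
      assert(-5 * 128 + 5 * 4 == -620);  // matches "add i64 %..., -620"
      return 0;
    }
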
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
>