[llvm-commits] [llvm] r166112 - in /llvm/trunk: include/llvm/InitializePasses.h include/llvm/LinkAllPasses.h include/llvm/Transforms/Vectorize.h lib/Transforms/IPO/PassManagerBuilder.cpp lib/Transforms/Vectorize/CMakeLists.txt lib/Transforms/Vectorize/LoopVectorize.cpp lib/Transforms/Vectorize/Vectorize.cpp test/Transforms/LoopVectorize/ test/Transforms/LoopVectorize/gcc-examples.ll test/Transforms/LoopVectorize/lit.local.cfg test/Transforms/LoopVectorize/non-const-n.ll

Benjamin Kramer benny.kra at gmail.com
Thu Oct 18 10:32:58 PDT 2012


On 17.10.2012, at 20:25, Nadav Rotem <nrotem at apple.com> wrote:

> Author: nadav
> Date: Wed Oct 17 13:25:06 2012
> New Revision: 166112
> 
> URL: http://llvm.org/viewvc/llvm-project?rev=166112&view=rev
> Log:
> Add a loop vectorizer.
> 
> Added:
>    llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp
>    llvm/trunk/test/Transforms/LoopVectorize/
>    llvm/trunk/test/Transforms/LoopVectorize/gcc-examples.ll
>    llvm/trunk/test/Transforms/LoopVectorize/lit.local.cfg
>    llvm/trunk/test/Transforms/LoopVectorize/non-const-n.ll
> Modified:
>    llvm/trunk/include/llvm/InitializePasses.h
>    llvm/trunk/include/llvm/LinkAllPasses.h
>    llvm/trunk/include/llvm/Transforms/Vectorize.h
>    llvm/trunk/lib/Transforms/IPO/PassManagerBuilder.cpp
>    llvm/trunk/lib/Transforms/Vectorize/CMakeLists.txt
>    llvm/trunk/lib/Transforms/Vectorize/Vectorize.cpp
> 
> Modified: llvm/trunk/include/llvm/InitializePasses.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/InitializePasses.h?rev=166112&r1=166111&r2=166112&view=diff
> ==============================================================================
> --- llvm/trunk/include/llvm/InitializePasses.h (original)
> +++ llvm/trunk/include/llvm/InitializePasses.h Wed Oct 17 13:25:06 2012
> @@ -261,6 +261,7 @@
> void initializeInstSimplifierPass(PassRegistry&);
> void initializeUnpackMachineBundlesPass(PassRegistry&);
> void initializeFinalizeMachineBundlesPass(PassRegistry&);
> +void initializeLoopVectorizePass(PassRegistry&);
> void initializeBBVectorizePass(PassRegistry&);
> void initializeMachineFunctionPrinterPassPass(PassRegistry&);
> }
> 
> Modified: llvm/trunk/include/llvm/LinkAllPasses.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/LinkAllPasses.h?rev=166112&r1=166111&r2=166112&view=diff
> ==============================================================================
> --- llvm/trunk/include/llvm/LinkAllPasses.h (original)
> +++ llvm/trunk/include/llvm/LinkAllPasses.h Wed Oct 17 13:25:06 2012
> @@ -156,6 +156,7 @@
>       (void) llvm::createCorrelatedValuePropagationPass();
>       (void) llvm::createMemDepPrinter();
>       (void) llvm::createInstructionSimplifierPass();
> +      (void) llvm::createLoopVectorizePass();
>       (void) llvm::createBBVectorizePass();
> 
>       (void)new llvm::IntervalPartition();
> 
> Modified: llvm/trunk/include/llvm/Transforms/Vectorize.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Transforms/Vectorize.h?rev=166112&r1=166111&r2=166112&view=diff
> ==============================================================================
> --- llvm/trunk/include/llvm/Transforms/Vectorize.h (original)
> +++ llvm/trunk/include/llvm/Transforms/Vectorize.h Wed Oct 17 13:25:06 2012
> @@ -107,6 +107,12 @@
> createBBVectorizePass(const VectorizeConfig &C = VectorizeConfig());
> 
> //===----------------------------------------------------------------------===//
> +//
> +// LoopVectorize - Create a loop vectorization pass.
> +//
> +Pass * createLoopVectorizePass();
> +
> +//===----------------------------------------------------------------------===//
> /// @brief Vectorize the BasicBlock.
> ///
> /// @param BB The BasicBlock to be vectorized
> 
> Modified: llvm/trunk/lib/Transforms/IPO/PassManagerBuilder.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/PassManagerBuilder.cpp?rev=166112&r1=166111&r2=166112&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Transforms/IPO/PassManagerBuilder.cpp (original)
> +++ llvm/trunk/lib/Transforms/IPO/PassManagerBuilder.cpp Wed Oct 17 13:25:06 2012
> @@ -176,6 +176,12 @@
>   MPM.add(createIndVarSimplifyPass());        // Canonicalize indvars
>   MPM.add(createLoopIdiomPass());             // Recognize idioms like memset.
>   MPM.add(createLoopDeletionPass());          // Delete dead loops
> +
> +  if (Vectorize) {
> +    MPM.add(createLoopVectorizePass());
> +    MPM.add(createLICMPass());
> +  }
> +
>   if (!DisableUnrollLoops)
>     MPM.add(createLoopUnrollPass());          // Unroll small loops
>   addExtensionsToPM(EP_LoopOptimizerEnd, MPM);
> 
> Modified: llvm/trunk/lib/Transforms/Vectorize/CMakeLists.txt
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/CMakeLists.txt?rev=166112&r1=166111&r2=166112&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Vectorize/CMakeLists.txt (original)
> +++ llvm/trunk/lib/Transforms/Vectorize/CMakeLists.txt Wed Oct 17 13:25:06 2012
> @@ -1,6 +1,7 @@
> add_llvm_library(LLVMVectorize
>   BBVectorize.cpp
>   Vectorize.cpp
> +  LoopVectorize.cpp
>   )
> 
> add_dependencies(LLVMVectorize intrinsics_gen)
> 
> Added: llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp?rev=166112&view=auto
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp (added)
> +++ llvm/trunk/lib/Transforms/Vectorize/LoopVectorize.cpp Wed Oct 17 13:25:06 2012
> @@ -0,0 +1,801 @@
> +//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
> +//
> +//                     The LLVM Compiler Infrastructure
> +//
> +// This file is distributed under the University of Illinois Open Source
> +// License. See LICENSE.TXT for details.
> +//
> +//===----------------------------------------------------------------------===//
> +//
> +// This is a simple loop vectorizer. We currently only support single block
> +// loops. We have a very simple and restrictive legality check: we need to read
> +// and write from disjoint memory locations. We still don't have a cost model.
> +// This pass has three parts:
> +// 1. The main loop pass that drives the different parts.
> +// 2. LoopVectorizationLegality - A helper class that checks for the legality
> +//    of the vectorization.
> +// 3. SingleBlockLoopVectorizer - A helper class that performs the actual
> +//    widening of instructions.
> +//
> +//===----------------------------------------------------------------------===//
> +#define LV_NAME "loop-vectorize"
> +#define DEBUG_TYPE LV_NAME
> +#include "llvm/Constants.h"
> +#include "llvm/DerivedTypes.h"
> +#include "llvm/Instructions.h"
> +#include "llvm/LLVMContext.h"
> +#include "llvm/Pass.h"
> +#include "llvm/Analysis/LoopPass.h"
> +#include "llvm/Value.h"
> +#include "llvm/Function.h"
> +#include "llvm/Module.h"
> +#include "llvm/Type.h"
> +#include "llvm/ADT/SmallVector.h"
> +#include "llvm/ADT/StringExtras.h"
> +#include "llvm/Analysis/AliasAnalysis.h"
> +#include "llvm/Analysis/AliasSetTracker.h"
> +#include "llvm/Transforms/Scalar.h"
> +#include "llvm/Analysis/ScalarEvolution.h"
> +#include "llvm/Analysis/ScalarEvolutionExpressions.h"
> +#include "llvm/Analysis/ScalarEvolutionExpander.h"
> +#include "llvm/Transforms/Utils/BasicBlockUtils.h"
> +#include "llvm/Analysis/ValueTracking.h"
> +#include "llvm/Analysis/LoopInfo.h"
> +#include "llvm/Support/CommandLine.h"
> +#include "llvm/Support/Debug.h"
> +#include "llvm/Support/raw_ostream.h"
> +#include "llvm/DataLayout.h"
> +#include "llvm/Transforms/Utils/Local.h"
> +#include <algorithm>
> +using namespace llvm;
> +
> +static cl::opt<unsigned>
> +DefaultVectorizationFactor("default-loop-vectorize-width",
> +                          cl::init(4), cl::Hidden,
> +                          cl::desc("Set the default loop vectorization width"));
> +
> +namespace {
> +
> +/// Vectorize a simple loop. This class performs the widening of simple single
> +/// basic block loops into vectors. It does not perform any
> +/// vectorization-legality checks, and just does it.  It widens the vectors
> +/// to a given vectorization factor (VF).
> +class SingleBlockLoopVectorizer {
> +public:
> +
> +  /// Ctor.
> +  SingleBlockLoopVectorizer(Loop *OrigLoop, ScalarEvolution *Se, LoopInfo *Li,
> +                            unsigned VecWidth):
> +  Orig(OrigLoop), SE(Se), LI(Li), VF(VecWidth),
> +   Builder(0), Induction(0), OldInduction(0) { }
> +
> +  ~SingleBlockLoopVectorizer() {
> +    delete Builder;
> +  }
> +
> +  // Perform the actual loop widening (vectorization).
> +  void vectorize() {
> +    /// Create a new empty loop. Unlink the old loop and connect the new one.
> +    copyEmptyLoop();
> +    /// Widen each instruction in the old loop to a new one in the new loop.
> +    vectorizeLoop();
> +    // Delete the old loop.
> +    deleteOldLoop();
> + }
> +
> +private:
> +  /// Create an empty loop, based on the loop ranges of the old loop.
> +  void copyEmptyLoop();
> +  /// Copy and widen the instructions from the old loop.
> +  void vectorizeLoop();
> +  /// Delete the old loop.
> +  void deleteOldLoop();
> +
> +  /// This instruction is un-vectorizable. Implement it as a sequence
> +  /// of scalars.
> +  void scalarizeInstruction(Instruction *Instr);
> +
> +  /// Create a broadcast instruction. This method generates a broadcast
> +  /// instruction (shuffle) for loop invariant values and for the induction
> +  /// value. If this is the induction variable then we extend it to N, N+1, ...
> +  /// this is needed because each iteration in the loop corresponds to a SIMD
> +  /// element.
> +  Value *getBroadcastInstrs(Value *V);
> +
> +  /// This is a helper function used by getBroadcastInstrs. It adds 0, 1, 2 ..
> +  /// for each element in the vector. Starting from zero.
> +  Value *getConsecutiveVector(Value* Val);
> +
> +  /// Check that the GEP operands are all uniform except for the last index
> +  /// which has to be the induction variable.
> +  bool isConsecutiveGep(GetElementPtrInst *Gep);
> +
> +  /// When we go over instructions in the basic block we rely on previous
> +  /// values within the current basic block or on loop invariant values.
> +  /// When we widen (vectorize) values we place them in the map. If the values
> +  /// are not within the map, they have to be loop invariant, so we simply
> +  /// broadcast them into a vector.
> +  Value *getVectorValue(Value *V);
> +
> +  /// The original loop.
> +  Loop *Orig;
> +  // Scev analysis to use.
> +  ScalarEvolution *SE;
> +  // Loop Info.
> +  LoopInfo *LI;
> +  // The vectorization factor to use.
> +  unsigned VF;
> +
> +  // The builder that we use
> +  IRBuilder<> *Builder;
> +
> +  // --- Vectorization state ---
> +
> +  /// The new Induction variable which was added to the new block.
> +  Instruction *Induction;
> +  /// The induction variable of the old basic block.
> +  Instruction *OldInduction;
> +  // Maps scalars to widened vectors.
> +  DenseMap<Value*, Value*> WidenMap;
> +};
> +
> +
> +/// Perform the vectorization legality check. This class does not look at the
> +/// profitability of vectorization, only the legality. At the moment the checks
> +/// are very simple and focus on single basic block loops with a constant
> +/// iteration count and no reductions.
> +class LoopVectorizationLegality {
> +public:
> +  LoopVectorizationLegality(Loop *Lp, ScalarEvolution *Se, DataLayout *Dl):
> +  TheLoop(Lp), SE(Se), DL(Dl) { }
> +
> +  /// Returns the maximum vectorization factor that we *can* use to vectorize
> +  /// this loop. This does not mean that it is profitable to vectorize this
> +  /// loop, only that it is legal to do so. This may be a large number. We
> +  /// can vectorize to any SIMD width below this number.
> +  unsigned getLoopMaxVF();
> +
> +private:
> +  /// Check if a single basic block loop is vectorizable.
> +  /// At this point we know that this is a loop with a constant trip count
> +  /// and we only need to check individual instructions.
> +  bool canVectorizeBlock(BasicBlock &BB);
> +
> +  // Check if a pointer value is known to be disjoint.
> +  // Example: Alloca, Global, NoAlias.
> +  bool isKnownDisjoint(Value* Val);
> +
> +  /// The loop that we evaluate.
> +  Loop *TheLoop;
> +  /// Scev analysis.
> +  ScalarEvolution *SE;
> +  /// DataLayout analysis.
> +  DataLayout *DL;
> +};
> +
> +struct LoopVectorize : public LoopPass {
> +  static char ID; // Pass identification, replacement for typeid
> +
> +  LoopVectorize() : LoopPass(ID) {
> +    initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
> +  }
> +
> +  AliasAnalysis *AA;
> +  ScalarEvolution *SE;
> +  DataLayout *DL;
> +  LoopInfo *LI;
> +
> +  virtual bool runOnLoop(Loop *L, LPPassManager &LPM) {
> +    // Only vectorize innermost loops.
> +    if (!L->empty())
> +      return false;
> +
> +    AA = &getAnalysis<AliasAnalysis>();
> +    SE = &getAnalysis<ScalarEvolution>();
> +    DL = getAnalysisIfAvailable<DataLayout>();
> +    LI = &getAnalysis<LoopInfo>();
> +
> +    BasicBlock *Header = L->getHeader();
> +    DEBUG(dbgs() << "LV: Checking a loop in \"" <<
> +          Header->getParent()->getName() << "\"\n");
> +
> +    // Check if it is legal to vectorize the loop.
> +    LoopVectorizationLegality LVL(L, SE, DL);
> +    unsigned MaxVF = LVL.getLoopMaxVF();
> +
> +    // Check that we can vectorize using the chosen vectorization width.
> +    if ((MaxVF < DefaultVectorizationFactor) ||
> +        (MaxVF % DefaultVectorizationFactor)) {
> +      DEBUG(dbgs() << "LV: non-vectorizable MaxVF ("<< MaxVF << ").\n");
> +      return false;
> +    }
> +
> +    DEBUG(dbgs() << "LV: Found a vectorizable loop ("<< MaxVF << ").\n");
> +
> +    // If we decided that it is *legal* to vectorize the loop, do it.
> +    SingleBlockLoopVectorizer LB(L, SE, LI, DefaultVectorizationFactor);
> +    LB.vectorize();
> +
> +    // The loop is now vectorized. Remove it from LPM.
> +    LPM.deleteLoopFromQueue(L);
> +    return true;
> +  }
> +
> +  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
> +    LoopPass::getAnalysisUsage(AU);
> +    AU.addRequiredID(LoopSimplifyID);
> +    AU.addRequired<AliasAnalysis>();
> +    AU.addRequired<LoopInfo>();
> +    AU.addRequired<ScalarEvolution>();
> +  }
> +
> +};
> +
> +Value *SingleBlockLoopVectorizer::getBroadcastInstrs(Value *V) {
> +  // Instructions that access the old induction variable
> +  // actually want to get the new one.
> +  if (V == OldInduction)
> +    V = Induction;
> +  // Create the types.
> +  LLVMContext &C = V->getContext();
> +  Type *VTy = VectorType::get(V->getType(), VF);
> +  Type *I32 = IntegerType::getInt32Ty(C);
> +  Constant *Zero = ConstantInt::get(I32, 0);
> +  Value *Zeros = ConstantAggregateZero::get(VectorType::get(I32, VF));
> +  Value *UndefVal = UndefValue::get(VTy);
> +  // Insert the value into a new vector.
> +  Value *SingleElem = Builder->CreateInsertElement(UndefVal, V, Zero);
> +  // Broadcast the scalar into all locations in the vector.
> +  Value *Shuf = Builder->CreateShuffleVector(SingleElem, UndefVal, Zeros,
> +                                             "broadcast");
> +  // We are accessing the induction variable. Make sure to promote the
> +  // index for each consecutive SIMD lane. This adds 0,1,2 ... to all lanes.
> +  if (V == Induction)
> +    return getConsecutiveVector(Shuf);
> +  return Shuf;
> +}
> +
> +Value *SingleBlockLoopVectorizer::getConsecutiveVector(Value* Val) {
> +  assert(Val->getType()->isVectorTy() && "Must be a vector");
> +  assert(Val->getType()->getScalarType()->isIntegerTy() &&
> +         "Elem must be an integer");
> +  // Create the types.
> +  Type *ITy = Val->getType()->getScalarType();
> +  VectorType *Ty = cast<VectorType>(Val->getType());
> +  unsigned VLen = Ty->getNumElements();
> +  SmallVector<Constant*, 8> Indices;
> +
> +  // Create a vector of consecutive numbers from zero to VF.
> +  for (unsigned i = 0; i < VLen; ++i)
> +    Indices.push_back(ConstantInt::get(ITy, i));
> +
> +  // Add the consecutive indices to the vector value.
> +  Constant *Cv = ConstantVector::get(Indices);
> +  assert(Cv->getType() == Val->getType() && "Invalid consecutive vec");
> +  return Builder->CreateAdd(Val, Cv, "induction");
> +}
> +
> +
> +bool SingleBlockLoopVectorizer::isConsecutiveGep(GetElementPtrInst *Gep) {
> +  if (!Gep)
> +    return false;
> +
> +  unsigned NumOperands = Gep->getNumOperands();
> +  Value *LastIndex = Gep->getOperand(NumOperands - 1);
> +
> +  // Check that all of the gep indices are uniform except for the last.
> +  for (unsigned i = 0; i < NumOperands - 1; ++i)
> +    if (!SE->isLoopInvariant(SE->getSCEV(Gep->getOperand(i)), Orig))
> +      return false;
> +
> +  // The last operand has to be the induction in order to emit
> +  // a wide load/store.
> +  const SCEV *Last = SE->getSCEV(LastIndex);
> +  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Last)) {
> +    const SCEV *Step = AR->getStepRecurrence(*SE);
> +
> +    // The memory is consecutive because the last index is consecutive
> +    // and all other indices are loop invariant.
> +    if (Step->isOne())
> +      return true;
> +  }
> +
> +  return false;
> +}
> +
> +Value *SingleBlockLoopVectorizer::getVectorValue(Value *V) {
> +  if (WidenMap.count(V))
> +    return WidenMap[V];
> +  return getBroadcastInstrs(V);
> +}
> +
> +void SingleBlockLoopVectorizer::scalarizeInstruction(Instruction *Instr) {
> +  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
> +  // Holds vector parameters or scalars, in case of uniform vals.
> +  SmallVector<Value*, 8> Params;
> +
> +  // Find all of the vectorized parameters.
> +  for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
> +    Value *SrcOp = Instr->getOperand(op);
> +
> +    // If we are accessing the old induction variable, use the new one.
> +    if (SrcOp == OldInduction) {
> +      Params.push_back(getBroadcastInstrs(Induction));
> +      continue;
> +    }
> +
> +    // Try using previously calculated values.
> +    Instruction *SrcInst = dyn_cast<Instruction>(SrcOp);
> +
> +    // If the src is an instruction that appeared earlier in the basic block
> +    // then it should already be vectorized. 
> +    if (SrcInst && SrcInst->getParent() == Instr->getParent()) {
> +      assert(WidenMap.count(SrcInst) && "Source operand is unavailable");
> +      // The parameter is a vector value from earlier.
> +      Params.push_back(WidenMap[SrcInst]);
> +    } else {
> +      // The parameter is a scalar from outside the loop. Maybe even a constant.
> +      Params.push_back(SrcOp);
> +    }
> +  }
> +
> +  assert(Params.size() == Instr->getNumOperands() &&
> +         "Invalid number of operands");
> +
> +  // Does this instruction return a value ?
> +  bool IsVoidRetTy = Instr->getType()->isVoidTy();
> +  Value *VecResults = 0;
> +
> +  // If we have a return value, create an empty vector. We place the scalarized
> +  // instructions in this vector.
> +  if (!IsVoidRetTy)
> +    VecResults = UndefValue::get(VectorType::get(Instr->getType(), VF));
> +
> +  // For each scalar that we create.
> +  for (unsigned i = 0; i < VF; ++i) {
> +    Instruction *Cloned = Instr->clone();
> +    if (!IsVoidRetTy)
> +      Cloned->setName(Instr->getName() + ".cloned");
> +    // Replace the operands of the cloned instructions with extracted scalars.
> +    for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) {
> +      Value *Op = Params[op];
> +      // Param is a vector. Need to extract the right lane.
> +      if (Op->getType()->isVectorTy())
> +        Op = Builder->CreateExtractElement(Op, Builder->getInt32(i));
> +      Cloned->setOperand(op, Op);
> +    }
> +
> +    // Place the cloned scalar in the new loop.
> +    Builder->Insert(Cloned);
> +
> +    // If the original scalar returns a value we need to place it in a vector
> +    // so that future users will be able to use it.
> +    if (!IsVoidRetTy)
> +      VecResults = Builder->CreateInsertElement(VecResults, Cloned,
> +                                               Builder->getInt32(i));
> +  }
> +
> +  if (!IsVoidRetTy)
> +    WidenMap[Instr] = VecResults;
> +}
> +
> +void SingleBlockLoopVectorizer::copyEmptyLoop() {
> +  assert(Orig->getNumBlocks() == 1 && "Invalid loop");
> +  BasicBlock *PH = Orig->getLoopPreheader();
> +  BasicBlock *ExitBlock = Orig->getExitBlock();
> +  assert(ExitBlock && "Invalid loop exit");
> +
> +  // Create a new single-basic block loop.
> +  BasicBlock *BB = BasicBlock::Create(PH->getContext(), "vectorizedloop",
> +                                      PH->getParent(), ExitBlock);
> +
> +  // Find the induction variable.
> +  BasicBlock *OldBasicBlock = Orig->getHeader();
> +  PHINode *OldInd = dyn_cast<PHINode>(OldBasicBlock->begin());
> +  assert(OldInd && "We must have a single phi node.");
> +  Type *IdxTy = OldInd->getType();
> +
> +  // Use this IR builder to create the loop instructions (Phi, Br, Cmp)
> +  // inside the loop.
> +  Builder = new IRBuilder<>(BB);
> +  Builder->SetInsertPoint(BB);
> +
> +  // Generate the induction variable.
> +  PHINode *Phi = Builder->CreatePHI(IdxTy, 2, "index");
> +  Constant *Zero = ConstantInt::get(IdxTy, 0);
> +  Constant *Step = ConstantInt::get(IdxTy, VF);
> +
> +  // Find the loop boundaries.
> +  const SCEV *ExitCount = SE->getExitCount(Orig, Orig->getHeader());
> +  assert(ExitCount != SE->getCouldNotCompute() && "Invalid loop count");
> +
> +  // Get the trip count from the count by adding 1.
> +  ExitCount = SE->getAddExpr(ExitCount,
> +                             SE->getConstant(ExitCount->getType(), 1));
> +
> +  // Expand the trip count and place the new instructions in the preheader.
> +  // Notice that the pre-header does not change, only the loop body.
> +  SCEVExpander Exp(*SE, "induction");
> +  Instruction *Loc = Orig->getLoopPreheader()->getTerminator();
> +  if (ExitCount->getType() != Phi->getType())
> +    ExitCount = SE->getSignExtendExpr(ExitCount, Phi->getType());
> +  Value *Count = Exp.expandCodeFor(ExitCount, Phi->getType(), Loc);
> +  
> +  // Create i+1 and fill the PHINode.
> +  Value *Next = Builder->CreateAdd(Phi, Step, "index.next");
> +  Phi->addIncoming(Zero, PH);
> +  Phi->addIncoming(Next, BB);
> +  // Create the compare.
> +  Value *ICmp = Builder->CreateICmpEQ(Next, Count);
> +  Builder->CreateCondBr(ICmp, ExitBlock, BB);
> +  // Fix preheader.
> +  PH->getTerminator()->setSuccessor(0, BB);
> +  Builder->SetInsertPoint(BB->getFirstInsertionPt());
> +
> +  // Save the induction variables.
> +  Induction = Phi;
> +  OldInduction = OldInd;
> +}
> +
> +void SingleBlockLoopVectorizer::vectorizeLoop() {
> +  BasicBlock &BB = *Orig->getHeader();
> +
> +  // For each instruction in the old loop.
> +  for (BasicBlock::iterator it = BB.begin(), e = BB.end(); it != e; ++it) {
> +  Instruction *Inst = it;
> +
> +    switch (Inst->getOpcode()) {
> +      case Instruction::PHI:
> +      case Instruction::Br:
> +        // Nothing to do for PHIs and BR, since we already took care of the
> +        // loop control flow instructions.
> +        continue;
> +
> +      case Instruction::Add:
> +      case Instruction::FAdd:
> +      case Instruction::Sub:
> +      case Instruction::FSub:
> +      case Instruction::Mul:
> +      case Instruction::FMul:
> +      case Instruction::UDiv:
> +      case Instruction::SDiv:
> +      case Instruction::FDiv:
> +      case Instruction::URem:
> +      case Instruction::SRem:
> +      case Instruction::FRem:
> +      case Instruction::Shl:
> +      case Instruction::LShr:
> +      case Instruction::AShr:
> +      case Instruction::And:
> +      case Instruction::Or:
> +      case Instruction::Xor: {
> +        // Just widen binops.
> +        BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
> +        Value *A = getVectorValue(Inst->getOperand(0));
> +        Value *B = getVectorValue(Inst->getOperand(1));
> +        // Use this vector value for all users of the original instruction.
> +        WidenMap[Inst] = Builder->CreateBinOp(BinOp->getOpcode(), A, B);
> +        break;
> +      }
> +      case Instruction::Select: {
> +        // Widen selects.
> +        Value *A = getVectorValue(Inst->getOperand(0));
> +        Value *B = getVectorValue(Inst->getOperand(1));
> +        Value *C = getVectorValue(Inst->getOperand(2));
> +        WidenMap[Inst] = Builder->CreateSelect(A, B, C);
> +        break;
> +      }
> +
> +      case Instruction::ICmp:
> +      case Instruction::FCmp: {
> +        // Widen compares. Generate vector compares.
> +        bool FCmp = (Inst->getOpcode() == Instruction::FCmp);
> +        CmpInst *Cmp = dyn_cast<CmpInst>(Inst);
> +        Value *A = getVectorValue(Inst->getOperand(0));
> +        Value *B = getVectorValue(Inst->getOperand(1));
> +        if (FCmp)
> +          WidenMap[Inst] = Builder->CreateFCmp(Cmp->getPredicate(), A, B);
> +        else
> +          WidenMap[Inst] = Builder->CreateICmp(Cmp->getPredicate(), A, B);
> +        break;
> +      }
> +
> +      case Instruction::Store: {
> +        // Attempt to issue a wide store.
> +        StoreInst *SI = dyn_cast<StoreInst>(Inst);
> +        Type *StTy = VectorType::get(SI->getValueOperand()->getType(), VF);
> +        Value *Ptr = SI->getPointerOperand();
> +        unsigned Alignment = SI->getAlignment();
> +        GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr);
> +        // This store does not use GEPs.
> +        if (!isConsecutiveGep(Gep)) {
> +          scalarizeInstruction(Inst);
> +          break;
> +        }
> +
> +        // Create the new GEP with the new induction variable.
> +        GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
> +        unsigned NumOperands = Gep->getNumOperands();
> +        Gep2->setOperand(NumOperands - 1, Induction);
> +        Ptr = Builder->Insert(Gep2);
> +        Ptr = Builder->CreateBitCast(Ptr, StTy->getPointerTo());
> +        Value *Val = getVectorValue(SI->getValueOperand());
> +        Builder->CreateStore(Val, Ptr)->setAlignment(Alignment);
> +        break;
> +      }
> +      case Instruction::Load: {
> +        // Attempt to issue a wide load.
> +        LoadInst *LI = dyn_cast<LoadInst>(Inst);
> +        Type *RetTy = VectorType::get(LI->getType(), VF);
> +        Value *Ptr = LI->getPointerOperand();
> +        unsigned Alignment = LI->getAlignment();
> +        GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr);
> +
> +        // We don't have a gep. Scalarize the load.
> +        if (!isConsecutiveGep(Gep)) {
> +          scalarizeInstruction(Inst);
> +          break;
> +        }
> +
> +        // Create the new GEP with the new induction variable.
> +        GetElementPtrInst *Gep2 = cast<GetElementPtrInst>(Gep->clone());
> +        unsigned NumOperands = Gep->getNumOperands();
> +        Gep2->setOperand(NumOperands - 1, Induction);
> +        Ptr = Builder->Insert(Gep2);
> +        Ptr = Builder->CreateBitCast(Ptr, RetTy->getPointerTo());
> +        LI = Builder->CreateLoad(Ptr);
> +        LI->setAlignment(Alignment);
> +        // Use this vector value for all users of the load.
> +        WidenMap[Inst] = LI;
> +        break;
> +      }
> +      case Instruction::ZExt:
> +      case Instruction::SExt:
> +      case Instruction::FPToUI:
> +      case Instruction::FPToSI:
> +      case Instruction::FPExt:
> +      case Instruction::PtrToInt:
> +      case Instruction::IntToPtr:
> +      case Instruction::SIToFP:
> +      case Instruction::UIToFP:
> +      case Instruction::Trunc:
> +      case Instruction::FPTrunc:
> +      case Instruction::BitCast: {
> +        /// Vectorize bitcasts.
> +        CastInst *CI = dyn_cast<CastInst>(Inst);
> +        Value *A = getVectorValue(Inst->getOperand(0));
> +        Type *DestTy = VectorType::get(CI->getType()->getScalarType(), VF);
> +        WidenMap[Inst] = Builder->CreateCast(CI->getOpcode(), A, DestTy);
> +        break;
> +      }
> +
> +      default:
> +        /// All other instructions are unsupported. Scalarize them.
> +        scalarizeInstruction(Inst);
> +        break;
> +    }// end of switch.
> +  }// end of for_each instr.
> +}
> +
> +void SingleBlockLoopVectorizer::deleteOldLoop() {
> +  // The original basic block.
> +  BasicBlock *BB = Orig->getHeader();
> +  SE->forgetLoop(Orig);
> +
> +  LI->removeBlock(BB);
> +  Orig->addBasicBlockToLoop(Induction->getParent(), LI->getBase());
> +
> +  // Remove the old loop block.
> +  DeleteDeadBlock(BB);
> +}
> +
> +unsigned LoopVectorizationLegality::getLoopMaxVF() {
> +  if (!TheLoop->getLoopPreheader()) {
> +    assert(false && "No preheader!!");
> +    DEBUG(dbgs() << "LV: Loop not normalized." << "\n");
> +    return  1;
> +  }
> +
> +  // We can only vectorize single basic block loops.
> +  unsigned NumBlocks = TheLoop->getNumBlocks();
> +  if (NumBlocks != 1) {
> +    DEBUG(dbgs() << "LV: Too many blocks:" << NumBlocks << "\n");
> +    return 1;
> +  }
> +
> +  // We need to have a loop header.
> +  BasicBlock *BB = TheLoop->getHeader();
> +  DEBUG(dbgs() << "LV: Found a loop: " << BB->getName() << "\n");
> +
> +  // Find the max vectorization factor.
> +  unsigned MaxVF = SE->getSmallConstantTripMultiple(TheLoop, BB);
> +
> +
> +  // Perform an early check. Do not scan the block if we did not find a loop.
> +  if (MaxVF < 2) {
> +    DEBUG(dbgs() << "LV: Can't find a vectorizable loop structure\n");
> +    return 1;
> +  }
> +
> +  // Go over each instruction and look at memory deps.
> +  if (!canVectorizeBlock(*BB)) {
> +    DEBUG(dbgs() << "LV: Can't vectorize this loop header\n");
> +    return 1;
> +  }
> +
> +  DEBUG(dbgs() << "LV: We can vectorize this loop! VF="<<MaxVF<<"\n");
> +  
> +  // Okay! We can vectorize. Return the max trip multiple.
> +  return MaxVF;
> +}
> +
> +bool LoopVectorizationLegality::canVectorizeBlock(BasicBlock &BB) {
> +  // Holds the read and write pointers that we find.
> +  typedef SmallVector<Value*, 10> ValueVector;
> +  ValueVector Reads;
> +  ValueVector Writes;
> +
> +  unsigned NumPhis = 0;
> +  for (BasicBlock::iterator it = BB.begin(), e = BB.end(); it != e; ++it) {
> +    Instruction *I = it;
> +
> +    PHINode *Phi = dyn_cast<PHINode>(I);
> +    if (Phi) {
> +      NumPhis++;
> +      // We only look at integer phi nodes.
> +      if (!Phi->getType()->isIntegerTy()) {
> +        DEBUG(dbgs() << "LV: Found an non-int PHI.\n");
> +        return false;
> +      }
> +
> +      // If we found an induction variable.
> +      if (NumPhis > 1) {
> +        DEBUG(dbgs() << "LV: Found more than one PHI.\n");
> +        return false;
> +      }
> +
> +      // This should not happen because the loop should be normalized.
> +      if (Phi->getNumIncomingValues() != 2) {
> +        DEBUG(dbgs() << "LV: Found an invalid PHI.\n");
> +        return false;
> +      }
> +
> +      // Check that the PHI is consecutive and starts at zero.
> +      const SCEV *PhiScev = SE->getSCEV(Phi);
> +      const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
> +      if (!AR) {
> +        DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
> +        return false;
> +      }
> +
> +      const SCEV *Step = AR->getStepRecurrence(*SE);
> +      const SCEV *Start = AR->getStart();
> +
> +      if (!Step->isOne() || !Start->isZero()) {
> +        DEBUG(dbgs() << "LV: PHI does not start at zero or steps by one.\n");
> +        return false;
> +      }
> +    }
> +
> +    // If this is a load, record its pointer. If it is not a load, abort.
> +    // Notice that we don't handle function calls that read or write.
> +    if (I->mayReadFromMemory()) {
> +      LoadInst *Ld = dyn_cast<LoadInst>(I);
> +      if (!Ld) return false;
> +      if (!Ld->isSimple()) {
> +        DEBUG(dbgs() << "LV: Found a non-simple load.\n");
> +        return false;
> +      }
> +      GetUnderlyingObjects(Ld->getPointerOperand(), Reads, DL);
> +    }
> +
> +    // Record store pointers. Abort on all other instructions that write to
> +    // memory.
> +    if (I->mayWriteToMemory()) {
> +      StoreInst *St = dyn_cast<StoreInst>(I);
> +      if (!St) return false;
> +      if (!St->isSimple()) {
> +        DEBUG(dbgs() << "LV: Found a non-simple store.\n");
> +        return false;
> +      }
> +      GetUnderlyingObjects(St->getPointerOperand(), Writes, DL);
> +    }
> +
> +    // We still don't handle functions.
> +    CallInst *CI = dyn_cast<CallInst>(I);
> +    if (CI) {
> +      DEBUG(dbgs() << "LV: Found a call site:"<<
> +            CI->getCalledFunction()->getName() << "\n");
> +      return false;
> +    }
> +
> +    // We do not re-vectorize vectors.
> +    if (!VectorType::isValidElementType(I->getType()) &&
> +        !I->getType()->isVoidTy()) {
> +      DEBUG(dbgs() << "LV: Found unvectorizable type." << "\n");
> +      return false;
> +    }
> +    //Check that all of the users of the loop are inside the BB.
> +    for (Value::use_iterator it = I->use_begin(), e = I->use_end();
> +         it != e; ++it) {
> +      Instruction *U = cast<Instruction>(*it);
> +      BasicBlock *Parent = U->getParent();
> +      if (Parent != &BB) {
> +        DEBUG(dbgs() << "LV: Found an outside user for : "<< *U << "\n");
> +        return false;
> +      }
> +    }
> +  } // next instr.
> +
> +  // Check that the underlying objects of the reads and writes are either
> +  // disjoint memory locations, or that they are no-alias arguments.
> +  ValueVector::iterator r, re, w, we;
> +  for (r = Reads.begin(), re = Reads.end(); r != re; ++r) {
> +    if (!isKnownDisjoint(*r)) {
> +      DEBUG(dbgs() << "LV: Found a bad read Ptr: "<< **r << "\n");
> +      return false;
> +    }
> +  }
> +
> +  for (w = Writes.begin(), we = Writes.end(); w != we; ++w) {
> +    if (!isKnownDisjoint(*w)) {
> +      DEBUG(dbgs() << "LV: Found a bad write Ptr: "<< **w << "\n");
> +      return false;
> +    }
> +  }
> +
> +  // Check that there are no multiple write locations to the same pointer.
> +  SmallPtrSet<Value*, 8> BasePointers;
> +  for (w = Writes.begin(), we = Writes.end(); w != we; ++w) {
> +    if (BasePointers.count(*w)) {

You can use the return value of insert() to avoid a lookup here: SmallPtrSet::insert() reports whether the element was newly added.

> +      DEBUG(dbgs() << "LV: Multiple writes to the same index :"<< **w << "\n");
> +      return false;
> +    }
> +    BasePointers.insert(*w);
> +  }
> +
> +  // Sort the writes vector so that we can use a binary search.
> +  std::sort(Writes.begin(), Writes.end());
> +  // Check that the reads and the writes are disjoint.
> +  for (r = Reads.begin(), re = Reads.end(); r != re; ++r) {
> +    if (std::binary_search(Writes.begin(), Writes.end(), *r)) {

First you insert all elements of Writes into a set, then discard the set, sort the array, and do binary searches on it. Why not just keep the set and query it instead of sorting and searching?

> +      DEBUG(dbgs() << "Vectorizer: Found a read/write ptr:"<< **r << "\n");
> +      return false;
> +    }
> +  }
> +
> +  // All is okay.
> +  return true;
> +}
> +
> +/// Checks if the value is a Global variable or if it is an Arguments
> +/// marked with the NoAlias attribute.
> +bool LoopVectorizationLegality::isKnownDisjoint(Value* Val) {

Maybe use llvm::isIdentifiedObject? It already covers globals, allocas, and noalias arguments.

- Ben

> +  assert(Val && "Invalid value");
> +  if (dyn_cast<GlobalValue>(Val))
> +    return true;
> +  if (dyn_cast<AllocaInst>(Val))
> +    return true;
> +  Argument *A = dyn_cast<Argument>(Val);
> +  if (!A)
> +    return false;
> +  return A->hasNoAliasAttr();
> +}
> +
> +} // namespace
> +
> +char LoopVectorize::ID = 0;
> +static const char lv_name[] = "Loop Vectorization";
> +INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
> +INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
> +INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
> +INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
> +INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
> +
> +namespace llvm {
> +  Pass *createLoopVectorizePass() {
> +    return new LoopVectorize();
> +  }
> +
> +}
> +
> 
> Modified: llvm/trunk/lib/Transforms/Vectorize/Vectorize.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/Vectorize.cpp?rev=166112&r1=166111&r2=166112&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Transforms/Vectorize/Vectorize.cpp (original)
> +++ llvm/trunk/lib/Transforms/Vectorize/Vectorize.cpp Wed Oct 17 13:25:06 2012
> @@ -7,7 +7,7 @@
> //
> //===----------------------------------------------------------------------===//
> //
> -// This file implements common infrastructure for libLLVMVectorizeOpts.a, which 
> +// This file implements common infrastructure for libLLVMVectorizeOpts.a, which
> // implements several vectorization transformations over the LLVM intermediate
> // representation, including the C bindings for that library.
> //
> @@ -23,10 +23,11 @@
> 
> using namespace llvm;
> 
> -/// initializeVectorizationPasses - Initialize all passes linked into the 
> +/// initializeVectorizationPasses - Initialize all passes linked into the
> /// Vectorization library.
> void llvm::initializeVectorization(PassRegistry &Registry) {
>   initializeBBVectorizePass(Registry);
> +  initializeLoopVectorizePass(Registry);
> }
> 
> void LLVMInitializeVectorization(LLVMPassRegistryRef R) {
> @@ -37,3 +38,6 @@
>   unwrap(PM)->add(createBBVectorizePass());
> }
> 
> +void LLVMAddLoopVectorizePass(LLVMPassManagerRef PM) {
> +  unwrap(PM)->add(createLoopVectorizePass());
> +}
> 
> Added: llvm/trunk/test/Transforms/LoopVectorize/gcc-examples.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/gcc-examples.ll?rev=166112&view=auto
> ==============================================================================
> --- llvm/trunk/test/Transforms/LoopVectorize/gcc-examples.ll (added)
> +++ llvm/trunk/test/Transforms/LoopVectorize/gcc-examples.ll Wed Oct 17 13:25:06 2012
> @@ -0,0 +1,651 @@
> +; RUN: opt < %s  -loop-vectorize -dce -instcombine -licm -S | FileCheck %s
> +
> +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
> +target triple = "x86_64-apple-macosx10.8.0"
> +
> + at b = common global [2048 x i32] zeroinitializer, align 16
> + at c = common global [2048 x i32] zeroinitializer, align 16
> + at a = common global [2048 x i32] zeroinitializer, align 16
> + at G = common global [32 x [1024 x i32]] zeroinitializer, align 16
> + at ub = common global [1024 x i32] zeroinitializer, align 16
> + at uc = common global [1024 x i32] zeroinitializer, align 16
> + at d = common global [2048 x i32] zeroinitializer, align 16
> + at fa = common global [1024 x float] zeroinitializer, align 16
> + at fb = common global [1024 x float] zeroinitializer, align 16
> + at ic = common global [1024 x i32] zeroinitializer, align 16
> + at da = common global [1024 x float] zeroinitializer, align 16
> + at db = common global [1024 x float] zeroinitializer, align 16
> + at dc = common global [1024 x float] zeroinitializer, align 16
> + at dd = common global [1024 x float] zeroinitializer, align 16
> + at dj = common global [1024 x i32] zeroinitializer, align 16
> +
> +;CHECK: @example1
> +;CHECK: load <4 x i32>
> +;CHECK: add <4 x i32>
> +;CHECK: store <4 x i32>
> +;CHECK: ret void
> +define void @example1() nounwind uwtable ssp {
> +  br label %1
> +
> +; <label>:1                                       ; preds = %1, %0
> +  %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
> +  %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
> +  %3 = load i32* %2, align 4
> +  %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
> +  %5 = load i32* %4, align 4
> +  %6 = add nsw i32 %5, %3
> +  %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
> +  store i32 %6, i32* %7, align 4
> +  %indvars.iv.next = add i64 %indvars.iv, 1
> +  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
> +  %exitcond = icmp eq i32 %lftr.wideiv, 256
> +  br i1 %exitcond, label %8, label %1
> +
> +; <label>:8                                       ; preds = %1
> +  ret void
> +}
> +
> +; We can't vectorize this loop because it has non constant loop bounds.
> +;CHECK: @example2
> +;CHECK-NOT: <4 x i32>
> +;CHECK: ret void
> +define void @example2(i32 %n, i32 %x) nounwind uwtable ssp {
> +  %1 = icmp sgt i32 %n, 0
> +  br i1 %1, label %.lr.ph5, label %.preheader
> +
> +..preheader_crit_edge:                            ; preds = %.lr.ph5
> +  %phitmp = sext i32 %n to i64
> +  br label %.preheader
> +
> +.preheader:                                       ; preds = %..preheader_crit_edge, %0
> +  %i.0.lcssa = phi i64 [ %phitmp, %..preheader_crit_edge ], [ 0, %0 ]
> +  %2 = icmp eq i32 %n, 0
> +  br i1 %2, label %._crit_edge, label %.lr.ph
> +
> +.lr.ph5:                                          ; preds = %0, %.lr.ph5
> +  %indvars.iv6 = phi i64 [ %indvars.iv.next7, %.lr.ph5 ], [ 0, %0 ]
> +  %3 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv6
> +  store i32 %x, i32* %3, align 4
> +  %indvars.iv.next7 = add i64 %indvars.iv6, 1
> +  %lftr.wideiv = trunc i64 %indvars.iv.next7 to i32
> +  %exitcond = icmp eq i32 %lftr.wideiv, %n
> +  br i1 %exitcond, label %..preheader_crit_edge, label %.lr.ph5
> +
> +.lr.ph:                                           ; preds = %.preheader, %.lr.ph
> +  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ %i.0.lcssa, %.preheader ]
> +  %.02 = phi i32 [ %4, %.lr.ph ], [ %n, %.preheader ]
> +  %4 = add nsw i32 %.02, -1
> +  %5 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
> +  %6 = load i32* %5, align 4
> +  %7 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
> +  %8 = load i32* %7, align 4
> +  %9 = and i32 %8, %6
> +  %10 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
> +  store i32 %9, i32* %10, align 4
> +  %indvars.iv.next = add i64 %indvars.iv, 1
> +  %11 = icmp eq i32 %4, 0
> +  br i1 %11, label %._crit_edge, label %.lr.ph
> +
> +._crit_edge:                                      ; preds = %.lr.ph, %.preheader
> +  ret void
> +}
> +
> +; We can't vectorize this loop because it has non constant loop bounds.
> +;CHECK: @example3
> +;CHECK-NOT: <4 x i32>
> +;CHECK: ret void
> +define void @example3(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture %q) nounwind uwtable ssp {
> +  %1 = icmp eq i32 %n, 0
> +  br i1 %1, label %._crit_edge, label %.lr.ph
> +
> +.lr.ph:                                           ; preds = %0, %.lr.ph
> +  %.05 = phi i32 [ %2, %.lr.ph ], [ %n, %0 ]
> +  %.014 = phi i32* [ %5, %.lr.ph ], [ %p, %0 ]
> +  %.023 = phi i32* [ %3, %.lr.ph ], [ %q, %0 ]
> +  %2 = add nsw i32 %.05, -1
> +  %3 = getelementptr inbounds i32* %.023, i64 1
> +  %4 = load i32* %.023, align 16
> +  %5 = getelementptr inbounds i32* %.014, i64 1
> +  store i32 %4, i32* %.014, align 16
> +  %6 = icmp eq i32 %2, 0
> +  br i1 %6, label %._crit_edge, label %.lr.ph
> +
> +._crit_edge:                                      ; preds = %.lr.ph, %0
> +  ret void
> +}
> +
> +; We can't vectorize this loop because it has non constant loop bounds.
> +;CHECK: @example4
> +;CHECK-NOT: <4 x i32>
> +;CHECK: ret void
> +define void @example4(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture %q) nounwind uwtable ssp {
> +  %1 = add nsw i32 %n, -1
> +  %2 = icmp eq i32 %n, 0
> +  br i1 %2, label %.preheader4, label %.lr.ph10
> +
> +.preheader4:                                      ; preds = %0
> +  %3 = icmp sgt i32 %1, 0
> +  br i1 %3, label %.lr.ph6, label %._crit_edge
> +
> +.lr.ph10:                                         ; preds = %0, %.lr.ph10
> +  %4 = phi i32 [ %9, %.lr.ph10 ], [ %1, %0 ]
> +  %.018 = phi i32* [ %8, %.lr.ph10 ], [ %p, %0 ]
> +  %.027 = phi i32* [ %5, %.lr.ph10 ], [ %q, %0 ]
> +  %5 = getelementptr inbounds i32* %.027, i64 1
> +  %6 = load i32* %.027, align 16
> +  %7 = add nsw i32 %6, 5
> +  %8 = getelementptr inbounds i32* %.018, i64 1
> +  store i32 %7, i32* %.018, align 16
> +  %9 = add nsw i32 %4, -1
> +  %10 = icmp eq i32 %4, 0
> +  br i1 %10, label %._crit_edge, label %.lr.ph10
> +
> +.preheader:                                       ; preds = %.lr.ph6
> +  br i1 %3, label %.lr.ph, label %._crit_edge
> +
> +.lr.ph6:                                          ; preds = %.preheader4, %.lr.ph6
> +  %indvars.iv11 = phi i64 [ %indvars.iv.next12, %.lr.ph6 ], [ 0, %.preheader4 ]
> +  %indvars.iv.next12 = add i64 %indvars.iv11, 1
> +  %11 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv.next12
> +  %12 = load i32* %11, align 4
> +  %13 = add nsw i64 %indvars.iv11, 3
> +  %14 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %13
> +  %15 = load i32* %14, align 4
> +  %16 = add nsw i32 %15, %12
> +  %17 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv11
> +  store i32 %16, i32* %17, align 4
> +  %lftr.wideiv13 = trunc i64 %indvars.iv.next12 to i32
> +  %exitcond14 = icmp eq i32 %lftr.wideiv13, %1
> +  br i1 %exitcond14, label %.preheader, label %.lr.ph6
> +
> +.lr.ph:                                           ; preds = %.preheader, %.lr.ph
> +  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %.preheader ]
> +  %18 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
> +  %19 = load i32* %18, align 4
> +  %20 = icmp sgt i32 %19, 4
> +  %21 = select i1 %20, i32 4, i32 0
> +  %22 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
> +  store i32 %21, i32* %22, align 4
> +  %indvars.iv.next = add i64 %indvars.iv, 1
> +  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
> +  %exitcond = icmp eq i32 %lftr.wideiv, %1
> +  br i1 %exitcond, label %._crit_edge, label %.lr.ph
> +
> +._crit_edge:                                      ; preds = %.lr.ph10, %.preheader4, %.lr.ph, %.preheader
> +  ret void
> +}
> +
> +;CHECK: @example8
> +;CHECK: store <4 x i32>
> +;CHECK: ret void
> +define void @example8(i32 %x) nounwind uwtable ssp {
> +  br label %.preheader
> +
> +.preheader:                                       ; preds = %3, %0
> +  %indvars.iv3 = phi i64 [ 0, %0 ], [ %indvars.iv.next4, %3 ]
> +  br label %1
> +
> +; <label>:1                                       ; preds = %1, %.preheader
> +  %indvars.iv = phi i64 [ 0, %.preheader ], [ %indvars.iv.next, %1 ]
> +  %2 = getelementptr inbounds [32 x [1024 x i32]]* @G, i64 0, i64 %indvars.iv3, i64 %indvars.iv
> +  store i32 %x, i32* %2, align 4
> +  %indvars.iv.next = add i64 %indvars.iv, 1
> +  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
> +  %exitcond = icmp eq i32 %lftr.wideiv, 1024
> +  br i1 %exitcond, label %3, label %1
> +
> +; <label>:3                                       ; preds = %1
> +  %indvars.iv.next4 = add i64 %indvars.iv3, 1
> +  %lftr.wideiv5 = trunc i64 %indvars.iv.next4 to i32
> +  %exitcond6 = icmp eq i32 %lftr.wideiv5, 32
> +  br i1 %exitcond6, label %4, label %.preheader
> +
> +; <label>:4                                       ; preds = %3
> +  ret void
> +}
> +
> +; We can't vectorize because it has a reduction variable.
> +;CHECK: @example9
> +;CHECK-NOT: <4 x i32>
> +;CHECK: ret i32
> +define i32 @example9() nounwind uwtable readonly ssp {
> +  br label %1
> +
> +; <label>:1                                       ; preds = %1, %0
> +  %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
> +  %diff.01 = phi i32 [ 0, %0 ], [ %7, %1 ]
> +  %2 = getelementptr inbounds [1024 x i32]* @ub, i64 0, i64 %indvars.iv
> +  %3 = load i32* %2, align 4
> +  %4 = getelementptr inbounds [1024 x i32]* @uc, i64 0, i64 %indvars.iv
> +  %5 = load i32* %4, align 4
> +  %6 = add i32 %3, %diff.01
> +  %7 = sub i32 %6, %5
> +  %indvars.iv.next = add i64 %indvars.iv, 1
> +  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
> +  %exitcond = icmp eq i32 %lftr.wideiv, 1024
> +  br i1 %exitcond, label %8, label %1
> +
> +; <label>:8                                       ; preds = %1
> +  ret i32 %7
> +}
> +
> +;CHECK: @example10a
> +;CHECK: load <4 x i16>
> +;CHECK: add <4 x i16>
> +;CHECK: store <4 x i16>
> +;CHECK: ret void
> +define void @example10a(i16* noalias nocapture %sa, i16* noalias nocapture %sb, i16* noalias nocapture %sc, i32* noalias nocapture %ia, i32* noalias nocapture %ib, i32* noalias nocapture %ic) nounwind uwtable ssp {
> +  br label %1
> +
> +; <label>:1                                       ; preds = %1, %0
> +  %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
> +  %2 = getelementptr inbounds i32* %ib, i64 %indvars.iv
> +  %3 = load i32* %2, align 4
> +  %4 = getelementptr inbounds i32* %ic, i64 %indvars.iv
> +  %5 = load i32* %4, align 4
> +  %6 = add nsw i32 %5, %3
> +  %7 = getelementptr inbounds i32* %ia, i64 %indvars.iv
> +  store i32 %6, i32* %7, align 4
> +  %8 = getelementptr inbounds i16* %sb, i64 %indvars.iv
> +  %9 = load i16* %8, align 2
> +  %10 = getelementptr inbounds i16* %sc, i64 %indvars.iv
> +  %11 = load i16* %10, align 2
> +  %12 = add i16 %11, %9
> +  %13 = getelementptr inbounds i16* %sa, i64 %indvars.iv
> +  store i16 %12, i16* %13, align 2
> +  %indvars.iv.next = add i64 %indvars.iv, 1
> +  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
> +  %exitcond = icmp eq i32 %lftr.wideiv, 1024
> +  br i1 %exitcond, label %14, label %1
> +
> +; <label>:14                                      ; preds = %1
> +  ret void
> +}
> +
> +;CHECK: @example10b
> +;CHECK: load <4 x i16>
> +;CHECK: sext <4 x i16>
> +;CHECK: store <4 x i32>
> +;CHECK: ret void
> +define void @example10b(i16* noalias nocapture %sa, i16* noalias nocapture %sb, i16* noalias nocapture %sc, i32* noalias nocapture %ia, i32* noalias nocapture %ib, i32* noalias nocapture %ic) nounwind uwtable ssp {
> +  br label %1
> +
> +; <label>:1                                       ; preds = %1, %0
> +  %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
> +  %2 = getelementptr inbounds i16* %sb, i64 %indvars.iv
> +  %3 = load i16* %2, align 2
> +  %4 = sext i16 %3 to i32
> +  %5 = getelementptr inbounds i32* %ia, i64 %indvars.iv
> +  store i32 %4, i32* %5, align 4
> +  %indvars.iv.next = add i64 %indvars.iv, 1
> +  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
> +  %exitcond = icmp eq i32 %lftr.wideiv, 1024
> +  br i1 %exitcond, label %6, label %1
> +
> +; <label>:6                                       ; preds = %1
> +  ret void
> +}
> +
> +;CHECK: @example11
> +;CHECK: load i32
> +;CHECK: load i32
> +;CHECK: load i32
> +;CHECK: load i32
> +;CHECK: insertelement
> +;CHECK: insertelement
> +;CHECK: insertelement
> +;CHECK: insertelement
> +;CHECK: ret void
> +define void @example11() nounwind uwtable ssp {
> +  br label %1
> +
> +; <label>:1                                       ; preds = %1, %0
> +  %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
> +  %2 = shl nsw i64 %indvars.iv, 1
> +  %3 = or i64 %2, 1
> +  %4 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %3
> +  %5 = load i32* %4, align 4
> +  %6 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %3
> +  %7 = load i32* %6, align 4
> +  %8 = mul nsw i32 %7, %5
> +  %9 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %2
> +  %10 = load i32* %9, align 8
> +  %11 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %2
> +  %12 = load i32* %11, align 8
> +  %13 = mul nsw i32 %12, %10
> +  %14 = sub nsw i32 %8, %13
> +  %15 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
> +  store i32 %14, i32* %15, align 4
> +  %16 = mul nsw i32 %7, %10
> +  %17 = mul nsw i32 %12, %5
> +  %18 = add nsw i32 %17, %16
> +  %19 = getelementptr inbounds [2048 x i32]* @d, i64 0, i64 %indvars.iv
> +  store i32 %18, i32* %19, align 4
> +  %indvars.iv.next = add i64 %indvars.iv, 1
> +  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
> +  %exitcond = icmp eq i32 %lftr.wideiv, 512
> +  br i1 %exitcond, label %20, label %1
> +
> +; <label>:20                                      ; preds = %1
> +  ret void
> +}
> +
> +;CHECK: @example12
> +;CHECK: trunc <4 x i64>
> +;CHECK: store <4 x i32>
> +;CHECK: ret void
> +define void @example12() nounwind uwtable ssp {
> +  br label %1
> +
> +; <label>:1                                       ; preds = %1, %0
> +  %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
> +  %2 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
> +  %3 = trunc i64 %indvars.iv to i32
> +  store i32 %3, i32* %2, align 4
> +  %indvars.iv.next = add i64 %indvars.iv, 1
> +  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
> +  %exitcond = icmp eq i32 %lftr.wideiv, 1024
> +  br i1 %exitcond, label %4, label %1
> +
> +; <label>:4                                       ; preds = %1
> +  ret void
> +}
> +
> +; Can't vectorize because of reductions.
> +;CHECK: @example13
> +;CHECK-NOT: <4 x i32>
> +;CHECK: ret void
> +define void @example13(i32** nocapture %A, i32** nocapture %B, i32* nocapture %out) nounwind uwtable ssp {
> +  br label %.preheader
> +
> +.preheader:                                       ; preds = %14, %0
> +  %indvars.iv4 = phi i64 [ 0, %0 ], [ %indvars.iv.next5, %14 ]
> +  %1 = getelementptr inbounds i32** %A, i64 %indvars.iv4
> +  %2 = load i32** %1, align 8
> +  %3 = getelementptr inbounds i32** %B, i64 %indvars.iv4
> +  %4 = load i32** %3, align 8
> +  br label %5
> +
> +; <label>:5                                       ; preds = %.preheader, %5
> +  %indvars.iv = phi i64 [ 0, %.preheader ], [ %indvars.iv.next, %5 ]
> +  %diff.02 = phi i32 [ 0, %.preheader ], [ %11, %5 ]
> +  %6 = getelementptr inbounds i32* %2, i64 %indvars.iv
> +  %7 = load i32* %6, align 4
> +  %8 = getelementptr inbounds i32* %4, i64 %indvars.iv
> +  %9 = load i32* %8, align 4
> +  %10 = add i32 %7, %diff.02
> +  %11 = sub i32 %10, %9
> +  %indvars.iv.next = add i64 %indvars.iv, 8
> +  %12 = trunc i64 %indvars.iv.next to i32
> +  %13 = icmp slt i32 %12, 1024
> +  br i1 %13, label %5, label %14
> +
> +; <label>:14                                      ; preds = %5
> +  %15 = getelementptr inbounds i32* %out, i64 %indvars.iv4
> +  store i32 %11, i32* %15, align 4
> +  %indvars.iv.next5 = add i64 %indvars.iv4, 1
> +  %lftr.wideiv = trunc i64 %indvars.iv.next5 to i32
> +  %exitcond = icmp eq i32 %lftr.wideiv, 32
> +  br i1 %exitcond, label %16, label %.preheader
> +
> +; <label>:16                                      ; preds = %14
> +  ret void
> +}
> +
> +; Can't vectorize because of reductions.
> +;CHECK: @example14
> +;CHECK-NOT: <4 x i32>
> +;CHECK: ret void
> +define void @example14(i32** nocapture %in, i32** nocapture %coeff, i32* nocapture %out) nounwind uwtable ssp {
> +.preheader3:
> +  br label %.preheader
> +
> +.preheader:                                       ; preds = %11, %.preheader3
> +  %indvars.iv7 = phi i64 [ 0, %.preheader3 ], [ %indvars.iv.next8, %11 ]
> +  %sum.05 = phi i32 [ 0, %.preheader3 ], [ %10, %11 ]
> +  br label %0
> +
> +; <label>:0                                       ; preds = %0, %.preheader
> +  %indvars.iv = phi i64 [ 0, %.preheader ], [ %indvars.iv.next, %0 ]
> +  %sum.12 = phi i32 [ %sum.05, %.preheader ], [ %10, %0 ]
> +  %1 = getelementptr inbounds i32** %in, i64 %indvars.iv
> +  %2 = load i32** %1, align 8
> +  %3 = getelementptr inbounds i32* %2, i64 %indvars.iv7
> +  %4 = load i32* %3, align 4
> +  %5 = getelementptr inbounds i32** %coeff, i64 %indvars.iv
> +  %6 = load i32** %5, align 8
> +  %7 = getelementptr inbounds i32* %6, i64 %indvars.iv7
> +  %8 = load i32* %7, align 4
> +  %9 = mul nsw i32 %8, %4
> +  %10 = add nsw i32 %9, %sum.12
> +  %indvars.iv.next = add i64 %indvars.iv, 1
> +  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
> +  %exitcond = icmp eq i32 %lftr.wideiv, 1024
> +  br i1 %exitcond, label %11, label %0
> +
> +; <label>:11                                      ; preds = %0
> +  %indvars.iv.next8 = add i64 %indvars.iv7, 1
> +  %lftr.wideiv9 = trunc i64 %indvars.iv.next8 to i32
> +  %exitcond10 = icmp eq i32 %lftr.wideiv9, 32
> +  br i1 %exitcond10, label %.preheader3.1, label %.preheader
> +
> +.preheader3.1:                                    ; preds = %11
> +  store i32 %10, i32* %out, align 4
> +  br label %.preheader.1
> +
> +.preheader.1:                                     ; preds = %24, %.preheader3.1
> +  %indvars.iv7.1 = phi i64 [ 0, %.preheader3.1 ], [ %indvars.iv.next8.1, %24 ]
> +  %sum.05.1 = phi i32 [ 0, %.preheader3.1 ], [ %23, %24 ]
> +  br label %12
> +
> +; <label>:12                                      ; preds = %12, %.preheader.1
> +  %indvars.iv.1 = phi i64 [ 0, %.preheader.1 ], [ %13, %12 ]
> +  %sum.12.1 = phi i32 [ %sum.05.1, %.preheader.1 ], [ %23, %12 ]
> +  %13 = add nsw i64 %indvars.iv.1, 1
> +  %14 = getelementptr inbounds i32** %in, i64 %13
> +  %15 = load i32** %14, align 8
> +  %16 = getelementptr inbounds i32* %15, i64 %indvars.iv7.1
> +  %17 = load i32* %16, align 4
> +  %18 = getelementptr inbounds i32** %coeff, i64 %indvars.iv.1
> +  %19 = load i32** %18, align 8
> +  %20 = getelementptr inbounds i32* %19, i64 %indvars.iv7.1
> +  %21 = load i32* %20, align 4
> +  %22 = mul nsw i32 %21, %17
> +  %23 = add nsw i32 %22, %sum.12.1
> +  %lftr.wideiv.1 = trunc i64 %13 to i32
> +  %exitcond.1 = icmp eq i32 %lftr.wideiv.1, 1024
> +  br i1 %exitcond.1, label %24, label %12
> +
> +; <label>:24                                      ; preds = %12
> +  %indvars.iv.next8.1 = add i64 %indvars.iv7.1, 1
> +  %lftr.wideiv9.1 = trunc i64 %indvars.iv.next8.1 to i32
> +  %exitcond10.1 = icmp eq i32 %lftr.wideiv9.1, 32
> +  br i1 %exitcond10.1, label %.preheader3.2, label %.preheader.1
> +
> +.preheader3.2:                                    ; preds = %24
> +  %25 = getelementptr inbounds i32* %out, i64 1
> +  store i32 %23, i32* %25, align 4
> +  br label %.preheader.2
> +
> +.preheader.2:                                     ; preds = %38, %.preheader3.2
> +  %indvars.iv7.2 = phi i64 [ 0, %.preheader3.2 ], [ %indvars.iv.next8.2, %38 ]
> +  %sum.05.2 = phi i32 [ 0, %.preheader3.2 ], [ %37, %38 ]
> +  br label %26
> +
> +; <label>:26                                      ; preds = %26, %.preheader.2
> +  %indvars.iv.2 = phi i64 [ 0, %.preheader.2 ], [ %indvars.iv.next.2, %26 ]
> +  %sum.12.2 = phi i32 [ %sum.05.2, %.preheader.2 ], [ %37, %26 ]
> +  %27 = add nsw i64 %indvars.iv.2, 2
> +  %28 = getelementptr inbounds i32** %in, i64 %27
> +  %29 = load i32** %28, align 8
> +  %30 = getelementptr inbounds i32* %29, i64 %indvars.iv7.2
> +  %31 = load i32* %30, align 4
> +  %32 = getelementptr inbounds i32** %coeff, i64 %indvars.iv.2
> +  %33 = load i32** %32, align 8
> +  %34 = getelementptr inbounds i32* %33, i64 %indvars.iv7.2
> +  %35 = load i32* %34, align 4
> +  %36 = mul nsw i32 %35, %31
> +  %37 = add nsw i32 %36, %sum.12.2
> +  %indvars.iv.next.2 = add i64 %indvars.iv.2, 1
> +  %lftr.wideiv.2 = trunc i64 %indvars.iv.next.2 to i32
> +  %exitcond.2 = icmp eq i32 %lftr.wideiv.2, 1024
> +  br i1 %exitcond.2, label %38, label %26
> +
> +; <label>:38                                      ; preds = %26
> +  %indvars.iv.next8.2 = add i64 %indvars.iv7.2, 1
> +  %lftr.wideiv9.2 = trunc i64 %indvars.iv.next8.2 to i32
> +  %exitcond10.2 = icmp eq i32 %lftr.wideiv9.2, 32
> +  br i1 %exitcond10.2, label %.preheader3.3, label %.preheader.2
> +
> +.preheader3.3:                                    ; preds = %38
> +  %39 = getelementptr inbounds i32* %out, i64 2
> +  store i32 %37, i32* %39, align 4
> +  br label %.preheader.3
> +
> +.preheader.3:                                     ; preds = %52, %.preheader3.3
> +  %indvars.iv7.3 = phi i64 [ 0, %.preheader3.3 ], [ %indvars.iv.next8.3, %52 ]
> +  %sum.05.3 = phi i32 [ 0, %.preheader3.3 ], [ %51, %52 ]
> +  br label %40
> +
> +; <label>:40                                      ; preds = %40, %.preheader.3
> +  %indvars.iv.3 = phi i64 [ 0, %.preheader.3 ], [ %indvars.iv.next.3, %40 ]
> +  %sum.12.3 = phi i32 [ %sum.05.3, %.preheader.3 ], [ %51, %40 ]
> +  %41 = add nsw i64 %indvars.iv.3, 3
> +  %42 = getelementptr inbounds i32** %in, i64 %41
> +  %43 = load i32** %42, align 8
> +  %44 = getelementptr inbounds i32* %43, i64 %indvars.iv7.3
> +  %45 = load i32* %44, align 4
> +  %46 = getelementptr inbounds i32** %coeff, i64 %indvars.iv.3
> +  %47 = load i32** %46, align 8
> +  %48 = getelementptr inbounds i32* %47, i64 %indvars.iv7.3
> +  %49 = load i32* %48, align 4
> +  %50 = mul nsw i32 %49, %45
> +  %51 = add nsw i32 %50, %sum.12.3
> +  %indvars.iv.next.3 = add i64 %indvars.iv.3, 1
> +  %lftr.wideiv.3 = trunc i64 %indvars.iv.next.3 to i32
> +  %exitcond.3 = icmp eq i32 %lftr.wideiv.3, 1024
> +  br i1 %exitcond.3, label %52, label %40
> +
> +; <label>:52                                      ; preds = %40
> +  %indvars.iv.next8.3 = add i64 %indvars.iv7.3, 1
> +  %lftr.wideiv9.3 = trunc i64 %indvars.iv.next8.3 to i32
> +  %exitcond10.3 = icmp eq i32 %lftr.wideiv9.3, 32
> +  br i1 %exitcond10.3, label %53, label %.preheader.3
> +
> +; <label>:53                                      ; preds = %52
> +  %54 = getelementptr inbounds i32* %out, i64 3
> +  store i32 %51, i32* %54, align 4
> +  ret void
> +}
> +
> +; Can't vectorize because the src and dst pointers are not disjoint.
> +;CHECK: @example21
> +;CHECK-NOT: <4 x i32>
> +;CHECK: ret i32
> +define i32 @example21(i32* nocapture %b, i32 %n) nounwind uwtable readonly ssp {
> +  %1 = icmp sgt i32 %n, 0
> +  br i1 %1, label %.lr.ph, label %._crit_edge
> +
> +.lr.ph:                                           ; preds = %0
> +  %2 = sext i32 %n to i64
> +  br label %3
> +
> +; <label>:3                                       ; preds = %.lr.ph, %3
> +  %indvars.iv = phi i64 [ %2, %.lr.ph ], [ %indvars.iv.next, %3 ]
> +  %a.02 = phi i32 [ 0, %.lr.ph ], [ %6, %3 ]
> +  %indvars.iv.next = add i64 %indvars.iv, -1
> +  %4 = getelementptr inbounds i32* %b, i64 %indvars.iv.next
> +  %5 = load i32* %4, align 4
> +  %6 = add nsw i32 %5, %a.02
> +  %7 = trunc i64 %indvars.iv.next to i32
> +  %8 = icmp sgt i32 %7, 0
> +  br i1 %8, label %3, label %._crit_edge
> +
> +._crit_edge:                                      ; preds = %3, %0
> +  %a.0.lcssa = phi i32 [ 0, %0 ], [ %6, %3 ]
> +  ret i32 %a.0.lcssa
> +}
> +
> +; Can't vectorize because there are multiple PHIs.
> +;CHECK: @example23
> +;CHECK-NOT: <4 x i32>
> +;CHECK: ret void
> +define void @example23(i16* nocapture %src, i32* nocapture %dst) nounwind uwtable ssp {
> +  br label %1
> +
> +; <label>:1                                       ; preds = %1, %0
> +  %.04 = phi i16* [ %src, %0 ], [ %2, %1 ]
> +  %.013 = phi i32* [ %dst, %0 ], [ %6, %1 ]
> +  %i.02 = phi i32 [ 0, %0 ], [ %7, %1 ]
> +  %2 = getelementptr inbounds i16* %.04, i64 1
> +  %3 = load i16* %.04, align 2
> +  %4 = zext i16 %3 to i32
> +  %5 = shl nuw nsw i32 %4, 7
> +  %6 = getelementptr inbounds i32* %.013, i64 1
> +  store i32 %5, i32* %.013, align 4
> +  %7 = add nsw i32 %i.02, 1
> +  %exitcond = icmp eq i32 %7, 256
> +  br i1 %exitcond, label %8, label %1
> +
> +; <label>:8                                       ; preds = %1
> +  ret void
> +}
> +
> +;CHECK: @example24
> +;CHECK: shufflevector <4 x i16>
> +;CHECK: ret void
> +define void @example24(i16 signext %x, i16 signext %y) nounwind uwtable ssp {
> +  br label %1
> +
> +; <label>:1                                       ; preds = %1, %0
> +  %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
> +  %2 = getelementptr inbounds [1024 x float]* @fa, i64 0, i64 %indvars.iv
> +  %3 = load float* %2, align 4
> +  %4 = getelementptr inbounds [1024 x float]* @fb, i64 0, i64 %indvars.iv
> +  %5 = load float* %4, align 4
> +  %6 = fcmp olt float %3, %5
> +  %x.y = select i1 %6, i16 %x, i16 %y
> +  %7 = sext i16 %x.y to i32
> +  %8 = getelementptr inbounds [1024 x i32]* @ic, i64 0, i64 %indvars.iv
> +  store i32 %7, i32* %8, align 4
> +  %indvars.iv.next = add i64 %indvars.iv, 1
> +  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
> +  %exitcond = icmp eq i32 %lftr.wideiv, 1024
> +  br i1 %exitcond, label %9, label %1
> +
> +; <label>:9                                       ; preds = %1
> +  ret void
> +}
> +
> +;CHECK: @example25
> +;CHECK: and <4 x i1>
> +;CHECK: zext <4 x i1>
> +;CHECK: ret void
> +define void @example25() nounwind uwtable ssp {
> +  br label %1
> +
> +; <label>:1                                       ; preds = %1, %0
> +  %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
> +  %2 = getelementptr inbounds [1024 x float]* @da, i64 0, i64 %indvars.iv
> +  %3 = load float* %2, align 4
> +  %4 = getelementptr inbounds [1024 x float]* @db, i64 0, i64 %indvars.iv
> +  %5 = load float* %4, align 4
> +  %6 = fcmp olt float %3, %5
> +  %7 = getelementptr inbounds [1024 x float]* @dc, i64 0, i64 %indvars.iv
> +  %8 = load float* %7, align 4
> +  %9 = getelementptr inbounds [1024 x float]* @dd, i64 0, i64 %indvars.iv
> +  %10 = load float* %9, align 4
> +  %11 = fcmp olt float %8, %10
> +  %12 = and i1 %6, %11
> +  %13 = zext i1 %12 to i32
> +  %14 = getelementptr inbounds [1024 x i32]* @dj, i64 0, i64 %indvars.iv
> +  store i32 %13, i32* %14, align 4
> +  %indvars.iv.next = add i64 %indvars.iv, 1
> +  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
> +  %exitcond = icmp eq i32 %lftr.wideiv, 1024
> +  br i1 %exitcond, label %15, label %1
> +
> +; <label>:15                                      ; preds = %1
> +  ret void
> +}
> +
> 
> Added: llvm/trunk/test/Transforms/LoopVectorize/lit.local.cfg
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/lit.local.cfg?rev=166112&view=auto
> ==============================================================================
> --- llvm/trunk/test/Transforms/LoopVectorize/lit.local.cfg (added)
> +++ llvm/trunk/test/Transforms/LoopVectorize/lit.local.cfg Wed Oct 17 13:25:06 2012
> @@ -0,0 +1 @@
> +config.suffixes = ['.ll', '.c', '.cpp']
> 
> Added: llvm/trunk/test/Transforms/LoopVectorize/non-const-n.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/non-const-n.ll?rev=166112&view=auto
> ==============================================================================
> --- llvm/trunk/test/Transforms/LoopVectorize/non-const-n.ll (added)
> +++ llvm/trunk/test/Transforms/LoopVectorize/non-const-n.ll Wed Oct 17 13:25:06 2012
> @@ -0,0 +1,38 @@
> +; RUN: opt < %s  -loop-vectorize -dce -instcombine -licm -S | FileCheck %s
> +
> +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
> +target triple = "x86_64-apple-macosx10.8.0"
> +
> + at b = common global [2048 x i32] zeroinitializer, align 16
> + at c = common global [2048 x i32] zeroinitializer, align 16
> + at a = common global [2048 x i32] zeroinitializer, align 16
> +
> +;CHECK: @example1
> +;CHECK: shl i32
> +;CHECK: sext i32
> +;CHECK: load <4 x i32>
> +;CHECK: add <4 x i32>
> +;CHECK: store <4 x i32>
> +;CHECK: ret void
> +define void @example1(i32 %n) nounwind uwtable ssp {
> +  %n4 = shl i32 %n, 2
> +  br label %1
> +
> +; <label>:1                                       ; preds = %1, %0
> +  %indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
> +  %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
> +  %3 = load i32* %2, align 4
> +  %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
> +  %5 = load i32* %4, align 4
> +  %6 = add nsw i32 %5, %3
> +  %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
> +  store i32 %6, i32* %7, align 4
> +  %indvars.iv.next = add i64 %indvars.iv, 1
> +  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
> +  %exitcond = icmp eq i32 %lftr.wideiv, %n4
> +  br i1 %exitcond, label %8, label %1
> +
> +; <label>:8                                       ; preds = %1
> +  ret void
> +}
> +
> 
> 
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits





More information about the llvm-commits mailing list