[llvm] r274293 - Add LoadStoreVectorizer pass

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 30 16:11:39 PDT 2016


Author: arsenm
Date: Thu Jun 30 18:11:38 2016
New Revision: 274293

URL: http://llvm.org/viewvc/llvm-project?rev=274293&view=rev
Log:
Add LoadStoreVectorizer pass

This was contributed by Apple, and I've been working on
minimal cleanups and generalizing it.
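
The new tests run the pass through opt as "-basicaa -load-store-vectorizer".
As a minimal sketch (not part of this patch), scheduling it from C++ with the
legacy pass manager looks like the following; the 128-bit VecRegSize default
comes from the declaration added to Transforms/Vectorize.h:

  #include "llvm/IR/LegacyPassManager.h"
  #include "llvm/Transforms/Vectorize.h"

  // Illustrative helper, not part of this patch.
  void addLoadStoreVectorizer(llvm::legacy::PassManager &PM) {
    // VecRegSize is the target's vector register width in bits.
    PM.add(llvm::createLoadStoreVectorizerPass(/*VecRegSize=*/128));
  }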

Added:
    llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
    llvm/trunk/test/Transforms/LoadStoreVectorizer/
    llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/
    llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
    llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
    llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll
    llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/lit.local.cfg
    llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
    llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
    llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll
Modified:
    llvm/trunk/include/llvm/InitializePasses.h
    llvm/trunk/include/llvm/LinkAllPasses.h
    llvm/trunk/include/llvm/Transforms/Vectorize.h
    llvm/trunk/lib/Transforms/Vectorize/CMakeLists.txt
    llvm/trunk/lib/Transforms/Vectorize/Vectorize.cpp

Modified: llvm/trunk/include/llvm/InitializePasses.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/InitializePasses.h?rev=274293&r1=274292&r2=274293&view=diff
==============================================================================
--- llvm/trunk/include/llvm/InitializePasses.h (original)
+++ llvm/trunk/include/llvm/InitializePasses.h Thu Jun 30 18:11:38 2016
@@ -169,6 +169,7 @@ void initializeLiveStacksPass(PassRegist
 void initializeLiveVariablesPass(PassRegistry&);
 void initializeLoadCombinePass(PassRegistry&);
 void initializeLoaderPassPass(PassRegistry&);
+void initializeLoadStoreVectorizerPass(PassRegistry&);
 void initializeLocalStackSlotPassPass(PassRegistry&);
 void initializeLoopAccessAnalysisPass(PassRegistry&);
 void initializeLoopDataPrefetchPass(PassRegistry&);

Modified: llvm/trunk/include/llvm/LinkAllPasses.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/LinkAllPasses.h?rev=274293&r1=274292&r2=274293&view=diff
==============================================================================
--- llvm/trunk/include/llvm/LinkAllPasses.h (original)
+++ llvm/trunk/include/llvm/LinkAllPasses.h Thu Jun 30 18:11:38 2016
@@ -183,6 +183,7 @@ namespace {
       (void) llvm::createInstructionSimplifierPass();
       (void) llvm::createLoopVectorizePass();
       (void) llvm::createSLPVectorizerPass();
+      (void) llvm::createLoadStoreVectorizerPass(128);
       (void) llvm::createBBVectorizePass();
       (void) llvm::createPartiallyInlineLibCallsPass();
       (void) llvm::createScalarizerPass();

Modified: llvm/trunk/include/llvm/Transforms/Vectorize.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Transforms/Vectorize.h?rev=274293&r1=274292&r2=274293&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Transforms/Vectorize.h (original)
+++ llvm/trunk/include/llvm/Transforms/Vectorize.h Thu Jun 30 18:11:38 2016
@@ -139,6 +139,13 @@ Pass *createSLPVectorizerPass();
 bool vectorizeBasicBlock(Pass *P, BasicBlock &BB,
                          const VectorizeConfig &C = VectorizeConfig());
 
+//===----------------------------------------------------------------------===//
+//
+// LoadStoreVectorizer - Create vector loads and stores, but leave scalar
+// operations.
+//
+Pass *createLoadStoreVectorizerPass(unsigned VecRegSize = 128);
+
 } // End llvm namespace
 
 #endif

Modified: llvm/trunk/lib/Transforms/Vectorize/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/CMakeLists.txt?rev=274293&r1=274292&r2=274293&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Vectorize/CMakeLists.txt (original)
+++ llvm/trunk/lib/Transforms/Vectorize/CMakeLists.txt Thu Jun 30 18:11:38 2016
@@ -1,8 +1,9 @@
 add_llvm_library(LLVMVectorize
   BBVectorize.cpp
-  Vectorize.cpp
+  LoadStoreVectorizer.cpp
   LoopVectorize.cpp
   SLPVectorizer.cpp
+  Vectorize.cpp
 
   ADDITIONAL_HEADER_DIRS
   ${LLVM_MAIN_INCLUDE_DIR}/llvm/Transforms

Added: llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp?rev=274293&view=auto
==============================================================================
--- llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp (added)
+++ llvm/trunk/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp Thu Jun 30 18:11:38 2016
@@ -0,0 +1,824 @@
+//===----- LoadStoreVectorizer.cpp - GPU Load & Store Vectorizer ----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
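+// This pass merges loads/stores to consecutive memory addresses into wider
+// vector loads/stores.
+//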
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Vectorize.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/VectorUtils.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "load-store-vectorizer"
+STATISTIC(NumVectorInstructions, "Number of vector accesses generated");
+STATISTIC(NumScalarsVectorized, "Number of scalar accesses vectorized");
+
+namespace {
+
+// TODO: Remove this
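+// Assumed minimum alignment in bytes: misaligned chains are rejected unless
+// their alignment is a multiple of this, or the underlying alloca can be
+// realigned to it.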
+static const unsigned TargetBaseAlign = 4;
+
+class Vectorizer {
+  typedef SmallVector<Value *, 8> ValueList;
+  typedef MapVector<Value *, ValueList> ValueListMap;
+
+  Function &F;
+  AliasAnalysis &AA;
+  DominatorTree &DT;
+  ScalarEvolution &SE;
+  const DataLayout &DL;
+  IRBuilder<> Builder;
+  ValueListMap StoreRefs;
+  ValueListMap LoadRefs;
+  unsigned VecRegSize;
+
+public:
+  Vectorizer(Function &F, AliasAnalysis &AA, DominatorTree &DT,
+             ScalarEvolution &SE, unsigned VecRegSize)
+    : F(F), AA(AA), DT(DT), SE(SE), DL(F.getParent()->getDataLayout()),
+      Builder(SE.getContext()), VecRegSize(VecRegSize) {}
+
+  bool run();
+
+private:
+  Value *getPointerOperand(Value *I);
+
+  unsigned getPointerAddressSpace(Value *I);
+
+  bool isConsecutiveAccess(Value *A, Value *B);
+
+  /// Reorders the users of I after vectorization to ensure that I dominates its
+  /// users.
+  void reorder(Instruction *I);
+
+  /// Returns the first and the last instructions in Chain.
+  std::pair<BasicBlock::iterator, BasicBlock::iterator>
+  getBoundaryInstrs(ArrayRef<Value *> Chain);
+
+  /// Erases the original instructions after vectorizing.
+  void eraseInstructions(ArrayRef<Value *> Chain);
+
+  /// "Legalize" the vector type that would be produced by combining \p
+  /// ElementSizeBits elements in \p Chain. Break into two pieces such that the
+  /// total size of each piece is 1, 2 or a multiple of 4 bytes. \p Chain is
+  /// expected to have more than 4 elements.
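+  /// For example, a chain of five i16 elements (10 bytes total) splits into
+  /// a four-element piece (8 bytes) and a one-element piece (2 bytes).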
+  std::pair<ArrayRef<Value *>, ArrayRef<Value *>>
+  splitOddVectorElts(ArrayRef<Value *> Chain, unsigned ElementSizeBits);
+
+  /// Checks if there are any instructions which may affect the memory accessed
+  /// in the chain between \p From and \p To. The elements of \p Chain should be
+  /// all loads or all stores.
+  bool isVectorizable(ArrayRef<Value *> Chain, BasicBlock::iterator From,
+                      BasicBlock::iterator To);
+
+  /// Collects load and store instructions to vectorize.
+  void collectInstructions(BasicBlock *BB);
+
+  /// Processes the collected instructions in \p Map. The elements of \p Map
+  /// should be all loads or all stores.
+  bool vectorizeChains(ValueListMap &Map);
+
+  /// Finds loads and stores to consecutive memory addresses and vectorizes
+  /// them.
+  bool vectorizeInstructions(ArrayRef<Value *> Instrs);
+
+  /// Vectorizes the load instructions in Chain.
+  bool vectorizeLoadChain(ArrayRef<Value *> Chain);
+
+  /// Vectorizes the store instructions in Chain.
+  bool vectorizeStoreChain(ArrayRef<Value *> Chain);
+};
+
+class LoadStoreVectorizer : public FunctionPass {
+public:
+  static char ID;
+  unsigned VecRegSize;
+
+  LoadStoreVectorizer(unsigned VecRegSize = 128) : FunctionPass(ID),
+                                                   VecRegSize(VecRegSize) {
+    initializeLoadStoreVectorizerPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnFunction(Function &F) override;
+
+  const char *getPassName() const override {
+    return "GPU Load and Store Vectorizer";
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<AAResultsWrapperPass>();
+    AU.addRequired<ScalarEvolutionWrapperPass>();
+    AU.addRequired<DominatorTreeWrapperPass>();
+    AU.setPreservesCFG();
+  }
+};
+}
+
+INITIALIZE_PASS_BEGIN(LoadStoreVectorizer, DEBUG_TYPE,
+                      "Vectorize load and Store instructions", false, false);
+INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
+INITIALIZE_PASS_END(LoadStoreVectorizer, DEBUG_TYPE,
+                    "Vectorize load and store instructions", false, false);
+
+char LoadStoreVectorizer::ID = 0;
+
+Pass *llvm::createLoadStoreVectorizerPass(unsigned VecRegSize) {
+  return new LoadStoreVectorizer(VecRegSize);
+}
+
+bool LoadStoreVectorizer::runOnFunction(Function &F) {
+  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
+  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+  ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
+
+  // Don't vectorize when the attribute NoImplicitFloat is used.
+  if (F.hasFnAttribute(Attribute::NoImplicitFloat))
+    return false;
+
+  Vectorizer V(F, AA, DT, SE, VecRegSize);
+  return V.run();
+}
+
+// Vectorizer Implementation
+bool Vectorizer::run() {
+  bool Changed = false;
+
+  // Scan the blocks in the function in post order.
+  for (BasicBlock *BB : post_order(&F)) {
+    collectInstructions(BB);
+    Changed |= vectorizeChains(LoadRefs);
+    Changed |= vectorizeChains(StoreRefs);
+  }
+
+  return Changed;
+}
+
+Value *Vectorizer::getPointerOperand(Value *I) {
+  if (LoadInst *LI = dyn_cast<LoadInst>(I))
+    return LI->getPointerOperand();
+  if (StoreInst *SI = dyn_cast<StoreInst>(I))
+    return SI->getPointerOperand();
+  return nullptr;
+}
+
+unsigned Vectorizer::getPointerAddressSpace(Value *I) {
+  if (LoadInst *L = dyn_cast<LoadInst>(I))
+    return L->getPointerAddressSpace();
+  if (StoreInst *S = dyn_cast<StoreInst>(I))
+    return S->getPointerAddressSpace();
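+  // Neither a load nor a store; -1 wraps around to a value that will not
+  // match any real address space.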
+  return -1;
+}
+
+// FIXME: Merge with llvm::isConsecutiveAccess
+bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
+  Value *PtrA = getPointerOperand(A);
+  Value *PtrB = getPointerOperand(B);
+  unsigned ASA = getPointerAddressSpace(A);
+  unsigned ASB = getPointerAddressSpace(B);
+
+  // Check that the address spaces match and that the pointers are valid.
+  if (!PtrA || !PtrB || (ASA != ASB))
+    return false;
+
+  // Make sure that A and B are different pointers and that their types have
+  // the same store size.
+  unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
+  Type *PtrATy = PtrA->getType()->getPointerElementType();
+  Type *PtrBTy = PtrB->getType()->getPointerElementType();
+  if (PtrA == PtrB ||
+      DL.getTypeStoreSize(PtrATy) != DL.getTypeStoreSize(PtrBTy) ||
+      DL.getTypeStoreSize(PtrATy->getScalarType()) !=
+      DL.getTypeStoreSize(PtrBTy->getScalarType()))
+    return false;
+
+  APInt Size(PtrBitWidth, DL.getTypeStoreSize(PtrATy));
+
+  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
+  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
+  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
+
+  APInt OffsetDelta = OffsetB - OffsetA;
+
+  // If they are based on the same pointer, the constant offsets alone are
+  // sufficient.
+  if (PtrA == PtrB)
+    return OffsetDelta == Size;
+
+  // Compute the base pointer delta needed to make the final delta equal to
+  // the access size.
+  APInt BaseDelta = Size - OffsetDelta;
+
+  // Compute the distance with SCEV between the base pointers.
+  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
+  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
+  const SCEV *C = SE.getConstant(BaseDelta);
+  const SCEV *X = SE.getAddExpr(PtrSCEVA, C);
+  if (X == PtrSCEVB)
+    return true;
+
+  // Sometimes even this doesn't work, because SCEV can't always see through
+  // patterns that look like (gep (ext (add (shl X, C1), C2))). Try checking
+  // things the hard way.
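+  // (The zext_trunc_phi_1 test added below exercises this path.)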
+
+  // Look through GEPs after checking they're the same except for the last
+  // index.
+  GetElementPtrInst *GEPA = dyn_cast<GetElementPtrInst>(getPointerOperand(A));
+  GetElementPtrInst *GEPB = dyn_cast<GetElementPtrInst>(getPointerOperand(B));
+  if (!GEPA || !GEPB || GEPA->getNumOperands() != GEPB->getNumOperands())
+    return false;
+  unsigned FinalIndex = GEPA->getNumOperands() - 1;
+  for (unsigned i = 0; i < FinalIndex; i++)
+    if (GEPA->getOperand(i) != GEPB->getOperand(i))
+      return false;
+
+  Instruction *OpA = dyn_cast<Instruction>(GEPA->getOperand(FinalIndex));
+  Instruction *OpB = dyn_cast<Instruction>(GEPB->getOperand(FinalIndex));
+  if (!OpA || !OpB || OpA->getOpcode() != OpB->getOpcode() ||
+      OpA->getType() != OpB->getType())
+    return false;
+
+  // Only look through a ZExt/SExt.
+  if (!isa<SExtInst>(OpA) && !isa<ZExtInst>(OpA))
+    return false;
+
+  OpA = dyn_cast<Instruction>(OpA->getOperand(0));
+  OpB = dyn_cast<Instruction>(OpB->getOperand(0));
+  if (!OpA || !OpB || OpA->getType() != OpB->getType())
+    return false;
+
+  // Now we need to prove that adding 1 to OpA won't overflow.
+  unsigned BitWidth = OpA->getType()->getScalarSizeInBits();
+  APInt KnownZero = APInt(BitWidth, 0);
+  APInt KnownOne = APInt(BitWidth, 0);
+  computeKnownBits(OpA, KnownZero, KnownOne, DL, 0, nullptr, OpA, &DT);
+  // If any bits are known to be zero other than the sign bit in OpA, we can
+  // add 1 to it while guaranteeing no overflow of any sort.
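+  // For example, if OpA is (shl X, 2), its low bit is known zero, so adding
+  // 1 only sets that bit and cannot carry into higher bits.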
+  KnownZero &= ~APInt::getHighBitsSet(BitWidth, 1);
+  if (KnownZero == 0)
+    return false;
+
+  const SCEV *OffsetSCEVA = SE.getSCEV(OpA);
+  const SCEV *OffsetSCEVB = SE.getSCEV(OpB);
+  const SCEV *One = SE.getConstant(APInt(BitWidth, 1));
+  const SCEV *X2 = SE.getAddExpr(OffsetSCEVA, One);
+  return X2 == OffsetSCEVB;
+}
+
+void Vectorizer::reorder(Instruction *I) {
+  for (User *U : I->users()) {
+    Instruction *User = dyn_cast<Instruction>(U);
+    if (!User || User->getOpcode() == Instruction::PHI)
+      continue;
+
+    if (!DT.dominates(I, User)) {
+      User->removeFromParent();
+      User->insertAfter(I);
+      reorder(User);
+    }
+  }
+}
+
+std::pair<BasicBlock::iterator, BasicBlock::iterator>
+Vectorizer::getBoundaryInstrs(ArrayRef<Value *> Chain) {
+  Instruction *C0 = cast<Instruction>(Chain[0]);
+  BasicBlock::iterator FirstInstr = C0->getIterator();
+  BasicBlock::iterator LastInstr = C0->getIterator();
+
+  BasicBlock *BB = C0->getParent();
+  unsigned NumFound = 0;
+  for (Instruction &I : *BB) {
+    if (!is_contained(Chain, &I))
+      continue;
+
+    ++NumFound;
+    if (NumFound == 1) {
+      FirstInstr = I.getIterator();
+    } else if (NumFound == Chain.size()) {
+      LastInstr = I.getIterator();
+      break;
+    }
+  }
+
+  return std::make_pair(FirstInstr, LastInstr);
+}
+
+void Vectorizer::eraseInstructions(ArrayRef<Value *> Chain) {
+  SmallVector<Instruction *, 16> Instrs;
+  for (Value *V : Chain) {
+    Value *PtrOperand = getPointerOperand(V);
+    assert(PtrOperand && "Instruction must have a pointer operand.");
+    Instrs.push_back(cast<Instruction>(V));
+    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(PtrOperand))
+      Instrs.push_back(GEP);
+  }
+
+  // Erase instructions.
+  for (Value *V : Instrs) {
+    Instruction *Instr = cast<Instruction>(V);
+    if (Instr->use_empty())
+      Instr->eraseFromParent();
+  }
+}
+
+std::pair<ArrayRef<Value *>, ArrayRef<Value *>>
+Vectorizer::splitOddVectorElts(ArrayRef<Value *> Chain,
+                               unsigned ElementSizeBits) {
+  unsigned ElemSizeInBytes = ElementSizeBits / 8;
+  unsigned SizeInBytes = ElemSizeInBytes * Chain.size();
+  unsigned NumRight = (SizeInBytes % 4) / ElemSizeInBytes;
+  unsigned NumLeft = Chain.size() - NumRight;
+  return std::make_pair(Chain.slice(0, NumLeft), Chain.slice(NumLeft));
+}
+
+bool Vectorizer::isVectorizable(ArrayRef<Value *> Chain,
+                                BasicBlock::iterator From,
+                                BasicBlock::iterator To) {
+  SmallVector<std::pair<Value *, unsigned>, 16> MemoryInstrs;
+  SmallVector<std::pair<Value *, unsigned>, 16> ChainInstrs;
+
+  unsigned Idx = 0;
+  for (auto I = From, E = To; I != E; ++I, ++Idx) {
+    if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
+      if (!is_contained(Chain, &*I))
+        MemoryInstrs.push_back({ &*I, Idx });
+      else
+        ChainInstrs.push_back({ &*I, Idx });
+    } else if (I->mayHaveSideEffects()) {
+      DEBUG(dbgs() << "LSV: Found side-effecting operation: " << *I << '\n');
+      return false;
+    }
+  }
+
+  for (auto EntryMem : MemoryInstrs) {
+    Value *V = EntryMem.first;
+    unsigned VIdx = EntryMem.second;
+    for (auto EntryChain : ChainInstrs) {
+      Value *VV = EntryChain.first;
+      unsigned VVIdx = EntryChain.second;
+      if (isa<LoadInst>(V) && isa<LoadInst>(VV))
+        continue;
+
+      // We can ignore the alias as long as the load comes before the store,
+      // because that means we won't be moving the load past the store to
+      // vectorize it (the vectorized load is inserted at the location of the
+      // first load in the chain).
+      if (isa<StoreInst>(V) && isa<LoadInst>(VV) && VVIdx < VIdx)
+        continue;
+
+      // Same case, but in reverse.
+      if (isa<LoadInst>(V) && isa<StoreInst>(VV) && VVIdx > VIdx)
+        continue;
+
+      Instruction *M0 = cast<Instruction>(V);
+      Instruction *M1 = cast<Instruction>(VV);
+      Value *Ptr0 = getPointerOperand(M0);
+      Value *Ptr1 = getPointerOperand(M1);
+      unsigned S0 =
+        DL.getTypeStoreSize(Ptr0->getType()->getPointerElementType());
+      unsigned S1 =
+        DL.getTypeStoreSize(Ptr1->getType()->getPointerElementType());
+
+      if (AA.alias(MemoryLocation(Ptr0, S0), MemoryLocation(Ptr1, S1))) {
+        DEBUG(
+          dbgs() << "LSV: Found alias.\n"
+                    "        Aliasing instruction and pointer:\n"
+            << *V << " aliases " << *Ptr0 << '\n'
+            << "        Aliased instruction and pointer:\n"
+            << *VV << " aliases " << *Ptr1 << '\n'
+          );
+
+        return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+void Vectorizer::collectInstructions(BasicBlock *BB) {
+  LoadRefs.clear();
+  StoreRefs.clear();
+
+  for (Instruction &I : *BB) {
+    if (!I.mayReadOrWriteMemory())
+      continue;
+
+    if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
+      if (!LI->isSimple())
+        continue;
+
+      Type *Ty = LI->getType();
+      if (!VectorType::isValidElementType(Ty->getScalarType()))
+        continue;
+
+      // No point in looking at these if they're too big to vectorize.
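+      // (An element wider than half a vector register could never be paired
+      // with even one more element.)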
+      if (DL.getTypeSizeInBits(Ty) > VecRegSize / 2)
+        continue;
+
+      // Make sure all the users of a vector are constant-index extracts.
+      if (isa<VectorType>(Ty) &&
+          !all_of(LI->users(), [LI](const User *U) {
+            const Instruction *UI = cast<Instruction>(U);
+            return isa<ExtractElementInst>(UI) &&
+                   isa<ConstantInt>(UI->getOperand(1));
+          }))
+        continue;
+
+      // TODO: Target hook to filter types.
+
+      // Save the load locations.
+      Value *Ptr = GetUnderlyingObject(LI->getPointerOperand(), DL);
+      LoadRefs[Ptr].push_back(LI);
+
+    } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
+      if (!SI->isSimple())
+        continue;
+
+      Type *Ty = SI->getValueOperand()->getType();
+      if (!VectorType::isValidElementType(Ty->getScalarType()))
+        continue;
+
+      if (DL.getTypeSizeInBits(Ty) > VecRegSize / 2)
+        continue;
+
+      if (isa<VectorType>(Ty) &&
+          !all_of(SI->users(), [SI](const User *U) {
+            const Instruction *UI = cast<Instruction>(U);
+            return isa<ExtractElementInst>(UI) &&
+                   isa<ConstantInt>(UI->getOperand(1));
+          }))
+        continue;
+
+      // Save store location.
+      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL);
+      StoreRefs[Ptr].push_back(SI);
+    }
+  }
+}
+
+bool Vectorizer::vectorizeChains(ValueListMap &Map) {
+  bool Changed = false;
+
+  for (const std::pair<Value *, ValueList> &Chain : Map) {
+    unsigned Size = Chain.second.size();
+    if (Size < 2)
+      continue;
+
+    DEBUG(dbgs() << "LSV: Analyzing a chain of length " << Size << ".\n");
+
+    // Process the instructions in chunks of 64.
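+    // (vectorizeInstructions indexes a fixed ConsecutiveChain[64] array, so
+    // a chunk must not exceed 64 elements.)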
+    for (unsigned CI = 0, CE = Size; CI < CE; CI += 64) {
+      unsigned Len = std::min<unsigned>(CE - CI, 64);
+      ArrayRef<Value *> Chunk(&Chain.second[CI], Len);
+      Changed |= vectorizeInstructions(Chunk);
+    }
+  }
+
+  return Changed;
+}
+
+bool Vectorizer::vectorizeInstructions(ArrayRef<Value *> Instrs) {
+  DEBUG(dbgs() << "LSV: Vectorizing " << Instrs.size() << " instructions.\n");
+  SmallSetVector<int, 16> Heads, Tails;
+  int ConsecutiveChain[64];
+
+  // Do a quadratic search over all of the given accesses and find all of the
+  // pairs of accesses that follow each other in memory.
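+  // ConsecutiveChain[i] == j records that Instrs[j] directly follows
+  // Instrs[i] in memory; Heads collects potential chain starts, Tails the
+  // accesses known to follow another one.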
+  for (int i = 0, e = Instrs.size(); i < e; ++i) {
+    ConsecutiveChain[i] = -1;
+    for (int j = e - 1; j >= 0; --j) {
+      if (i == j)
+        continue;
+
+      if (isConsecutiveAccess(Instrs[i], Instrs[j])) {
+        if (ConsecutiveChain[i] != -1) {
+          int CurDistance = std::abs(ConsecutiveChain[i] - i);
+          int NewDistance = std::abs(ConsecutiveChain[i] - j);
+          if (j < i || NewDistance > CurDistance)
+            continue; // Should not insert.
+        }
+
+        Tails.insert(j);
+        Heads.insert(i);
+        ConsecutiveChain[i] = j;
+      }
+    }
+  }
+
+  bool Changed = false;
+  SmallPtrSet<Value *, 16> VectorizedValues;
+
+  for (int Head : Heads) {
+    if (Tails.count(Head))
+      continue;
+
+    // We found an instr that starts a chain. Now follow the chain and try to
+    // vectorize it.
+    SmallVector<Value *, 16> Operands;
+    int I = Head;
+    while (I != -1 && (Tails.count(I) || Heads.count(I))) {
+      if (VectorizedValues.count(Instrs[I]))
+        break;
+
+      Operands.push_back(Instrs[I]);
+      I = ConsecutiveChain[I];
+    }
+
+    bool Vectorized = false;
+    if (isa<LoadInst>(*Operands.begin()))
+      Vectorized = vectorizeLoadChain(Operands);
+    else
+      Vectorized = vectorizeStoreChain(Operands);
+
+    // Mark the vectorized instructions so that we don't vectorize them again.
+    if (Vectorized)
+      VectorizedValues.insert(Operands.begin(), Operands.end());
+    Changed |= Vectorized;
+  }
+
+  return Changed;
+}
+
+bool Vectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain) {
+  StoreInst *S0 = cast<StoreInst>(Chain[0]);
+  Type *StoreTy = S0->getValueOperand()->getType();
+  unsigned Sz = DL.getTypeSizeInBits(StoreTy);
+  unsigned VF = VecRegSize / Sz;
+  unsigned ChainSize = Chain.size();
+
+  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2)
+    return false;
+
+  // Store size should be 1B, 2B or multiple of 4B.
+  // TODO: Target hook for size constraint?
+  unsigned SzInBytes = (Sz / 8) * ChainSize;
+  if (SzInBytes > 2 && SzInBytes % 4 != 0) {
+    DEBUG(dbgs() << "LSV: Size should be 1B, 2B "
+                    "or multiple of 4B. Splitting.\n");
+    if (SzInBytes == 3)
+      return vectorizeStoreChain(Chain.slice(0, ChainSize - 1));
+
+    auto Chains = splitOddVectorElts(Chain, Sz);
+    return vectorizeStoreChain(Chains.first) |
+           vectorizeStoreChain(Chains.second);
+  }
+
+  VectorType *VecTy;
+  VectorType *VecStoreTy = dyn_cast<VectorType>(StoreTy);
+  if (VecStoreTy)
+    VecTy = VectorType::get(StoreTy->getScalarType(),
+                            Chain.size() * VecStoreTy->getNumElements());
+  else
+    VecTy = VectorType::get(StoreTy, Chain.size());
+
+  // If it's more than the max vector size, break it into two pieces.
+  // TODO: Target hook to control types to split to.
+  if (ChainSize > VF) {
+    DEBUG(dbgs() << "LSV: Vector factor is too big."
+                    " Creating two separate arrays.\n");
+    return vectorizeStoreChain(Chain.slice(0, VF)) |
+           vectorizeStoreChain(Chain.slice(VF));
+  }
+
+  DEBUG(
+    dbgs() << "LSV: Stores to vectorize:\n";
+    for (Value *V : Chain)
+      V->dump();
+  );
+
+  // Check alignment restrictions.
+  unsigned Alignment = S0->getAlignment();
+
+  // If the store is going to be misaligned, don't vectorize it.
+  // TODO: Check TLI.allowsMisalignedMemoryAccess
+  if ((Alignment % SzInBytes) != 0 && (Alignment % TargetBaseAlign) != 0) {
+    if (S0->getPointerAddressSpace() == 0) {
+      // If we're storing to an object on the stack, we control its alignment,
+      // so we can cheat and change it!
+      Value *V = GetUnderlyingObject(S0->getPointerOperand(), DL);
+      if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V)) {
+        AI->setAlignment(TargetBaseAlign);
+        Alignment = TargetBaseAlign;
+      } else {
+        return false;
+      }
+    } else {
+      return false;
+    }
+  }
+
+  BasicBlock::iterator First, Last;
+  std::tie(First, Last) = getBoundaryInstrs(Chain);
+
+  if (!isVectorizable(Chain, First, Last))
+    return false;
+
+  // Set insert point.
+  Builder.SetInsertPoint(&*Last);
+  unsigned AS = S0->getPointerAddressSpace();
+
+  Value *Vec = UndefValue::get(VecTy);
+
+  if (VecStoreTy) {
+    unsigned VecWidth = VecStoreTy->getNumElements();
+    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
+      StoreInst *Store = cast<StoreInst>(Chain[I]);
+      for (unsigned J = 0, NE = VecStoreTy->getNumElements(); J != NE; ++J) {
+        unsigned NewIdx = J + I * VecWidth;
+        Value *Extract = Builder.CreateExtractElement(Store->getValueOperand(),
+                                                      Builder.getInt32(J));
+        if (Extract->getType() != StoreTy->getScalarType())
+          Extract = Builder.CreateBitCast(Extract, StoreTy->getScalarType());
+
+        Value *Insert = Builder.CreateInsertElement(Vec, Extract,
+                                                    Builder.getInt32(NewIdx));
+        Vec = Insert;
+      }
+    }
+  } else {
+    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
+      StoreInst *Store = cast<StoreInst>(Chain[I]);
+      Value *Extract = Store->getValueOperand();
+      if (Extract->getType() != StoreTy->getScalarType())
+        Extract = Builder.CreateBitCast(Extract, StoreTy->getScalarType());
+
+      Value *Insert = Builder.CreateInsertElement(Vec, Extract,
+                                                  Builder.getInt32(I));
+      Vec = Insert;
+    }
+  }
+
+  Value *Bitcast =
+    Builder.CreateBitCast(S0->getPointerOperand(), VecTy->getPointerTo(AS));
+  StoreInst *SI = cast<StoreInst>(Builder.CreateStore(Vec, Bitcast));
+  propagateMetadata(SI, Chain);
+  SI->setAlignment(Alignment);
+
+  eraseInstructions(Chain);
+  ++NumVectorInstructions;
+  NumScalarsVectorized += Chain.size();
+  return true;
+}
+
+bool Vectorizer::vectorizeLoadChain(ArrayRef<Value *> Chain) {
+  LoadInst *L0 = cast<LoadInst>(Chain[0]);
+  Type *LoadTy = L0->getType();
+  unsigned Sz = DL.getTypeSizeInBits(LoadTy);
+  unsigned VF = VecRegSize / Sz;
+  unsigned ChainSize = Chain.size();
+
+  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2)
+    return false;
+
+  // Load size should be 1B, 2B or multiple of 4B.
+  // TODO: Should size constraint be a target hook?
+  unsigned SzInBytes = (Sz / 8) * ChainSize;
+  if (SzInBytes > 2 && SzInBytes % 4 != 0) {
+    DEBUG(dbgs() << "LSV: Size should be 1B, 2B or multiple of 4B. Splitting.\n");
+    if (SzInBytes == 3)
+      return vectorizeLoadChain(Chain.slice(0, ChainSize - 1));
+    auto Chains = splitOddVectorElts(Chain, Sz);
+    return vectorizeLoadChain(Chains.first) | vectorizeLoadChain(Chains.second);
+  }
+
+  VectorType *VecTy;
+  VectorType *VecLoadTy = dyn_cast<VectorType>(LoadTy);
+  if (VecLoadTy)
+    VecTy = VectorType::get(LoadTy->getScalarType(),
+                            Chain.size() * VecLoadTy->getNumElements());
+  else
+    VecTy = VectorType::get(LoadTy, Chain.size());
+
+  // If it's more than the max vector size, break it into two pieces.
+  // TODO: Target hook to control types to split to.
+  if (ChainSize > VF) {
+    DEBUG(dbgs() << "LSV: Vector factor is too big. "
+                    "Creating two separate arrays.\n");
+    return vectorizeLoadChain(Chain.slice(0, VF)) |
+           vectorizeLoadChain(Chain.slice(VF));
+  }
+
+  // Check alignment restrictions.
+  unsigned Alignment = L0->getAlignment();
+
+  // If the load is going to be misaligned, don't vectorize it.
+  // TODO: Check TLI.allowsMisalignedMemoryAccess and remove TargetBaseAlign.
+  if ((Alignment % SzInBytes) != 0 && (Alignment % TargetBaseAlign) != 0) {
+    if (L0->getPointerAddressSpace() == 0) {
+      // If we're loading from an object on the stack, we control its alignment,
+      // so we can cheat and change it!
+      Value *V = GetUnderlyingObject(L0->getPointerOperand(), DL);
+      if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V)) {
+        AI->setAlignment(TargetBaseAlign);
+        Alignment = TargetBaseAlign;
+      } else {
+        return false;
+      }
+    } else {
+      return false;
+    }
+  }
+
+  DEBUG(
+    dbgs() << "LSV: Loads to vectorize:\n";
+    for (Value *V : Chain)
+      V->dump();
+  );
+
+  BasicBlock::iterator First, Last;
+  std::tie(First, Last) = getBoundaryInstrs(Chain);
+
+  if (!isVectorizable(Chain, First, Last))
+    return false;
+
+  // Set insert point.
+  Builder.SetInsertPoint(&*Last);
+
+  unsigned AS = L0->getPointerAddressSpace();
+  Value *Bitcast =
+    Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS));
+
+  LoadInst *LI = cast<LoadInst>(Builder.CreateLoad(Bitcast));
+  propagateMetadata(LI, Chain);
+  LI->setAlignment(Alignment);
+
+  if (VecLoadTy) {
+    SmallVector<Instruction *, 16> InstrsToErase;
+    SmallVector<Instruction *, 16> InstrsToReorder;
+
+    unsigned VecWidth = VecLoadTy->getNumElements();
+    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
+      for (auto Use : Chain[I]->users()) {
+        Instruction *UI = cast<Instruction>(Use);
+        unsigned Idx = cast<ConstantInt>(UI->getOperand(1))->getZExtValue();
+        unsigned NewIdx = Idx + I * VecWidth;
+        Value *V = Builder.CreateExtractElement(LI, Builder.getInt32(NewIdx));
+        Instruction *Extracted = cast<Instruction>(V);
+        if (Extracted->getType() != UI->getType())
+          Extracted =
+            cast<Instruction>(Builder.CreateBitCast(Extracted, UI->getType()));
+
+        // Replace the old instruction.
+        UI->replaceAllUsesWith(Extracted);
+        InstrsToReorder.push_back(Extracted);
+        InstrsToErase.push_back(UI);
+      }
+    }
+
+    for (Instruction *ModUser : InstrsToReorder)
+      reorder(ModUser);
+
+    for (auto I : InstrsToErase)
+      I->eraseFromParent();
+  } else {
+    SmallVector<Instruction *, 16> InstrsToReorder;
+
+    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
+      Value *V = Builder.CreateExtractElement(LI, Builder.getInt32(I));
+      Instruction *Extracted = cast<Instruction>(V);
+      Instruction *UI = cast<Instruction>(Chain[I]);
+      if (Extracted->getType() != UI->getType())
+        Extracted =
+          cast<Instruction>(Builder.CreateBitCast(Extracted, UI->getType()));
+
+      // Replace the old instruction.
+      UI->replaceAllUsesWith(Extracted);
+      InstrsToReorder.push_back(Extracted);
+    }
+
+    for (Instruction *ModUser : InstrsToReorder)
+      reorder(ModUser);
+  }
+
+  eraseInstructions(Chain);
+
+  ++NumVectorInstructions;
+  NumScalarsVectorized += Chain.size();
+  return true;
+}

Modified: llvm/trunk/lib/Transforms/Vectorize/Vectorize.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/Vectorize.cpp?rev=274293&r1=274292&r2=274293&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Vectorize/Vectorize.cpp (original)
+++ llvm/trunk/lib/Transforms/Vectorize/Vectorize.cpp Thu Jun 30 18:11:38 2016
@@ -29,6 +29,7 @@ void llvm::initializeVectorization(PassR
   initializeBBVectorizePass(Registry);
   initializeLoopVectorizePass(Registry);
   initializeSLPVectorizerPass(Registry);
+  initializeLoadStoreVectorizerPass(Registry);
 }
 
 void LLVMInitializeVectorization(LLVMPassRegistryRef R) {

Added: llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll?rev=274293&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll (added)
+++ llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll Thu Jun 30 18:11:38 2016
@@ -0,0 +1,150 @@
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -basicaa -load-store-vectorizer -S -o - %s | FileCheck %s
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+; CHECK-LABEL: @basic_merge_sext_index(
+; CHECK: sext i32 %id.x to i64
+; CHECK: load <2 x float>
+; CHECK: store <2 x float> zeroinitializer
+define void @basic_merge_sext_index(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c) #0 {
+entry:
+  %id.x = call i32 @llvm.amdgcn.workitem.id.x()
+  %sext.id.x = sext i32 %id.x to i64
+  %a.idx.x = getelementptr inbounds float, float addrspace(1)* %a, i64 %sext.id.x
+  %c.idx.x = getelementptr inbounds float, float addrspace(1)* %c, i64 %sext.id.x
+  %a.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %a.idx.x, i64 1
+  %c.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %c.idx.x, i64 1
+
+  %ld.c = load float, float addrspace(1)* %c.idx.x, align 4
+  %ld.c.idx.1 = load float, float addrspace(1)* %c.idx.x.1, align 4
+
+  store float 0.0, float addrspace(1)* %a.idx.x, align 4
+  store float 0.0, float addrspace(1)* %a.idx.x.1, align 4
+
+  %add = fadd float %ld.c, %ld.c.idx.1
+  store float %add, float addrspace(1)* %b, align 4
+  ret void
+}
+
+; CHECK-LABEL: @basic_merge_zext_index(
+; CHECK: zext i32 %id.x to i64
+; CHECK: load <2 x float>
+; CHECK: store <2 x float>
+define void @basic_merge_zext_index(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c) #0 {
+entry:
+  %id.x = call i32 @llvm.amdgcn.workitem.id.x()
+  %zext.id.x = zext i32 %id.x to i64
+  %a.idx.x = getelementptr inbounds float, float addrspace(1)* %a, i64 %zext.id.x
+  %c.idx.x = getelementptr inbounds float, float addrspace(1)* %c, i64 %zext.id.x
+  %a.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %a.idx.x, i64 1
+  %c.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %c.idx.x, i64 1
+
+  %ld.c = load float, float addrspace(1)* %c.idx.x, align 4
+  %ld.c.idx.1 = load float, float addrspace(1)* %c.idx.x.1, align 4
+  store float 0.0, float addrspace(1)* %a.idx.x, align 4
+  store float 0.0, float addrspace(1)* %a.idx.x.1, align 4
+
+  %add = fadd float %ld.c, %ld.c.idx.1
+  store float %add, float addrspace(1)* %b, align 4
+  ret void
+}
+
+; CHECK-LABEL: @merge_op_zext_index(
+; CHECK: load <2 x float>
+; CHECK: store <2 x float>
+define void @merge_op_zext_index(float addrspace(1)* nocapture noalias %a, float addrspace(1)* nocapture noalias %b, float addrspace(1)* nocapture readonly noalias %c) #0 {
+entry:
+  %id.x = call i32 @llvm.amdgcn.workitem.id.x()
+  %shl = shl i32 %id.x, 2
+  %zext.id.x = zext i32 %shl to i64
+  %a.0 = getelementptr inbounds float, float addrspace(1)* %a, i64 %zext.id.x
+  %c.0 = getelementptr inbounds float, float addrspace(1)* %c, i64 %zext.id.x
+
+  %id.x.1 = or i32 %shl, 1
+  %id.x.1.ext = zext i32 %id.x.1 to i64
+
+  %a.1 = getelementptr inbounds float, float addrspace(1)* %a, i64 %id.x.1.ext
+  %c.1 = getelementptr inbounds float, float addrspace(1)* %c, i64 %id.x.1.ext
+
+  %ld.c.0 = load float, float addrspace(1)* %c.0, align 4
+  store float 0.0, float addrspace(1)* %a.0, align 4
+  %ld.c.1 = load float, float addrspace(1)* %c.1, align 4
+  store float 0.0, float addrspace(1)* %a.1, align 4
+
+  %add = fadd float %ld.c.0, %ld.c.1
+  store float %add, float addrspace(1)* %b, align 4
+  ret void
+}
+
+; CHECK-LABEL: @merge_op_sext_index(
+; CHECK: load <2 x float>
+; CHECK: store <2 x float>
+define void @merge_op_sext_index(float addrspace(1)* nocapture noalias %a, float addrspace(1)* nocapture noalias %b, float addrspace(1)* nocapture readonly noalias %c) #0 {
+entry:
+  %id.x = call i32 @llvm.amdgcn.workitem.id.x()
+  %shl = shl i32 %id.x, 2
+  %zext.id.x = sext i32 %shl to i64
+  %a.0 = getelementptr inbounds float, float addrspace(1)* %a, i64 %zext.id.x
+  %c.0 = getelementptr inbounds float, float addrspace(1)* %c, i64 %zext.id.x
+
+  %id.x.1 = or i32 %shl, 1
+  %id.x.1.ext = sext i32 %id.x.1 to i64
+
+  %a.1 = getelementptr inbounds float, float addrspace(1)* %a, i64 %id.x.1.ext
+  %c.1 = getelementptr inbounds float, float addrspace(1)* %c, i64 %id.x.1.ext
+
+  %ld.c.0 = load float, float addrspace(1)* %c.0, align 4
+  store float 0.0, float addrspace(1)* %a.0, align 4
+  %ld.c.1 = load float, float addrspace(1)* %c.1, align 4
+  store float 0.0, float addrspace(1)* %a.1, align 4
+
+  %add = fadd float %ld.c.0, %ld.c.1
+  store float %add, float addrspace(1)* %b, align 4
+  ret void
+}
+
+; This case fails to vectorize without the extra extension handling in
+; isConsecutiveAccess.
+
+; CHECK-LABEL: @zext_trunc_phi_1(
+; CHECK: loop:
+; CHECK: load <2 x i32>
+; CHECK: store <2 x i32>
+define void @zext_trunc_phi_1(i32 addrspace(1)* nocapture noalias %a, i32 addrspace(1)* nocapture noalias %b, i32 addrspace(1)* nocapture readonly noalias %c, i32 %n, i64 %arst, i64 %aoeu) #0 {
+entry:
+  %cmp0 = icmp eq i32 %n, 0
+  br i1 %cmp0, label %exit, label %loop
+
+loop:
+  %indvars.iv = phi i64 [ %indvars.iv.next, %loop ], [ 0, %entry ]
+  %trunc.iv = trunc i64 %indvars.iv to i32
+  %idx = shl i32 %trunc.iv, 4
+
+  %idx.ext = zext i32 %idx to i64
+  %c.0 = getelementptr inbounds i32, i32 addrspace(1)* %c, i64 %idx.ext
+  %a.0 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idx.ext
+
+  %idx.1 = or i32 %idx, 1
+  %idx.1.ext = zext i32 %idx.1 to i64
+  %c.1 = getelementptr inbounds i32, i32 addrspace(1)* %c, i64 %idx.1.ext
+  %a.1 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idx.1.ext
+
+  %ld.c.0 = load i32, i32 addrspace(1)* %c.0, align 4
+  store i32 %ld.c.0, i32 addrspace(1)* %a.0, align 4
+  %ld.c.1 = load i32, i32 addrspace(1)* %c.1, align 4
+  store i32 %ld.c.1, i32 addrspace(1)* %a.1, align 4
+
+  %indvars.iv.next = add i64 %indvars.iv, 1
+  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+
+  %exitcond = icmp eq i32 %lftr.wideiv, %n
+  br i1 %exitcond, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll?rev=274293&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll (added)
+++ llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll Thu Jun 30 18:11:38 2016
@@ -0,0 +1,62 @@
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -basicaa -load-store-vectorizer -S -o - %s | FileCheck %s
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
+; Check the position of the inserted vector load relative to the existing
+; adds.
+
+; CHECK-LABEL: @insert_load_point(
+; CHECK: %z = add i32 %x, 4
+; CHECK: %w = add i32 %y, 9
+; CHECK: load <2 x float>
+; CHECK: %foo = add i32 %z, %w
+define void @insert_load_point(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c, i64 %idx, i32 %x, i32 %y) #0 {
+entry:
+  %a.idx.x = getelementptr inbounds float, float addrspace(1)* %a, i64 %idx
+  %c.idx.x = getelementptr inbounds float, float addrspace(1)* %c, i64 %idx
+  %a.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %a.idx.x, i64 1
+  %c.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %c.idx.x, i64 1
+
+  %z = add i32 %x, 4
+  %ld.c = load float, float addrspace(1)* %c.idx.x, align 4
+  %w = add i32 %y, 9
+  %ld.c.idx.1 = load float, float addrspace(1)* %c.idx.x.1, align 4
+  %foo = add i32 %z, %w
+
+  store float 0.0, float addrspace(1)* %a.idx.x, align 4
+  store float 0.0, float addrspace(1)* %a.idx.x.1, align 4
+
+  %add = fadd float %ld.c, %ld.c.idx.1
+  store float %add, float addrspace(1)* %b, align 4
+  store i32 %foo, i32 addrspace(3)* null, align 4
+  ret void
+}
+
+; CHECK-LABEL: @insert_store_point(
+; CHECK: %z = add i32 %x, 4
+; CHECK: %w = add i32 %y, 9
+; CHECK: store <2 x float>
+; CHECK: %foo = add i32 %z, %w
+define void @insert_store_point(float addrspace(1)* nocapture %a, float addrspace(1)* nocapture %b, float addrspace(1)* nocapture readonly %c, i64 %idx, i32 %x, i32 %y) #0 {
+entry:
+  %a.idx.x = getelementptr inbounds float, float addrspace(1)* %a, i64 %idx
+  %c.idx.x = getelementptr inbounds float, float addrspace(1)* %c, i64 %idx
+  %a.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %a.idx.x, i64 1
+  %c.idx.x.1 = getelementptr inbounds float, float addrspace(1)* %c.idx.x, i64 1
+
+  %ld.c = load float, float addrspace(1)* %c.idx.x, align 4
+  %ld.c.idx.1 = load float, float addrspace(1)* %c.idx.x.1, align 4
+
+  %z = add i32 %x, 4
+  store float 0.0, float addrspace(1)* %a.idx.x, align 4
+  %w = add i32 %y, 9
+  store float 0.0, float addrspace(1)* %a.idx.x.1, align 4
+  %foo = add i32 %z, %w
+
+  %add = fadd float %ld.c, %ld.c.idx.1
+  store float %add, float addrspace(1)* %b, align 4
+  store i32 %foo, i32 addrspace(3)* null, align 4
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll?rev=274293&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll (added)
+++ llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll Thu Jun 30 18:11:38 2016
@@ -0,0 +1,28 @@
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -basicaa -load-store-vectorizer -S -o - %s | FileCheck %s
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
+; It is OK to vectorize the loads as long as the may-alias stores occur
+; before the vectorized load.
+
+; CHECK: store double 0.000000e+00, double addrspace(1)* %a,
+; CHECK: load <2 x double>
+; CHECK: store double 0.000000e+00, double addrspace(1)* %a.idx.1
+define void @interleave(double addrspace(1)* nocapture %a, double addrspace(1)* nocapture %b, double addrspace(1)* nocapture readonly %c) #0 {
+entry:
+  %a.idx.1 = getelementptr inbounds double, double addrspace(1)* %a, i64 1
+  %c.idx.1 = getelementptr inbounds double, double addrspace(1)* %c, i64 1
+
+  %ld.c = load double, double addrspace(1)* %c, align 8 ; may alias store to %a
+  store double 0.0, double addrspace(1)* %a, align 8
+
+  %ld.c.idx.1 = load double, double addrspace(1)* %c.idx.1, align 8 ; may alias store to %a
+  store double 0.0, double addrspace(1)* %a.idx.1, align 8
+
+  %add = fadd double %ld.c, %ld.c.idx.1
+  store double %add, double addrspace(1)* %b
+
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/lit.local.cfg
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/lit.local.cfg?rev=274293&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/lit.local.cfg (added)
+++ llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/lit.local.cfg Thu Jun 30 18:11:38 2016
@@ -0,0 +1,3 @@
+if not 'AMDGPU' in config.root.targets:
+    config.unsupported = True
+

Added: llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll?rev=274293&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll (added)
+++ llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll Thu Jun 30 18:11:38 2016
@@ -0,0 +1,635 @@
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -load-store-vectorizer -S -o - %s | FileCheck %s
+; Copy of test/CodeGen/AMDGPU/merge-stores.ll with some additions
+
+; TODO: Vector element tests
+; TODO: Non-zero base offset for load and store combinations
+; TODO: Same base addrspacecasted
+
+
+; CHECK-LABEL: @merge_global_store_2_constants_i8(
+; CHECK: store <2 x i8> <i8 -56, i8 123>, <2 x i8> addrspace(1)* %{{[0-9]+}}, align 2
+define void @merge_global_store_2_constants_i8(i8 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
+
+  store i8 123, i8 addrspace(1)* %out.gep.1
+  store i8 456, i8 addrspace(1)* %out, align 2
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_i8_natural_align
+; CHECK: store <2 x i8> <i8 -56, i8 123>, <2 x i8> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_2_constants_i8_natural_align(i8 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
+
+  store i8 123, i8 addrspace(1)* %out.gep.1
+  store i8 456, i8 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_i16
+; CHECK: store <2 x i16> <i16 456, i16 123>, <2 x i16> addrspace(1)* %{{[0-9]+}}, align 4
+define void @merge_global_store_2_constants_i16(i16 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
+
+  store i16 123, i16 addrspace(1)* %out.gep.1
+  store i16 456, i16 addrspace(1)* %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_0_i16
+; CHECK: store <2 x i16> zeroinitializer, <2 x i16> addrspace(1)* %{{[0-9]+}}, align 4
+define void @merge_global_store_2_constants_0_i16(i16 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
+
+  store i16 0, i16 addrspace(1)* %out.gep.1
+  store i16 0, i16 addrspace(1)* %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_i16_natural_align
+; CHECK: store <2 x i16> <i16 456, i16 123>, <2 x i16> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_2_constants_i16_natural_align(i16 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
+
+  store i16 123, i16 addrspace(1)* %out.gep.1
+  store i16 456, i16 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_half_natural_align
+; CHECK: store <2 x half> <half 0xH3C00, half 0xH4000>, <2 x half> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_2_constants_half_natural_align(half addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
+
+  store half 2.0, half addrspace(1)* %out.gep.1
+  store half 1.0, half addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_i32
+; CHECK: store <2 x i32> <i32 456, i32 123>, <2 x i32> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_2_constants_i32(i32 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+
+  store i32 123, i32 addrspace(1)* %out.gep.1
+  store i32 456, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_i32_f32
+; CHECK: store <2 x i32> <i32 456, i32 1065353216>, <2 x i32> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_2_constants_i32_f32(i32 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.1.bc = bitcast i32 addrspace(1)* %out.gep.1 to float addrspace(1)*
+  store float 1.0, float addrspace(1)* %out.gep.1.bc
+  store i32 456, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_f32_i32
+; CHECK: store <2 x float> <float 4.000000e+00, float 0x370EC00000000000>, <2 x float> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_2_constants_f32_i32(float addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
+  %out.gep.1.bc = bitcast float addrspace(1)* %out.gep.1 to i32 addrspace(1)*
+  store i32 123, i32 addrspace(1)* %out.gep.1.bc
+  store float 4.0, float addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_constants_i32
+; CHECK: store <4 x i32> <i32 1234, i32 123, i32 456, i32 333>, <4 x i32> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_4_constants_i32(i32 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
+
+  store i32 123, i32 addrspace(1)* %out.gep.1
+  store i32 456, i32 addrspace(1)* %out.gep.2
+  store i32 333, i32 addrspace(1)* %out.gep.3
+  store i32 1234, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_constants_f32_order
+; CHECK: store <4 x float> <float 8.000000e+00, float 1.000000e+00, float 2.000000e+00, float 4.000000e+00>, <4 x float> addrspace(1)* %{{[0-9]+}}
+define void @merge_global_store_4_constants_f32_order(float addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
+
+  store float 8.0, float addrspace(1)* %out
+  store float 1.0, float addrspace(1)* %out.gep.1
+  store float 2.0, float addrspace(1)* %out.gep.2
+  store float 4.0, float addrspace(1)* %out.gep.3
+  ret void
+}
+
+; First store is out of order.
+; CHECK-LABEL: @merge_global_store_4_constants_f32
+; CHECK: store <4 x float> <float 8.000000e+00, float 1.000000e+00, float 2.000000e+00, float 4.000000e+00>, <4 x float> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_4_constants_f32(float addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
+
+  store float 1.0, float addrspace(1)* %out.gep.1
+  store float 2.0, float addrspace(1)* %out.gep.2
+  store float 4.0, float addrspace(1)* %out.gep.3
+  store float 8.0, float addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_constants_mixed_i32_f32
+; CHECK: store <4 x float> <float 8.000000e+00, float 0x36D6000000000000, float 2.000000e+00, float 0x36E1000000000000>, <4 x float> addrspace(1)* %{{[0-9]+}}
+define void @merge_global_store_4_constants_mixed_i32_f32(float addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
+
+  %out.gep.1.bc = bitcast float addrspace(1)* %out.gep.1 to i32 addrspace(1)*
+  %out.gep.3.bc = bitcast float addrspace(1)* %out.gep.3 to i32 addrspace(1)*
+
+  store i32 11, i32 addrspace(1)* %out.gep.1.bc
+  store float 2.0, float addrspace(1)* %out.gep.2
+  store i32 17, i32 addrspace(1)* %out.gep.3.bc
+  store float 8.0, float addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_3_constants_i32
+; CHECK: store <3 x i32> <i32 1234, i32 123, i32 456>, <3 x i32> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_3_constants_i32(i32 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+
+  store i32 123, i32 addrspace(1)* %out.gep.1
+  store i32 456, i32 addrspace(1)* %out.gep.2
+  store i32 1234, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_i64
+; CHECK: store <2 x i64> <i64 456, i64 123>, <2 x i64> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_2_constants_i64(i64 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i64, i64 addrspace(1)* %out, i64 1
+
+  store i64 123, i64 addrspace(1)* %out.gep.1
+  store i64 456, i64 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_constants_i64
+; CHECK: store <2 x i64> <i64 456, i64 333>, <2 x i64> addrspace(1)* %{{[0-9]+$}}
+; CHECK: store <2 x i64> <i64 1234, i64 123>, <2 x i64> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_4_constants_i64(i64 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i64, i64 addrspace(1)* %out, i64 1
+  %out.gep.2 = getelementptr i64, i64 addrspace(1)* %out, i64 2
+  %out.gep.3 = getelementptr i64, i64 addrspace(1)* %out, i64 3
+
+  store i64 123, i64 addrspace(1)* %out.gep.1
+  store i64 456, i64 addrspace(1)* %out.gep.2
+  store i64 333, i64 addrspace(1)* %out.gep.3
+  store i64 1234, i64 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_adjacent_loads_i32
+; CHECK: [[LOAD:%[0-9]+]] = load <2 x i32>
+; CHECK: [[ELT0:%[0-9]+]] = extractelement <2 x i32> [[LOAD]], i32 0
+; CHECK: [[ELT1:%[0-9]+]] = extractelement <2 x i32> [[LOAD]], i32 1
+; CHECK: [[INSERT0:%[0-9]+]] = insertelement <2 x i32> undef, i32 [[ELT0]], i32 0
+; CHECK: [[INSERT1:%[0-9]+]] = insertelement <2 x i32> [[INSERT0]], i32 [[ELT1]], i32 1
+; CHECK: store <2 x i32> [[INSERT1]]
+define void @merge_global_store_2_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
+
+  %lo = load i32, i32 addrspace(1)* %in
+  %hi = load i32, i32 addrspace(1)* %in.gep.1
+
+  store i32 %lo, i32 addrspace(1)* %out
+  store i32 %hi, i32 addrspace(1)* %out.gep.1
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_adjacent_loads_i32_nonzero_base
+; CHECK: extractelement
+; CHECK: extractelement
+; CHECK: insertelement
+; CHECK: insertelement
+; CHECK: store <2 x i32>
+define void @merge_global_store_2_adjacent_loads_i32_nonzero_base(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %in.gep.0 = getelementptr i32, i32 addrspace(1)* %in, i32 2
+  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 3
+
+  %out.gep.0 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 3
+  %lo = load i32, i32 addrspace(1)* %in.gep.0
+  %hi = load i32, i32 addrspace(1)* %in.gep.1
+
+  store i32 %lo, i32 addrspace(1)* %out.gep.0
+  store i32 %hi, i32 addrspace(1)* %out.gep.1
+  ret void
+}
+
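+; The stores use the loaded values in swapped order, so the rebuilt store
+; vector puts the second loaded element in lane 0.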
+; CHECK-LABEL: @merge_global_store_2_adjacent_loads_shuffle_i32
+; CHECK: [[LOAD:%[0-9]+]] = load <2 x i32>
+; CHECK: [[ELT0:%[0-9]+]] = extractelement <2 x i32> [[LOAD]], i32 0
+; CHECK: [[ELT1:%[0-9]+]] = extractelement <2 x i32> [[LOAD]], i32 1
+; CHECK: [[INSERT0:%[0-9]+]] = insertelement <2 x i32> undef, i32 [[ELT1]], i32 0
+; CHECK: [[INSERT1:%[0-9]+]] = insertelement <2 x i32> [[INSERT0]], i32 [[ELT0]], i32 1
+; CHECK: store <2 x i32> [[INSERT1]]
+define void @merge_global_store_2_adjacent_loads_shuffle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
+
+  %lo = load i32, i32 addrspace(1)* %in
+  %hi = load i32, i32 addrspace(1)* %in.gep.1
+
+  store i32 %hi, i32 addrspace(1)* %out
+  store i32 %lo, i32 addrspace(1)* %out.gep.1
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_adjacent_loads_i32
+; CHECK: load <4 x i32>
+; CHECK: store <4 x i32>
+define void @merge_global_store_4_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
+  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
+  %in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 2
+  %in.gep.3 = getelementptr i32, i32 addrspace(1)* %in, i32 3
+
+  %x = load i32, i32 addrspace(1)* %in
+  %y = load i32, i32 addrspace(1)* %in.gep.1
+  %z = load i32, i32 addrspace(1)* %in.gep.2
+  %w = load i32, i32 addrspace(1)* %in.gep.3
+
+  store i32 %x, i32 addrspace(1)* %out
+  store i32 %y, i32 addrspace(1)* %out.gep.1
+  store i32 %z, i32 addrspace(1)* %out.gep.2
+  store i32 %w, i32 addrspace(1)* %out.gep.3
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_3_adjacent_loads_i32
+; CHECK: load <3 x i32>
+; CHECK: store <3 x i32>
+define void @merge_global_store_3_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
+  %in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 2
+
+  %x = load i32, i32 addrspace(1)* %in
+  %y = load i32, i32 addrspace(1)* %in.gep.1
+  %z = load i32, i32 addrspace(1)* %in.gep.2
+
+  store i32 %x, i32 addrspace(1)* %out
+  store i32 %y, i32 addrspace(1)* %out.gep.1
+  store i32 %z, i32 addrspace(1)* %out.gep.2
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_adjacent_loads_f32
+; CHECK: load <4 x float>
+; CHECK: store <4 x float>
+define void @merge_global_store_4_adjacent_loads_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
+  %in.gep.1 = getelementptr float, float addrspace(1)* %in, i32 1
+  %in.gep.2 = getelementptr float, float addrspace(1)* %in, i32 2
+  %in.gep.3 = getelementptr float, float addrspace(1)* %in, i32 3
+
+  %x = load float, float addrspace(1)* %in
+  %y = load float, float addrspace(1)* %in.gep.1
+  %z = load float, float addrspace(1)* %in.gep.2
+  %w = load float, float addrspace(1)* %in.gep.3
+
+  store float %x, float addrspace(1)* %out
+  store float %y, float addrspace(1)* %out.gep.1
+  store float %z, float addrspace(1)* %out.gep.2
+  store float %w, float addrspace(1)* %out.gep.3
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_adjacent_loads_i32_nonzero_base
+; CHECK: load <4 x i32>
+; CHECK: store <4 x i32>
+define void @merge_global_store_4_adjacent_loads_i32_nonzero_base(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %in.gep.0 = getelementptr i32, i32 addrspace(1)* %in, i32 11
+  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 12
+  %in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 13
+  %in.gep.3 = getelementptr i32, i32 addrspace(1)* %in, i32 14
+  %out.gep.0 = getelementptr i32, i32 addrspace(1)* %out, i32 7
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 8
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 9
+  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 10
+
+  %x = load i32, i32 addrspace(1)* %in.gep.0
+  %y = load i32, i32 addrspace(1)* %in.gep.1
+  %z = load i32, i32 addrspace(1)* %in.gep.2
+  %w = load i32, i32 addrspace(1)* %in.gep.3
+
+  store i32 %x, i32 addrspace(1)* %out.gep.0
+  store i32 %y, i32 addrspace(1)* %out.gep.1
+  store i32 %z, i32 addrspace(1)* %out.gep.2
+  store i32 %w, i32 addrspace(1)* %out.gep.3
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_adjacent_loads_inverse_i32
+; CHECK: load <4 x i32>
+; CHECK: store <4 x i32>
+define void @merge_global_store_4_adjacent_loads_inverse_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
+  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
+  %in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 2
+  %in.gep.3 = getelementptr i32, i32 addrspace(1)* %in, i32 3
+
+  %x = load i32, i32 addrspace(1)* %in
+  %y = load i32, i32 addrspace(1)* %in.gep.1
+  %z = load i32, i32 addrspace(1)* %in.gep.2
+  %w = load i32, i32 addrspace(1)* %in.gep.3
+
+  ; Make sure the barrier doesn't prevent the stores from being merged.
+  tail call void @llvm.amdgcn.s.barrier() #1
+
+  store i32 %w, i32 addrspace(1)* %out.gep.3
+  store i32 %z, i32 addrspace(1)* %out.gep.2
+  store i32 %y, i32 addrspace(1)* %out.gep.1
+  store i32 %x, i32 addrspace(1)* %out
+
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_adjacent_loads_shuffle_i32
+; CHECK: load <4 x i32>
+; CHECK: store <4 x i32>
+define void @merge_global_store_4_adjacent_loads_shuffle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
+  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
+  %in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 2
+  %in.gep.3 = getelementptr i32, i32 addrspace(1)* %in, i32 3
+
+  %x = load i32, i32 addrspace(1)* %in
+  %y = load i32, i32 addrspace(1)* %in.gep.1
+  %z = load i32, i32 addrspace(1)* %in.gep.2
+  %w = load i32, i32 addrspace(1)* %in.gep.3
+
+  ; Make sure the barrier doesn't prevent the stores from being merged.
+  tail call void @llvm.amdgcn.s.barrier() #1
+
+  store i32 %w, i32 addrspace(1)* %out
+  store i32 %z, i32 addrspace(1)* %out.gep.1
+  store i32 %y, i32 addrspace(1)* %out.gep.2
+  store i32 %x, i32 addrspace(1)* %out.gep.3
+
+  ret void
+}
+
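+; i8 accesses with align 4 also merge, into <4 x i8> ops with the scalars
+; rebuilt through extractelement/insertelement.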
+; CHECK-LABEL: @merge_global_store_4_adjacent_loads_i8
+; CHECK: load <4 x i8>
+; CHECK: extractelement <4 x i8>
+; CHECK: extractelement <4 x i8>
+; CHECK: extractelement <4 x i8>
+; CHECK: extractelement <4 x i8>
+; CHECK: insertelement <4 x i8>
+; CHECK: insertelement <4 x i8>
+; CHECK: insertelement <4 x i8>
+; CHECK: insertelement <4 x i8>
+; CHECK: store <4 x i8>
+define void @merge_global_store_4_adjacent_loads_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i8 1
+  %out.gep.2 = getelementptr i8, i8 addrspace(1)* %out, i8 2
+  %out.gep.3 = getelementptr i8, i8 addrspace(1)* %out, i8 3
+  %in.gep.1 = getelementptr i8, i8 addrspace(1)* %in, i8 1
+  %in.gep.2 = getelementptr i8, i8 addrspace(1)* %in, i8 2
+  %in.gep.3 = getelementptr i8, i8 addrspace(1)* %in, i8 3
+
+  %x = load i8, i8 addrspace(1)* %in, align 4
+  %y = load i8, i8 addrspace(1)* %in.gep.1
+  %z = load i8, i8 addrspace(1)* %in.gep.2
+  %w = load i8, i8 addrspace(1)* %in.gep.3
+
+  store i8 %x, i8 addrspace(1)* %out, align 4
+  store i8 %y, i8 addrspace(1)* %out.gep.1
+  store i8 %z, i8 addrspace(1)* %out.gep.2
+  store i8 %w, i8 addrspace(1)* %out.gep.3
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_adjacent_loads_i8_natural_align
+; CHECK: load <4 x i8>
+; CHECK: store <4 x i8>
+define void @merge_global_store_4_adjacent_loads_i8_natural_align(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i8 1
+  %out.gep.2 = getelementptr i8, i8 addrspace(1)* %out, i8 2
+  %out.gep.3 = getelementptr i8, i8 addrspace(1)* %out, i8 3
+  %in.gep.1 = getelementptr i8, i8 addrspace(1)* %in, i8 1
+  %in.gep.2 = getelementptr i8, i8 addrspace(1)* %in, i8 2
+  %in.gep.3 = getelementptr i8, i8 addrspace(1)* %in, i8 3
+
+  %x = load i8, i8 addrspace(1)* %in
+  %y = load i8, i8 addrspace(1)* %in.gep.1
+  %z = load i8, i8 addrspace(1)* %in.gep.2
+  %w = load i8, i8 addrspace(1)* %in.gep.3
+
+  store i8 %x, i8 addrspace(1)* %out
+  store i8 %y, i8 addrspace(1)* %out.gep.1
+  store i8 %z, i8 addrspace(1)* %out.gep.2
+  store i8 %w, i8 addrspace(1)* %out.gep.3
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_vector_elts_loads_v4i32
+; CHECK: load <4 x i32>
+; CHECK: store <4 x i32>
+define void @merge_global_store_4_vector_elts_loads_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
+  %vec = load <4 x i32>, <4 x i32> addrspace(1)* %in
+
+  %x = extractelement <4 x i32> %vec, i32 0
+  %y = extractelement <4 x i32> %vec, i32 1
+  %z = extractelement <4 x i32> %vec, i32 2
+  %w = extractelement <4 x i32> %vec, i32 3
+
+  store i32 %x, i32 addrspace(1)* %out
+  store i32 %y, i32 addrspace(1)* %out.gep.1
+  store i32 %z, i32 addrspace(1)* %out.gep.2
+  store i32 %w, i32 addrspace(1)* %out.gep.3
+  ret void
+}
+
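+; The out-of-range constant 456 wraps to i8 -56 in the merged vector store.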
+; CHECK-LABEL: @merge_local_store_2_constants_i8
+; CHECK: store <2 x i8> <i8 -56, i8 123>, <2 x i8> addrspace(3)* %{{[0-9]+}}, align 2
+define void @merge_local_store_2_constants_i8(i8 addrspace(3)* %out) #0 {
+  %out.gep.1 = getelementptr i8, i8 addrspace(3)* %out, i32 1
+
+  store i8 123, i8 addrspace(3)* %out.gep.1
+  store i8 456, i8 addrspace(3)* %out, align 2
+  ret void
+}
+
+; CHECK-LABEL: @merge_local_store_2_constants_i32
+; CHECK: store <2 x i32> <i32 456, i32 123>, <2 x i32> addrspace(3)* %{{[0-9]+$}}
+define void @merge_local_store_2_constants_i32(i32 addrspace(3)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
+
+  store i32 123, i32 addrspace(3)* %out.gep.1
+  store i32 456, i32 addrspace(3)* %out
+  ret void
+}
+
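+; i32 stores with only 2-byte alignment should be left unmerged.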
+; CHECK-LABEL: @merge_local_store_2_constants_i32_align_2
+; CHECK: store i32
+; CHECK: store i32
+define void @merge_local_store_2_constants_i32_align_2(i32 addrspace(3)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
+
+  store i32 123, i32 addrspace(3)* %out.gep.1, align 2
+  store i32 456, i32 addrspace(3)* %out, align 2
+  ret void
+}
+
+; CHECK-LABEL: @merge_local_store_4_constants_i32
+; CHECK: store <4 x i32> <i32 1234, i32 123, i32 456, i32 333>, <4 x i32> addrspace(3)*
+define void @merge_local_store_4_constants_i32(i32 addrspace(3)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(3)* %out, i32 2
+  %out.gep.3 = getelementptr i32, i32 addrspace(3)* %out, i32 3
+
+  store i32 123, i32 addrspace(3)* %out.gep.1
+  store i32 456, i32 addrspace(3)* %out.gep.2
+  store i32 333, i32 addrspace(3)* %out.gep.3
+  store i32 1234, i32 addrspace(3)* %out
+  ret void
+}
+
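+; An odd count splits into a full <4 x i32> store plus a scalar remainder.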
+; CHECK-LABEL: @merge_global_store_5_constants_i32
+; CHECK: store <4 x i32> <i32 9, i32 12, i32 16, i32 -12>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
+; CHECK: store i32
+define void @merge_global_store_5_constants_i32(i32 addrspace(1)* %out) {
+  store i32 9, i32 addrspace(1)* %out, align 4
+  %idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
+  store i32 12, i32 addrspace(1)* %idx1, align 4
+  %idx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 2
+  store i32 16, i32 addrspace(1)* %idx2, align 4
+  %idx3 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 3
+  store i32 -12, i32 addrspace(1)* %idx3, align 4
+  %idx4 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 4
+  store i32 11, i32 addrspace(1)* %idx4, align 4
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_6_constants_i32
+; CHECK: store <4 x i32> <i32 13, i32 15, i32 62, i32 63>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
+; CHECK: store <2 x i32> <i32 11, i32 123>, <2 x i32> addrspace(1)* %{{[0-9]+}}, align 4
+define void @merge_global_store_6_constants_i32(i32 addrspace(1)* %out) {
+  store i32 13, i32 addrspace(1)* %out, align 4
+  %idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
+  store i32 15, i32 addrspace(1)* %idx1, align 4
+  %idx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 2
+  store i32 62, i32 addrspace(1)* %idx2, align 4
+  %idx3 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 3
+  store i32 63, i32 addrspace(1)* %idx3, align 4
+  %idx4 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 4
+  store i32 11, i32 addrspace(1)* %idx4, align 4
+  %idx5 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 5
+  store i32 123, i32 addrspace(1)* %idx5, align 4
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_7_constants_i32
+; CHECK: store <4 x i32> <i32 34, i32 999, i32 65, i32 33>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
+; CHECK: store <3 x i32> <i32 98, i32 91, i32 212>, <3 x i32> addrspace(1)* %{{[0-9]+}}, align 4
+define void @merge_global_store_7_constants_i32(i32 addrspace(1)* %out) {
+  store i32 34, i32 addrspace(1)* %out, align 4
+  %idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
+  store i32 999, i32 addrspace(1)* %idx1, align 4
+  %idx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 2
+  store i32 65, i32 addrspace(1)* %idx2, align 4
+  %idx3 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 3
+  store i32 33, i32 addrspace(1)* %idx3, align 4
+  %idx4 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 4
+  store i32 98, i32 addrspace(1)* %idx4, align 4
+  %idx5 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 5
+  store i32 91, i32 addrspace(1)* %idx5, align 4
+  %idx6 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 6
+  store i32 212, i32 addrspace(1)* %idx6, align 4
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_8_constants_i32
+; CHECK: store <4 x i32> <i32 34, i32 999, i32 65, i32 33>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
+; CHECK: store <4 x i32> <i32 98, i32 91, i32 212, i32 999>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
+define void @merge_global_store_8_constants_i32(i32 addrspace(1)* %out) {
+  store i32 34, i32 addrspace(1)* %out, align 4
+  %idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
+  store i32 999, i32 addrspace(1)* %idx1, align 4
+  %idx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 2
+  store i32 65, i32 addrspace(1)* %idx2, align 4
+  %idx3 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 3
+  store i32 33, i32 addrspace(1)* %idx3, align 4
+  %idx4 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 4
+  store i32 98, i32 addrspace(1)* %idx4, align 4
+  %idx5 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 5
+  store i32 91, i32 addrspace(1)* %idx5, align 4
+  %idx6 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 6
+  store i32 212, i32 addrspace(1)* %idx6, align 4
+  %idx7 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 7
+  store i32 999, i32 addrspace(1)* %idx7, align 4
+  ret void
+}
+
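+; Underaligned accesses that are already vectors should pass through unchanged.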
+; CHECK-LABEL: @copy_v3i32_align4
+; CHECK: %vec = load <3 x i32>, <3 x i32> addrspace(1)* %in, align 4
+; CHECK: store <3 x i32> %vec, <3 x i32> addrspace(1)* %out
+define void @copy_v3i32_align4(<3 x i32> addrspace(1)* noalias %out, <3 x i32> addrspace(1)* noalias %in) #0 {
+  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %in, align 4
+  store <3 x i32> %vec, <3 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @copy_v3i64_align4
+; CHECK: %vec = load <3 x i64>, <3 x i64> addrspace(1)* %in, align 4
+; CHECK: store <3 x i64> %vec, <3 x i64> addrspace(1)* %out
+define void @copy_v3i64_align4(<3 x i64> addrspace(1)* noalias %out, <3 x i64> addrspace(1)* noalias %in) #0 {
+  %vec = load <3 x i64>, <3 x i64> addrspace(1)* %in, align 4
+  store <3 x i64> %vec, <3 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @copy_v3f32_align4
+; CHECK: %vec = load <3 x float>, <3 x float> addrspace(1)* %in, align 4
+; CHECK: store <3 x float>
+define void @copy_v3f32_align4(<3 x float> addrspace(1)* noalias %out, <3 x float> addrspace(1)* noalias %in) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %in, align 4
+  %fadd = fadd <3 x float> %vec, <float 1.0, float 2.0, float 4.0>
+  store <3 x float> %fadd, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @copy_v3f64_align4
+; CHECK: %vec = load <3 x double>, <3 x double> addrspace(1)* %in, align 4
+; CHECK: store <3 x double> %fadd, <3 x double> addrspace(1)* %out
+define void @copy_v3f64_align4(<3 x double> addrspace(1)* noalias %out, <3 x double> addrspace(1)* noalias %in) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %in, align 4
+  %fadd = fadd <3 x double> %vec, <double 1.0, double 2.0, double 4.0>
+  store <3 x double> %fadd, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+declare void @llvm.amdgcn.s.barrier() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { convergent nounwind }

Added: llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll?rev=274293&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll (added)
+++ llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll Thu Jun 30 18:11:38 2016
@@ -0,0 +1,91 @@
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -basicaa -load-store-vectorizer -S -o - %s | FileCheck %s
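+; As a rough sketch, the RUN line above expands to something like (with %s
+; replaced by the path to this file):
+;   opt -mtriple=amdgcn-amd-amdhsa -basicaa -load-store-vectorizer -S -o - \
+;     merge-vectors.ll | FileCheck merge-vectors.ll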
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
+; CHECK-LABEL: @merge_v2i32_v2i32(
+; CHECK: load <4 x i32>
+; CHECK: store <4 x i32> zeroinitializer
+define void @merge_v2i32_v2i32(<2 x i32> addrspace(1)* nocapture %a, <2 x i32> addrspace(1)* nocapture readonly %b) #0 {
+entry:
+  %a.1 = getelementptr inbounds <2 x i32>, <2 x i32> addrspace(1)* %a, i64 1
+  %b.1 = getelementptr inbounds <2 x i32>, <2 x i32> addrspace(1)* %b, i64 1
+
+  %ld.c = load <2 x i32>, <2 x i32> addrspace(1)* %b, align 4
+  %ld.c.idx.1 = load <2 x i32>, <2 x i32> addrspace(1)* %b.1, align 4
+
+  store <2 x i32> zeroinitializer, <2 x i32> addrspace(1)* %a, align 4
+  store <2 x i32> zeroinitializer, <2 x i32> addrspace(1)* %a.1, align 4
+
+  ret void
+}
+
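+; A pair of <1 x i32> accesses merges into plain <2 x i32> accesses.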
+; CHECK-LABEL: @merge_v1i32_v1i32(
+; CHECK: load <2 x i32>
+; CHECK: store <2 x i32> zeroinitializer
+define void @merge_v1i32_v1i32(<1 x i32> addrspace(1)* nocapture %a, <1 x i32> addrspace(1)* nocapture readonly %b) #0 {
+entry:
+  %a.1 = getelementptr inbounds <1 x i32>, <1 x i32> addrspace(1)* %a, i64 1
+  %b.1 = getelementptr inbounds <1 x i32>, <1 x i32> addrspace(1)* %b, i64 1
+
+  %ld.c = load <1 x i32>, <1 x i32> addrspace(1)* %b, align 4
+  %ld.c.idx.1 = load <1 x i32>, <1 x i32> addrspace(1)* %b.1, align 4
+
+  store <1 x i32> zeroinitializer, <1 x i32> addrspace(1)* %a, align 4
+  store <1 x i32> zeroinitializer, <1 x i32> addrspace(1)* %a.1, align 4
+
+  ret void
+}
+
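+; Not merged, apparently because consecutive <3 x i32> elements are padded to
+; a 16-byte allocation size here (v96:128 in the datalayout), leaving a gap
+; between the two accesses.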
+; CHECK-LABEL: @no_merge_v3i32_v3i32(
+; CHECK: load <3 x i32>
+; CHECK: load <3 x i32>
+; CHECK: store <3 x i32> zeroinitializer
+; CHECK: store <3 x i32> zeroinitializer
+define void @no_merge_v3i32_v3i32(<3 x i32> addrspace(1)* nocapture %a, <3 x i32> addrspace(1)* nocapture readonly %b) #0 {
+entry:
+  %a.1 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %a, i64 1
+  %b.1 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %b, i64 1
+
+  %ld.c = load <3 x i32>, <3 x i32> addrspace(1)* %b, align 4
+  %ld.c.idx.1 = load <3 x i32>, <3 x i32> addrspace(1)* %b.1, align 4
+
+  store <3 x i32> zeroinitializer, <3 x i32> addrspace(1)* %a, align 4
+  store <3 x i32> zeroinitializer, <3 x i32> addrspace(1)* %a.1, align 4
+
+  ret void
+}
+
+; CHECK-LABEL: @merge_v2i16_v2i16(
+; CHECK: load <4 x i16>
+; CHECK: store <4 x i16> zeroinitializer
+define void @merge_v2i16_v2i16(<2 x i16> addrspace(1)* nocapture %a, <2 x i16> addrspace(1)* nocapture readonly %b) #0 {
+entry:
+  %a.1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %a, i64 1
+  %b.1 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %b, i64 1
+
+  %ld.c = load <2 x i16>, <2 x i16> addrspace(1)* %b, align 4
+  %ld.c.idx.1 = load <2 x i16>, <2 x i16> addrspace(1)* %b.1, align 4
+
+  store <2 x i16> zeroinitializer, <2 x i16> addrspace(1)* %a, align 4
+  store <2 x i16> zeroinitializer, <2 x i16> addrspace(1)* %a.1, align 4
+
+  ret void
+}
+
+; Ideally these two loads would be merged into a single load.
+; CHECK-LABEL: @merge_load_i32_v2i16(
+; CHECK: load i32,
+; CHECK: load <2 x i16>
+define void @merge_load_i32_v2i16(i32 addrspace(1)* nocapture %a) #0 {
+entry:
+  %a.1 = getelementptr inbounds i32, i32 addrspace(1)* %a, i32 1
+  %a.1.cast = bitcast i32 addrspace(1)* %a.1 to <2 x i16> addrspace(1)*
+
+  %ld.0 = load i32, i32 addrspace(1)* %a
+  %ld.1 = load <2 x i16>, <2 x i16> addrspace(1)* %a.1.cast
+
+  ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll?rev=274293&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll (added)
+++ llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll Thu Jun 30 18:11:38 2016
@@ -0,0 +1,20 @@
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -load-store-vectorizer -S -o - %s | FileCheck %s
+
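+; The noimplicitfloat attribute should keep the vectorizer from introducing
+; vector operations, so all four stores must stay scalar.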
+; CHECK-LABEL: @no_implicit_float(
+; CHECK: store i32
+; CHECK: store i32
+; CHECK: store i32
+; CHECK: store i32
+define void @no_implicit_float(i32 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
+
+  store i32 123, i32 addrspace(1)* %out.gep.1
+  store i32 456, i32 addrspace(1)* %out.gep.2
+  store i32 333, i32 addrspace(1)* %out.gep.3
+  store i32 1234, i32 addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind noimplicitfloat }



