[llvm-branch-commits] [llvm] eb95114 - [SLPVectorizer] WIP Implement initial memory versioning (WIP!)

Florian Hahn via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Fri Jun 11 06:30:45 PDT 2021


Author: Florian Hahn
Date: 2021-06-11T14:06:36+01:00
New Revision: eb951149ea161a0b7cfba9f7645c4fdf3a7d9ecb

URL: https://github.com/llvm/llvm-project/commit/eb951149ea161a0b7cfba9f7645c4fdf3a7d9ecb
DIFF: https://github.com/llvm/llvm-project/commit/eb951149ea161a0b7cfba9f7645c4fdf3a7d9ecb.diff

LOG: [SLPVectorizer] WIP Implement initial memory versioning (WIP!)

This patch is just an initial sketch to get a discussion going on how to
best support generating runtime checks for may-aliasing memory accesses.
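
For illustration (hand-written here, not taken from the patch), the kind
of source this targets looks like the C++ below, roughly matching the
@needs_versioning_not_profitable test updated in this patch; the function
name is invented. Because dst and src may alias, SLP-vectorizing the two
stores is only legal behind a runtime check that the accessed ranges do
not overlap:

  // Hypothetical C++ source for the pattern in the updated AArch64 test;
  // dst and src may point into the same buffer.
  void shift_pair(int *dst, int *src) {
    dst[0] = src[0] >> 16;
    dst[1] = src[1] >> 16;
  }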

The key question to start with is where best to collect and generate
runtime checks. Currently, code generation for a block is eager: for
each block, as soon as we find a vectorizable tree we vectorize it and
then analyze the rest of the block. This makes it hard to collect *all*
runtime checks for a block before changing the code.

Perhaps for now we need to limit the checks to the first vectorizable
tree in a block? This is what the patch tries to do.
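
Structurally, the result of versioning such a block can be sketched in
C++ as follows. This is only a hand-written sketch of the control flow:
the block names in the comments match the ones used in the updated
tests, while the function name and the exact form of the overlap test
are invented for illustration.

  #include <cstdint>

  // Control-flow sketch of a versioned block (hypothetical).
  void shift_pair_versioned(int *dst, int *src) {
    // entry.slpmemcheck: conservative overlap test, in the spirit of the
    // bound0/bound1 checks emitted into the .slpmemcheck block.
    bool conflict = (std::uintptr_t)dst < (std::uintptr_t)(src + 2) &&
                    (std::uintptr_t)src < (std::uintptr_t)(dst + 2);
    if (conflict) {
      // entry.scalar: unmodified clone of the original block.
      dst[0] = src[0] >> 16;
      dst[1] = src[1] >> 16;
    } else {
      // entry.slpversioned: SLP-vectorizable copy; its loads/stores get
      // !alias.scope/!noalias metadata so they are known not to overlap.
      dst[0] = src[0] >> 16;
      dst[1] = src[1] >> 16;
    }
    // entry.merge: original terminator.
  }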

There are a couple of mechanical/technical issues that need to be
addressed, but I think the questions above need to be answered first.

Other than that, the patch does not yet account for the cost of cloning
the block or the cost of the runtime checks. It also does not introduce
phis for values used outside the cloned block, so it will generate
invalid IR in those cases for now.
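
As a hand-written illustration of such a case (function name invented):
a value defined in the block to be cloned but used afterwards ends up
with two definitions, one in the versioned copy and one in the scalar
copy, so its uses past the merge point would need a phi:

  // Hypothetical example of a value used outside the cloned block:
  // 'a' is defined in the block containing the vectorizable stores but
  // used in a later block, so after cloning the use would need a phi.
  int shift_pair_live_out(int *dst, int *src, bool flag) {
    int a = src[0] >> 16;
    dst[0] = a;
    dst[1] = src[1] >> 16;
    if (flag)     // the block with the stores ends here
      return a;   // use of 'a' outside the cloned block
    return 0;
  }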

Differential Revision: https://reviews.llvm.org/D102834

Added: 
    

Modified: 
    llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
    llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll
    llvm/test/Transforms/SLPVectorizer/X86/memory-runtime-checks.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 1c8bdffee400..9c26f0fa8600 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -35,6 +35,7 @@
 #include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/CodeMetrics.h"
 #include "llvm/Analysis/DemandedBits.h"
+#include "llvm/Analysis/DomTreeUpdater.h"
 #include "llvm/Analysis/GlobalsModRef.h"
 #include "llvm/Analysis/IVDescriptors.h"
 #include "llvm/Analysis/LoopAccessAnalysis.h"
@@ -62,6 +63,7 @@
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/MDBuilder.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/NoFolder.h"
 #include "llvm/IR/Operator.h"
@@ -85,8 +87,11 @@
 #include "llvm/Support/KnownBits.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Cloning.h"
 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
 #include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
 #include "llvm/Transforms/Vectorize.h"
 #include <algorithm>
 #include <cassert>
@@ -107,6 +112,8 @@ using namespace slpvectorizer;
 #define DEBUG_TYPE "SLP"
 
 STATISTIC(NumVectorInstructions, "Number of vector instructions generated");
+STATISTIC(NumRuntimeCheckBlocks, "Number of blocks guarded by runtime checks");
+STATISTIC(NumRuntimeCheckBlocksNoVec, "Number of runtime-checked blocks not vectorized");
 
 cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
                                   cl::desc("Run the SLP vectorization passes"));
@@ -581,6 +588,44 @@ static Optional<int> getInsertIndex(Value *InsertInst, unsigned Offset) {
   return Index;
 }
 
+static bool extendMemBounds(
+    Instruction &I, bool Insert, ScalarEvolution &SE,
+    MapVector<Value *, std::pair<const SCEV *, const SCEV *>> &MemBounds) {
+
+  BasicBlock *BB = I.getParent();
+  auto GetPtr = [](Instruction *I) -> Value * {
+    if (auto *L = dyn_cast<LoadInst>(I))
+      return L->getPointerOperand();
+    if (auto *S = dyn_cast<StoreInst>(I))
+      return S->getPointerOperand();
+    return nullptr;
+  };
+  auto *Ptr = GetPtr(&I);
+  if (!Ptr)
+    return false;
+  auto *PtrSCEV = SE.getSCEV(Ptr);
+
+  Value *Obj = getUnderlyingObject(Ptr);
+  if (!Obj)
+    return false;
+
+  if (!SE.properlyDominates(PtrSCEV, BB))
+    return false;
+
+  if (Insert)
+    MemBounds.insert({Obj, {PtrSCEV, PtrSCEV}});
+  auto BoundsIter = MemBounds.find(Obj);
+  if (BoundsIter == MemBounds.end())
+    return false;
+
+  if (SE.isKnownPredicate(CmpInst::ICMP_ULT, PtrSCEV, BoundsIter->second.first))
+    BoundsIter->second.first = PtrSCEV;
+  if (SE.isKnownPredicate(CmpInst::ICMP_UGT, PtrSCEV,
+                          BoundsIter->second.second))
+    BoundsIter->second.second = PtrSCEV;
+
+  return true;
+}
 namespace slpvectorizer {
 
 /// Bottom Up SLP Vectorizer.
@@ -589,6 +634,16 @@ class BoUpSLP {
   struct ScheduleData;
 
 public:
+  /// Map of objects to start & end pointers we need to generate runtime
+  /// checks for.
+  MapVector<Value *, std::pair<const SCEV *, const SCEV *>> MemBounds;
+  /// Cache for alias results.
+  /// TODO: consider moving this to the AliasAnalysis itself.
+  using AliasCacheKey = std::pair<Instruction *, Instruction *>;
+  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
+
+  bool CollectMemAccess = false;
+
   using ValueList = SmallVector<Value *, 8>;
   using InstrList = SmallVector<Instruction *, 16>;
   using ValueSet = SmallPtrSet<Value *, 16>;
@@ -667,6 +722,7 @@ class BoUpSLP {
     }
     MinBWs.clear();
     InstrElementSize.clear();
+    MemBounds.clear();
   }
 
   unsigned getTreeSize() const { return VectorizableTree.size(); }
@@ -674,6 +730,25 @@ class BoUpSLP {
   /// Perform LICM and CSE on the newly generated gather sequences.
   void optimizeGatherSequence();
 
+  /// Remove instructions in DeletedInstructions.
+  void removeDeletedInstructions() {
+    for (const auto &Pair : DeletedInstructions) {
+      // Replace operands of ignored instructions with Undefs in case if they
+      // were marked for deletion.
+      if (Pair.getSecond()) {
+        Value *Undef = UndefValue::get(Pair.getFirst()->getType());
+        Pair.getFirst()->replaceAllUsesWith(Undef);
+      }
+      Pair.getFirst()->dropAllReferences();
+    }
+    for (const auto &Pair : DeletedInstructions) {
+      assert(Pair.getFirst()->use_empty() &&
+             "trying to erase instruction with users.");
+      Pair.getFirst()->eraseFromParent();
+    }
+    DeletedInstructions.clear();
+  }
+
   /// \returns The best order of instructions for vectorization.
   Optional<ArrayRef<unsigned>> bestOrder() const {
     assert(llvm::all_of(
@@ -1965,11 +2040,6 @@ class BoUpSLP {
     return aliased;
   }
 
-  using AliasCacheKey = std::pair<Instruction *, Instruction *>;
-
-  /// Cache for alias results.
-  /// TODO: consider moving this to the AliasAnalysis itself.
-  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
 
   /// Removes an instruction from its block and eventually deletes it.
   /// It's like Instruction::eraseFromParent() except that the actual deletion
@@ -2554,27 +2624,7 @@ template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
 
 } // end namespace llvm
 
-BoUpSLP::~BoUpSLP() {
-  for (const auto &Pair : DeletedInstructions) {
-    // Replace operands of ignored instructions with Undefs in case if they were
-    // marked for deletion.
-    if (Pair.getSecond()) {
-      Value *Undef = UndefValue::get(Pair.getFirst()->getType());
-      Pair.getFirst()->replaceAllUsesWith(Undef);
-    }
-    Pair.getFirst()->dropAllReferences();
-  }
-  for (const auto &Pair : DeletedInstructions) {
-    assert(Pair.getFirst()->use_empty() &&
-           "trying to erase instruction with users.");
-    Pair.getFirst()->eraseFromParent();
-  }
-#ifdef EXPENSIVE_CHECKS
-  // If we could guarantee that this call is not extremely slow, we could
-  // remove the ifdef limitation (see PR47712).
-  assert(!verifyFunction(*F, &dbgs()));
-#endif
-}
+BoUpSLP::~BoUpSLP() { removeDeletedInstructions(); }
 
 void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) {
   for (auto *V : AV) {
@@ -6195,6 +6245,7 @@ void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
           while (DepDest) {
             assert(isInSchedulingRegion(DepDest));
 
+            ScheduleData *DestBundle = DepDest->FirstInBundle;
             // We have two limits to reduce the complexity:
             // 1) AliasedCheckLimit: It's a small limit to reduce calls to
             //    SLP->isAliased (which is the expensive part in this loop).
@@ -6212,9 +6263,41 @@ void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
               // balance between reduced runtime and accurate dependencies.
               numAliased++;
 
+              // If this bundle is not scheduled and no versioned code has been
+              // generated yet, try to collect the bounds of the accesses to
+              // generate runtime checks.
+              if (!DestBundle->IsScheduled && SLP->CollectMemAccess) {
+                // FIXME Naming
+                auto GetPtr = [](Instruction *I) -> Value * {
+                  if (auto *L = dyn_cast<LoadInst>(I))
+                    return L->getPointerOperand();
+                  if (auto *S = dyn_cast<StoreInst>(I))
+                    return S->getPointerOperand();
+                  return nullptr;
+                };
+                auto *Src = GetPtr(SrcInst);
+                auto *Dst = GetPtr(DepDest->Inst);
+
+                if (SrcInst->getParent() == DepDest->Inst->getParent() && Src &&
+                    Dst) {
+                  auto GetPtr = [](Instruction *I) -> Value * {
+                    if (auto *L = dyn_cast<LoadInst>(I))
+                      return getUnderlyingObject(L->getPointerOperand());
+                    if (auto *S = dyn_cast<StoreInst>(I))
+                      return getUnderlyingObject(S->getPointerOperand());
+                    return nullptr;
+                  };
+                  bool AddedSrc =
+                      extendMemBounds(*SrcInst, true, *SLP->SE, SLP->MemBounds);
+                  bool AddedDst = extendMemBounds(*DepDest->Inst, true,
+                                                  *SLP->SE, SLP->MemBounds);
+                  if (!AddedSrc || !AddedDst ||
+                      GetPtr(SrcInst) == GetPtr(DepDest->Inst))
+                    SLP->MemBounds.clear();
+                }
+              }
               DepDest->MemoryDependencies.push_back(BundleMember);
               BundleMember->Dependencies++;
-              ScheduleData *DestBundle = DepDest->FirstInBundle;
               if (!DestBundle->IsScheduled) {
                 BundleMember->incrementUnscheduledDeps(1);
               }
@@ -6694,7 +6777,7 @@ PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &A
     return PreservedAnalyses::all();
 
   PreservedAnalyses PA;
-  PA.preserveSet<CFGAnalyses>();
+  // PA.preserveSet<CFGAnalyses>();
   return PA;
 }
 
@@ -6741,6 +6824,9 @@ bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
   // Update DFS numbers now so that we can use them for ordering.
   DT->updateDFSNumbers();
 
+  SmallVector<BasicBlock *, 4> BlocksToRetry;
+  SmallVector<MapVector<Value *, std::pair<const SCEV *, const SCEV *>>, 4>
+      BoundsToUse;
   // Scan the blocks in the function in post order.
   for (auto BB : post_order(&F.getEntryBlock())) {
     collectSeedInstructions(BB);
@@ -6749,7 +6835,40 @@ bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
     if (!Stores.empty()) {
       LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
                         << " underlying objects.\n");
-      Changed |= vectorizeStoreChains(R);
+      R.MemBounds.clear();
+
+      auto NoOrSingleSucc = [](BasicBlock *BB) {
+        return succ_begin(BB) == succ_end(BB) ||
+               std::next(succ_begin(BB)) == succ_end(BB);
+      };
+      auto NoOrSinglePred = [](BasicBlock *BB) {
+        return pred_begin(BB) == pred_end(BB) ||
+               std::next(pred_begin(BB)) == pred_end(BB);
+      };
+
+      auto AllUsesInside = [](BasicBlock *BB) {
+        return all_of(*BB, [BB](Instruction &I) {
+          return all_of(I.users(), [BB](User *U) {
+            return cast<Instruction>(U)->getParent() == BB;
+          });
+        });
+      };
+      auto TermSupported = [](BasicBlock *BB) {
+        auto *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
+        return isa<BranchInst>(BB->getTerminator()) ||
+               (RetI && !RetI->getReturnValue());
+      };
+      R.CollectMemAccess = NoOrSingleSucc(BB) && NoOrSinglePred(BB) &&
+                           AllUsesInside(BB) && TermSupported(BB);
+
+      bool VectorizedChains = vectorizeStoreChains(R);
+      if (!VectorizedChains && !R.MemBounds.empty()) {
+        BlocksToRetry.push_back(BB);
+        BoundsToUse.push_back(R.MemBounds);
+      }
+      R.CollectMemAccess = false;
+      R.MemBounds.clear();
+      Changed |= VectorizedChains;
     }
 
     // Vectorize trees that end at reductions.
@@ -6765,6 +6884,190 @@ bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
     }
   }
 
+  R.AliasCache.clear();
+  for (unsigned I = 0; I != BlocksToRetry.size(); I++) {
+    // First, clean up delete instructions, so they are not re-used during SCEV
+    // expansion.
+    R.removeDeletedInstructions();
+    BasicBlock *BB = BlocksToRetry[I];
+    auto &MemBounds = BoundsToUse[I];
+
+    for (Instruction &I : *BB)
+      extendMemBounds(I, false, *SE, MemBounds);
+    NumRuntimeCheckBlocks++;
+    LLVMContext &Ctx = BB->getContext();
+    DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
+    std::string OriginalName = BB->getName().str();
+    auto *CheckBlock = splitBlockBefore(BB, &*BB->getFirstNonPHI(), &DTU, LI,
+                                        nullptr, OriginalName + ".slpmemcheck");
+    auto *MergeBlock = BB;
+    BB = splitBlockBefore(BB, BB->getTerminator(), &DTU, LI, nullptr,
+                          OriginalName + ".slpversioned");
+
+    ValueToValueMapTy VMap;
+    auto *Scalar = CloneBasicBlock(BB, VMap, "", BB->getParent());
+    Scalar->setName(OriginalName + ".scalar");
+    MergeBlock->setName(OriginalName + ".merge");
+    SmallVector<BasicBlock *> Tmp;
+    Tmp.push_back(Scalar);
+    remapInstructionsInBlocks(Tmp, VMap);
+
+    Value *MemoryRuntimeCheck = nullptr;
+    Instruction *FirstInst = nullptr;
+    SCEVExpander Exp(*SE, BB->getParent()->getParent()->getDataLayout(),
+                     "memcheck");
+    SmallVector<std::pair<Value *, Value *>, 4> ExpandedBounds;
+    Type *PtrArithTy =
+        Type::getInt8PtrTy(BB->getParent()->getParent()->getContext(), 0);
+    for (auto &KV : MemBounds) {
+      ExpandedBounds.emplace_back(
+          Exp.expandCodeFor(KV.second.first, PtrArithTy,
+                            CheckBlock->getTerminator()),
+          Exp.expandCodeFor(KV.second.second, PtrArithTy,
+                            CheckBlock->getTerminator()));
+    }
+    auto GetFirstInst = [](Instruction *FirstInst, Value *V,
+                           Instruction *Loc) -> Instruction * {
+      if (FirstInst)
+        return FirstInst;
+      if (Instruction *I = dyn_cast<Instruction>(V))
+        return I->getParent() == Loc->getParent() ? I : nullptr;
+      return nullptr;
+    };
+
+    Instruction *Loc = CheckBlock->getTerminator();
+    IRBuilder<> ChkBuilder(CheckBlock->getTerminator());
+    for (unsigned i = 0; i < MemBounds.size(); ++i) {
+      for (unsigned j = i + 1; j < MemBounds.size(); ++j) {
+        Value *ALow = ExpandedBounds[i].first;
+        Value *AHigh = ExpandedBounds[i].second;
+        Value *BLow = ExpandedBounds[j].first;
+        Value *BHigh = ExpandedBounds[j].second;
+
+        unsigned AS0 = ALow->getType()->getPointerAddressSpace();
+        unsigned AS1 = BLow->getType()->getPointerAddressSpace();
+
+        Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
+        Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);
+        Value *Start0 = ChkBuilder.CreateBitCast(ALow, PtrArithTy0, "bc");
+        Value *Start1 = ChkBuilder.CreateBitCast(BLow, PtrArithTy1, "bc");
+        Value *End0 = ChkBuilder.CreateBitCast(AHigh, PtrArithTy1, "bc");
+        Value *End1 = ChkBuilder.CreateBitCast(BHigh, PtrArithTy0, "bc");
+        // [A|B].Start points to the first accessed byte under base [A|B].
+        // [A|B].End points to the last accessed byte, plus one.
+        // There is no conflict when the intervals are disjoint:
+        // NoConflict = (B.Start >= A.End) || (A.Start >= B.End)
+        //
+        // bound0 = (B.Start < A.End)
+        // bound1 = (A.Start < B.End)
+        //  IsConflict = bound0 & bound1
+        Value *Cmp0 = ChkBuilder.CreateICmpULT(Start0, End1, "bound0");
+        FirstInst = GetFirstInst(FirstInst, Cmp0, Loc);
+        Value *Cmp1 = ChkBuilder.CreateICmpULT(Start1, End0, "bound1");
+        FirstInst = GetFirstInst(FirstInst, Cmp1, Loc);
+        Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
+        FirstInst = GetFirstInst(FirstInst, IsConflict, Loc);
+        if (MemoryRuntimeCheck) {
+          IsConflict = ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict,
+                                           "conflict.rdx");
+          FirstInst = GetFirstInst(FirstInst, IsConflict, Loc);
+        }
+        MemoryRuntimeCheck = IsConflict;
+      }
+    }
+
+    ChkBuilder.CreateCondBr(MemoryRuntimeCheck, Scalar, BB);
+    CheckBlock->getTerminator()->eraseFromParent();
+    DTU.applyUpdates({{DT->Insert, CheckBlock, Scalar}});
+    Changed = true;
+
+    MDBuilder MDB(Ctx);
+    MDNode *Domain = MDB.createAnonymousAliasScopeDomain("SLPVerDomain");
+
+    DenseMap<const std::pair<const SCEV *, const SCEV *> *, MDNode *>
+        GroupToScope;
+    for (const auto &Group : MemBounds)
+      GroupToScope[&Group.second] = MDB.createAnonymousAliasScope(Domain);
+
+    for (Instruction &I : *BB) {
+      auto GetPtr = [](Instruction *I) -> Value * {
+        if (auto *L = dyn_cast<LoadInst>(I))
+          return L->getPointerOperand();
+        if (auto *S = dyn_cast<StoreInst>(I))
+          return S->getPointerOperand();
+        return nullptr;
+      };
+      auto *Ptr = GetPtr(&I);
+      if (!Ptr)
+        continue;
+      auto *PtrSCEV = SE->getSCEV(Ptr);
+
+      Value *Obj = getUnderlyingObject(Ptr);
+      if (!Obj)
+        continue;
+
+      auto BoundsIter = MemBounds.find(Obj);
+      if (BoundsIter == MemBounds.end())
+        continue;
+      auto *LowerBound = BoundsIter->second.first;
+      auto *UpperBound = BoundsIter->second.second;
+      auto *Scope = GroupToScope.find(&BoundsIter->second)->second;
+      if (SE->isKnownPredicate(CmpInst::ICMP_UGE, PtrSCEV, LowerBound) &&
+          SE->isKnownPredicate(CmpInst::ICMP_ULE, PtrSCEV, UpperBound)) {
+        I.setMetadata(
+            LLVMContext::MD_alias_scope,
+            MDNode::concatenate(I.getMetadata(LLVMContext::MD_alias_scope),
+                                MDNode::get(Ctx, Scope)));
+
+        SmallVector<Metadata *, 4> NonAliasing;
+        for (auto &KV : GroupToScope) {
+          if (KV.first == &BoundsIter->second)
+            continue;
+          NonAliasing.push_back(KV.second);
+        }
+        I.setMetadata(
+            LLVMContext::MD_noalias,
+            MDNode::concatenate(I.getMetadata(LLVMContext::MD_noalias),
+                                MDNode::get(Ctx, NonAliasing)));
+      }
+    }
+
+    DTU.flush();
+    DT->updateDFSNumbers();
+
+    collectSeedInstructions(BB);
+
+    bool Vectorized = false;
+    // Vectorize trees that end at stores.
+    if (!Stores.empty()) {
+      LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
+                        << " underlying objects.\n");
+      Vectorized |= vectorizeStoreChains(R);
+    }
+    R.removeDeletedInstructions();
+    InstructionCost ScalarCost = 0;
+    for (Instruction &I : *Scalar)
+      ScalarCost += TTI->getInstructionCost(&I, TTI::TCK_RecipThroughput);
+    InstructionCost SLPCost = 0;
+    for (Instruction &I : *BB)
+      SLPCost += TTI->getInstructionCost(&I, TTI::TCK_RecipThroughput);
+
+    if (SLPCost >= ScalarCost) {
+      //assert(!Vectorized);
+      Instruction *OldTerm = CheckBlock->getTerminator();
+      OldTerm->eraseFromParent();
+      IRBuilder<> Builder(CheckBlock);
+      Builder.CreateBr(Scalar);
+      DTU.applyUpdates({{DT->Delete, CheckBlock, BB}});
+      DTU.deleteBB(BB);
+      DTU.applyUpdates({{DT->Delete, BB, MergeBlock}});
+      MergeBlockIntoPredecessor(MergeBlock, &DTU, LI);
+      MergeBlockIntoPredecessor(Scalar, &DTU, LI);
+      NumRuntimeCheckBlocksNoVec++;
+    }
+    Changed |= Vectorized;
+  }
+
   if (Changed) {
     R.optimizeGatherSequence();
     LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");

diff  --git a/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll
index 3ecb0f20cbbd..bdc934ae11af 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll
@@ -3,16 +3,38 @@
 
 define void @needs_versioning_not_profitable(i32* %dst, i32* %src) {
 ; CHECK-LABEL: @needs_versioning_not_profitable(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[SRC_0:%.*]] = load i32, i32* [[SRC:%.*]], align 4
-; CHECK-NEXT:    [[R_0:%.*]] = ashr i32 [[SRC_0]], 16
-; CHECK-NEXT:    store i32 [[R_0]], i32* [[DST:%.*]], align 4
+; CHECK-NEXT:  entry.slpmemcheck:
+; CHECK-NEXT:    [[DST8:%.*]] = bitcast i32* [[DST:%.*]] to i8*
+; CHECK-NEXT:    [[SRC10:%.*]] = bitcast i32* [[SRC:%.*]] to i8*
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32* [[DST]], i64 1
+; CHECK-NEXT:    [[SCEVGEP9:%.*]] = bitcast i32* [[SCEVGEP]] to i8*
+; CHECK-NEXT:    [[SCEVGEP11:%.*]] = getelementptr i32, i32* [[SRC]], i64 1
+; CHECK-NEXT:    [[SCEVGEP1112:%.*]] = bitcast i32* [[SCEVGEP11]] to i8*
+; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult i8* [[DST8]], [[SCEVGEP1112]]
+; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult i8* [[SRC10]], [[SCEVGEP9]]
+; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label [[ENTRY_SCALAR:%.*]], label [[ENTRY_SLPVERSIONED:%.*]]
+; CHECK:       entry.slpversioned:
 ; CHECK-NEXT:    [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
-; CHECK-NEXT:    [[SRC_1:%.*]] = load i32, i32* [[SRC_GEP_1]], align 4
-; CHECK-NEXT:    [[R_1:%.*]] = ashr i32 [[SRC_1]], 16
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[SRC]] to <2 x i32>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, <2 x i32>* [[TMP0]], align 4, !alias.scope !0, !noalias !3
+; CHECK-NEXT:    [[TMP2:%.*]] = ashr <2 x i32> [[TMP1]], <i32 16, i32 16>
 ; CHECK-NEXT:    [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
-; CHECK-NEXT:    store i32 [[R_1]], i32* [[DST_GEP_1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[DST]] to <2 x i32>*
+; CHECK-NEXT:    store <2 x i32> [[TMP2]], <2 x i32>* [[TMP3]], align 4, !alias.scope !3, !noalias !0
+; CHECK-NEXT:    br label [[ENTRY_MERGE:%.*]]
+; CHECK:       entry.merge:
 ; CHECK-NEXT:    ret void
+; CHECK:       entry.scalar:
+; CHECK-NEXT:    [[SRC_02:%.*]] = load i32, i32* [[SRC]], align 4
+; CHECK-NEXT:    [[R_03:%.*]] = ashr i32 [[SRC_02]], 16
+; CHECK-NEXT:    store i32 [[R_03]], i32* [[DST]], align 4
+; CHECK-NEXT:    [[SRC_GEP_14:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
+; CHECK-NEXT:    [[SRC_15:%.*]] = load i32, i32* [[SRC_GEP_14]], align 4
+; CHECK-NEXT:    [[R_16:%.*]] = ashr i32 [[SRC_15]], 16
+; CHECK-NEXT:    [[DST_GEP_17:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
+; CHECK-NEXT:    store i32 [[R_16]], i32* [[DST_GEP_17]], align 4
+; CHECK-NEXT:    br label [[ENTRY_MERGE]]
 ;
 entry:
   %src.0 = load i32, i32* %src, align 4
@@ -28,26 +50,58 @@ entry:
 
 define void @needs_versioning_profitable(i32* %dst, i32* %src) {
 ; CHECK-LABEL: @needs_versioning_profitable(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[SRC_0:%.*]] = load i32, i32* [[SRC:%.*]], align 4
+; CHECK-NEXT:  entry.slpmemcheck:
+; CHECK-NEXT:    [[DST16:%.*]] = bitcast i32* [[DST:%.*]] to i8*
+; CHECK-NEXT:    [[SRC18:%.*]] = bitcast i32* [[SRC:%.*]] to i8*
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32* [[DST]], i64 2
+; CHECK-NEXT:    [[SCEVGEP17:%.*]] = bitcast i32* [[SCEVGEP]] to i8*
+; CHECK-NEXT:    [[SCEVGEP19:%.*]] = getelementptr i32, i32* [[SRC]], i64 3
+; CHECK-NEXT:    [[SCEVGEP1920:%.*]] = bitcast i32* [[SCEVGEP19]] to i8*
+; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult i8* [[DST16]], [[SCEVGEP1920]]
+; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult i8* [[SRC18]], [[SCEVGEP17]]
+; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label [[ENTRY_SCALAR:%.*]], label [[ENTRY_SLPVERSIONED:%.*]]
+; CHECK:       entry.slpversioned:
+; CHECK-NEXT:    [[SRC_0:%.*]] = load i32, i32* [[SRC]], align 4, !alias.scope !5, !noalias !8
 ; CHECK-NEXT:    [[R_0:%.*]] = ashr i32 [[SRC_0]], 16
-; CHECK-NEXT:    store i32 [[R_0]], i32* [[DST:%.*]], align 4
+; CHECK-NEXT:    store i32 [[R_0]], i32* [[DST]], align 4, !alias.scope !8, !noalias !5
 ; CHECK-NEXT:    [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
 ; CHECK-NEXT:    [[SRC_1:%.*]] = load i32, i32* [[SRC_GEP_1]], align 4
 ; CHECK-NEXT:    [[R_1:%.*]] = ashr i32 [[SRC_1]], 16
 ; CHECK-NEXT:    [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
 ; CHECK-NEXT:    store i32 [[R_1]], i32* [[DST_GEP_1]], align 4
 ; CHECK-NEXT:    [[SRC_GEP_2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
-; CHECK-NEXT:    [[SRC_2:%.*]] = load i32, i32* [[SRC_GEP_2]], align 4
-; CHECK-NEXT:    [[R_2:%.*]] = ashr i32 [[SRC_2]], 16
 ; CHECK-NEXT:    [[DST_GEP_2:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
-; CHECK-NEXT:    store i32 [[R_2]], i32* [[DST_GEP_2]], align 4
 ; CHECK-NEXT:    [[SRC_GEP_3:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
-; CHECK-NEXT:    [[SRC_3:%.*]] = load i32, i32* [[SRC_GEP_3]], align 4
-; CHECK-NEXT:    [[R_3:%.*]] = ashr i32 [[SRC_3]], 16
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[SRC_GEP_2]] to <2 x i32>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, <2 x i32>* [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = ashr <2 x i32> [[TMP1]], <i32 16, i32 16>
 ; CHECK-NEXT:    [[DST_GEP_3:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
-; CHECK-NEXT:    store i32 [[R_3]], i32* [[DST_GEP_3]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[DST_GEP_2]] to <2 x i32>*
+; CHECK-NEXT:    store <2 x i32> [[TMP2]], <2 x i32>* [[TMP3]], align 4
+; CHECK-NEXT:    br label [[ENTRY_MERGE:%.*]]
+; CHECK:       entry.merge:
 ; CHECK-NEXT:    ret void
+; CHECK:       entry.scalar:
+; CHECK-NEXT:    [[SRC_02:%.*]] = load i32, i32* [[SRC]], align 4
+; CHECK-NEXT:    [[R_03:%.*]] = ashr i32 [[SRC_02]], 16
+; CHECK-NEXT:    store i32 [[R_03]], i32* [[DST]], align 4
+; CHECK-NEXT:    [[SRC_GEP_14:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
+; CHECK-NEXT:    [[SRC_15:%.*]] = load i32, i32* [[SRC_GEP_14]], align 4
+; CHECK-NEXT:    [[R_16:%.*]] = ashr i32 [[SRC_15]], 16
+; CHECK-NEXT:    [[DST_GEP_17:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
+; CHECK-NEXT:    store i32 [[R_16]], i32* [[DST_GEP_17]], align 4
+; CHECK-NEXT:    [[SRC_GEP_28:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
+; CHECK-NEXT:    [[SRC_29:%.*]] = load i32, i32* [[SRC_GEP_28]], align 4
+; CHECK-NEXT:    [[R_210:%.*]] = ashr i32 [[SRC_29]], 16
+; CHECK-NEXT:    [[DST_GEP_211:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
+; CHECK-NEXT:    store i32 [[R_210]], i32* [[DST_GEP_211]], align 4
+; CHECK-NEXT:    [[SRC_GEP_312:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
+; CHECK-NEXT:    [[SRC_313:%.*]] = load i32, i32* [[SRC_GEP_312]], align 4
+; CHECK-NEXT:    [[R_314:%.*]] = ashr i32 [[SRC_313]], 16
+; CHECK-NEXT:    [[DST_GEP_315:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
+; CHECK-NEXT:    store i32 [[R_314]], i32* [[DST_GEP_315]], align 4
+; CHECK-NEXT:    br label [[ENTRY_MERGE]]
 ;
 entry:
   %src.0 = load i32, i32* %src, align 4
@@ -99,11 +153,22 @@ entry:
 
 define void @version_multiple(i32* nocapture %out_block, i32* nocapture readonly %counter) {
 ; CHECK-LABEL: @version_multiple(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[COUNTER:%.*]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[OUT_BLOCK:%.*]], align 4
+; CHECK-NEXT:  entry.slpmemcheck:
+; CHECK-NEXT:    [[OUT_BLOCK12:%.*]] = bitcast i32* [[OUT_BLOCK:%.*]] to i8*
+; CHECK-NEXT:    [[COUNTER14:%.*]] = bitcast i32* [[COUNTER:%.*]] to i8*
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32* [[OUT_BLOCK]], i64 2
+; CHECK-NEXT:    [[SCEVGEP13:%.*]] = bitcast i32* [[SCEVGEP]] to i8*
+; CHECK-NEXT:    [[SCEVGEP15:%.*]] = getelementptr i32, i32* [[COUNTER]], i64 3
+; CHECK-NEXT:    [[SCEVGEP1516:%.*]] = bitcast i32* [[SCEVGEP15]] to i8*
+; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult i8* [[OUT_BLOCK12]], [[SCEVGEP1516]]
+; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult i8* [[COUNTER14]], [[SCEVGEP13]]
+; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label [[ENTRY_SCALAR:%.*]], label [[ENTRY_SLPVERSIONED:%.*]]
+; CHECK:       entry.slpversioned:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[COUNTER]], align 4, !alias.scope !10, !noalias !13
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[OUT_BLOCK]], align 4, !alias.scope !13, !noalias !10
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[TMP1]], [[TMP0]]
-; CHECK-NEXT:    store i32 [[XOR]], i32* [[OUT_BLOCK]], align 4
+; CHECK-NEXT:    store i32 [[XOR]], i32* [[OUT_BLOCK]], align 4, !alias.scope !13, !noalias !10
 ; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 1
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 1
@@ -111,18 +176,43 @@ define void @version_multiple(i32* nocapture %out_block, i32* nocapture readonly
 ; CHECK-NEXT:    [[XOR_1:%.*]] = xor i32 [[TMP3]], [[TMP2]]
 ; CHECK-NEXT:    store i32 [[XOR_1]], i32* [[ARRAYIDX2_1]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 2
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 2
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT:    [[XOR_2:%.*]] = xor i32 [[TMP5]], [[TMP4]]
-; CHECK-NEXT:    store i32 [[XOR_2]], i32* [[ARRAYIDX2_2]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 3
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[ARRAYIDX_2]] to <2 x i32>*
+; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i32>, <2 x i32>* [[TMP4]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 3
-; CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT:    [[XOR_3:%.*]] = xor i32 [[TMP7]], [[TMP6]]
-; CHECK-NEXT:    store i32 [[XOR_3]], i32* [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32* [[ARRAYIDX2_2]] to <2 x i32>*
+; CHECK-NEXT:    [[TMP7:%.*]] = load <2 x i32>, <2 x i32>* [[TMP6]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = xor <2 x i32> [[TMP7]], [[TMP5]]
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[ARRAYIDX2_2]] to <2 x i32>*
+; CHECK-NEXT:    store <2 x i32> [[TMP8]], <2 x i32>* [[TMP9]], align 4
+; CHECK-NEXT:    br label [[ENTRY_MERGE:%.*]]
+; CHECK:       entry.merge:
 ; CHECK-NEXT:    ret void
+; CHECK:       entry.scalar:
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, i32* [[COUNTER]], align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, i32* [[OUT_BLOCK]], align 4
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i32 [[TMP11]], [[TMP10]]
+; CHECK-NEXT:    store i32 [[XOR2]], i32* [[OUT_BLOCK]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_13:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load i32, i32* [[ARRAYIDX_13]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_14:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 1
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX2_14]], align 4
+; CHECK-NEXT:    [[XOR_15:%.*]] = xor i32 [[TMP13]], [[TMP12]]
+; CHECK-NEXT:    store i32 [[XOR_15]], i32* [[ARRAYIDX2_14]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_26:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 2
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, i32* [[ARRAYIDX_26]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_27:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 2
+; CHECK-NEXT:    [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX2_27]], align 4
+; CHECK-NEXT:    [[XOR_28:%.*]] = xor i32 [[TMP15]], [[TMP14]]
+; CHECK-NEXT:    store i32 [[XOR_28]], i32* [[ARRAYIDX2_27]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_39:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 3
+; CHECK-NEXT:    [[TMP16:%.*]] = load i32, i32* [[ARRAYIDX_39]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_310:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 3
+; CHECK-NEXT:    [[TMP17:%.*]] = load i32, i32* [[ARRAYIDX2_310]], align 4
+; CHECK-NEXT:    [[XOR_311:%.*]] = xor i32 [[TMP17]], [[TMP16]]
+; CHECK-NEXT:    store i32 [[XOR_311]], i32* [[ARRAYIDX2_310]], align 4
+; CHECK-NEXT:    br label [[ENTRY_MERGE]]
 ;
 entry:
   %0 = load i32, i32* %counter, align 4
@@ -294,13 +384,20 @@ declare void @clobber()
 
 define void @slp_not_beneficial(i32* %A, i32* %B) {
 ; CHECK-LABEL: @slp_not_beneficial(
-; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[TMP:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 4
-; CHECK-NEXT:    store i32 0, i32* [[TMP]], align 8
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 5
-; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 4
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 8
-; CHECK-NEXT:    store i32 [[TMP5]], i32* [[TMP3]], align 8
+; CHECK-NEXT:  bb.slpmemcheck:
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 4
+; CHECK-NEXT:    [[SCEVGEP6:%.*]] = bitcast i32* [[SCEVGEP]] to i8*
+; CHECK-NEXT:    [[SCEVGEP7:%.*]] = getelementptr i32, i32* [[B:%.*]], i64 4
+; CHECK-NEXT:    [[SCEVGEP78:%.*]] = bitcast i32* [[SCEVGEP7]] to i8*
+; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult i8* [[SCEVGEP6]], [[SCEVGEP78]]
+; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult i8* [[SCEVGEP78]], [[SCEVGEP6]]
+; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 4
+; CHECK-NEXT:    store i32 0, i32* [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP33:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 5
+; CHECK-NEXT:    [[TMP44:%.*]] = getelementptr inbounds i32, i32* [[B]], i32 4
+; CHECK-NEXT:    [[TMP55:%.*]] = load i32, i32* [[TMP44]], align 8
+; CHECK-NEXT:    store i32 [[TMP55]], i32* [[TMP33]], align 8
 ; CHECK-NEXT:    ret void
 ;
 bb:
@@ -315,23 +412,49 @@ bb:
 
 define void @widget(double* %ptr, double* %ptr.2) {
 ; CHECK-LABEL: @widget(
-; CHECK-NEXT:  bb1:
+; CHECK-NEXT:  bb1.slpmemcheck:
+; CHECK-NEXT:    [[PTR13:%.*]] = bitcast double* [[PTR:%.*]] to i8*
+; CHECK-NEXT:    [[PTR_215:%.*]] = bitcast double* [[PTR_2:%.*]] to i8*
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr double, double* [[PTR]], i64 1
+; CHECK-NEXT:    [[SCEVGEP14:%.*]] = bitcast double* [[SCEVGEP]] to i8*
+; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult i8* [[PTR13]], [[PTR_215]]
+; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult i8* [[PTR_215]], [[SCEVGEP14]]
+; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT:    br i1 [[FOUND_CONFLICT]], label [[BB1_SCALAR:%.*]], label [[BB1_SLPVERSIONED:%.*]]
+; CHECK:       bb1.slpversioned:
 ; CHECK-NEXT:    [[TMP3:%.*]] = load double, double* null, align 8
-; CHECK-NEXT:    [[TMP4:%.*]] = fmul double undef, [[TMP3]]
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds double, double* [[PTR:%.*]], i32 0
-; CHECK-NEXT:    [[TMP6:%.*]] = load double, double* [[TMP5]], align 8
-; CHECK-NEXT:    [[TMP7:%.*]] = fadd double [[TMP6]], [[TMP4]]
-; CHECK-NEXT:    store double [[TMP7]], double* [[TMP5]], align 8
-; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds double, double* [[PTR_2:%.*]], i64 0
-; CHECK-NEXT:    [[TMP9:%.*]] = load double, double* [[TMP8]], align 8
-; CHECK-NEXT:    [[TMP10:%.*]] = fmul double undef, [[TMP9]]
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds double, double* [[PTR]], i32 0
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds double, double* [[PTR_2]], i64 0
+; CHECK-NEXT:    [[TMP9:%.*]] = load double, double* [[TMP8]], align 8, !alias.scope !15, !noalias !18
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x double> poison, double [[TMP3]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[TMP9]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = fmul <2 x double> poison, [[TMP1]]
 ; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds double, double* [[PTR]], i32 1
-; CHECK-NEXT:    [[TMP12:%.*]] = load double, double* [[TMP11]], align 8
-; CHECK-NEXT:    [[TMP13:%.*]] = fadd double [[TMP12]], [[TMP10]]
-; CHECK-NEXT:    store double [[TMP13]], double* [[TMP11]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[TMP5]] to <2 x double>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x double>, <2 x double>* [[TMP3]], align 8, !alias.scope !18, !noalias !15
+; CHECK-NEXT:    [[TMP5:%.*]] = fadd <2 x double> [[TMP4]], [[TMP2]]
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[TMP5]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 8, !alias.scope !18, !noalias !15
+; CHECK-NEXT:    br label [[BB1_MERGE:%.*]]
+; CHECK:       bb1.merge:
 ; CHECK-NEXT:    br label [[BB15:%.*]]
 ; CHECK:       bb15:
 ; CHECK-NEXT:    br label [[BB15]]
+; CHECK:       bb1.scalar:
+; CHECK-NEXT:    [[TMP32:%.*]] = load double, double* null, align 8
+; CHECK-NEXT:    [[TMP43:%.*]] = fmul double undef, [[TMP32]]
+; CHECK-NEXT:    [[TMP54:%.*]] = getelementptr inbounds double, double* [[PTR]], i32 0
+; CHECK-NEXT:    [[TMP65:%.*]] = load double, double* [[TMP54]], align 8
+; CHECK-NEXT:    [[TMP76:%.*]] = fadd double [[TMP65]], [[TMP43]]
+; CHECK-NEXT:    store double [[TMP76]], double* [[TMP54]], align 8
+; CHECK-NEXT:    [[TMP87:%.*]] = getelementptr inbounds double, double* [[PTR_2]], i64 0
+; CHECK-NEXT:    [[TMP98:%.*]] = load double, double* [[TMP87]], align 8
+; CHECK-NEXT:    [[TMP109:%.*]] = fmul double undef, [[TMP98]]
+; CHECK-NEXT:    [[TMP1110:%.*]] = getelementptr inbounds double, double* [[PTR]], i32 1
+; CHECK-NEXT:    [[TMP1211:%.*]] = load double, double* [[TMP1110]], align 8
+; CHECK-NEXT:    [[TMP1312:%.*]] = fadd double [[TMP1211]], [[TMP109]]
+; CHECK-NEXT:    store double [[TMP1312]], double* [[TMP1110]], align 8
+; CHECK-NEXT:    br label [[BB1_MERGE]]
 ;
 bb1:                                              ; preds = %bb
   %tmp3 = load double, double* null, align 8

diff  --git a/llvm/test/Transforms/SLPVectorizer/X86/memory-runtime-checks.ll b/llvm/test/Transforms/SLPVectorizer/X86/memory-runtime-checks.ll
index cf76d0a98da2..4db147a53076 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/memory-runtime-checks.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/memory-runtime-checks.ll
@@ -3,29 +3,38 @@
 
 define void @version_multiple(i32* nocapture %out_block, i32* nocapture readonly %counter) {
 ; CHECK-LABEL: @version_multiple(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[COUNTER:%.*]], align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[OUT_BLOCK:%.*]], align 4
-; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[TMP1]], [[TMP0]]
-; CHECK-NEXT:    store i32 [[XOR]], i32* [[OUT_BLOCK]], align 4
-; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 1
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
-; CHECK-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 1
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT:    [[XOR_1:%.*]] = xor i32 [[TMP3]], [[TMP2]]
-; CHECK-NEXT:    store i32 [[XOR_1]], i32* [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 2
-; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
-; CHECK-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 2
-; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT:    [[XOR_2:%.*]] = xor i32 [[TMP5]], [[TMP4]]
-; CHECK-NEXT:    store i32 [[XOR_2]], i32* [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 3
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
-; CHECK-NEXT:    [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 3
-; CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT:    [[XOR_3:%.*]] = xor i32 [[TMP7]], [[TMP6]]
-; CHECK-NEXT:    store i32 [[XOR_3]], i32* [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT:  entry.slpmemcheck:
+; CHECK-NEXT:    [[OUT_BLOCK12:%.*]] = bitcast i32* [[OUT_BLOCK:%.*]] to i8*
+; CHECK-NEXT:    [[COUNTER14:%.*]] = bitcast i32* [[COUNTER:%.*]] to i8*
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i32, i32* [[OUT_BLOCK]], i64 1
+; CHECK-NEXT:    [[SCEVGEP13:%.*]] = bitcast i32* [[SCEVGEP]] to i8*
+; CHECK-NEXT:    [[SCEVGEP15:%.*]] = getelementptr i32, i32* [[COUNTER]], i64 1
+; CHECK-NEXT:    [[SCEVGEP1516:%.*]] = bitcast i32* [[SCEVGEP15]] to i8*
+; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult i8* [[OUT_BLOCK12]], [[SCEVGEP1516]]
+; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult i8* [[COUNTER14]], [[SCEVGEP13]]
+; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[COUNTER]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[OUT_BLOCK]], align 4
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i32 [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    store i32 [[XOR2]], i32* [[OUT_BLOCK]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_13:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 1
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_13]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_14:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX2_14]], align 4
+; CHECK-NEXT:    [[XOR_15:%.*]] = xor i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    store i32 [[XOR_15]], i32* [[ARRAYIDX2_14]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_26:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 2
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_26]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_27:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 2
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX2_27]], align 4
+; CHECK-NEXT:    [[XOR_28:%.*]] = xor i32 [[TMP5]], [[TMP4]]
+; CHECK-NEXT:    store i32 [[XOR_28]], i32* [[ARRAYIDX2_27]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_39:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 3
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_39]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2_310:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 3
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX2_310]], align 4
+; CHECK-NEXT:    [[XOR_311:%.*]] = xor i32 [[TMP7]], [[TMP6]]
+; CHECK-NEXT:    store i32 [[XOR_311]], i32* [[ARRAYIDX2_310]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -61,7 +70,7 @@ define void @delete_pointer_bound(float* %a, float* %b, i1 %c) #0 {
 ; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <2 x float*> poison, float* [[B:%.*]], i32 0
 ; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x float*> [[TMP0]], float* [[B]], i32 1
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr float, <2 x float*> [[TMP1]], <2 x i64> <i64 10, i64 14>
-; CHECK-NEXT:    br i1 [[C:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[C:%.*]], label [[THEN_SLPMEMCHECK:%.*]], label [[ELSE:%.*]]
 ; CHECK:       else:
 ; CHECK-NEXT:    [[TMP3:%.*]] = call <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*> [[TMP2]], i32 4, <2 x i1> <i1 true, i1 true>, <2 x float> undef)
 ; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> poison, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
@@ -69,17 +78,24 @@ define void @delete_pointer_bound(float* %a, float* %b, i1 %c) #0 {
 ; CHECK-NEXT:    [[I71:%.*]] = shufflevector <8 x float> undef, <8 x float> [[TMP4]], <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 10, i32 5, i32 6, i32 13>
 ; CHECK-NEXT:    call void @use(<8 x float> [[I71]])
 ; CHECK-NEXT:    ret void
-; CHECK:       then:
-; CHECK-NEXT:    [[A_8:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 8
-; CHECK-NEXT:    store float 0.000000e+00, float* [[A_8]], align 4
+; CHECK:       then.slpmemcheck:
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr float, float* [[A:%.*]], i64 8
+; CHECK-NEXT:    [[SCEVGEP8:%.*]] = bitcast float* [[SCEVGEP]] to i8*
+; CHECK-NEXT:    [[SCEVGEP9:%.*]] = getelementptr float, float* [[B]], i64 14
+; CHECK-NEXT:    [[SCEVGEP910:%.*]] = bitcast float* [[SCEVGEP9]] to i8*
+; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult i8* [[SCEVGEP8]], [[SCEVGEP910]]
+; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult i8* [[SCEVGEP910]], [[SCEVGEP8]]
+; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT:    [[A_83:%.*]] = getelementptr inbounds float, float* [[A]], i64 8
+; CHECK-NEXT:    store float 0.000000e+00, float* [[A_83]], align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x float*> [[TMP2]], i32 1
-; CHECK-NEXT:    [[L6:%.*]] = load float, float* [[TMP5]], align 4
-; CHECK-NEXT:    [[A_5:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
-; CHECK-NEXT:    store float [[L6]], float* [[A_5]], align 4
-; CHECK-NEXT:    [[A_6:%.*]] = getelementptr inbounds float, float* [[A]], i64 6
-; CHECK-NEXT:    store float 0.000000e+00, float* [[A_6]], align 4
-; CHECK-NEXT:    [[A_7:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
-; CHECK-NEXT:    store float 0.000000e+00, float* [[A_7]], align 4
+; CHECK-NEXT:    [[L64:%.*]] = load float, float* [[TMP5]], align 4
+; CHECK-NEXT:    [[A_55:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
+; CHECK-NEXT:    store float [[L64]], float* [[A_55]], align 4
+; CHECK-NEXT:    [[A_66:%.*]] = getelementptr inbounds float, float* [[A]], i64 6
+; CHECK-NEXT:    store float 0.000000e+00, float* [[A_66]], align 4
+; CHECK-NEXT:    [[A_77:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
+; CHECK-NEXT:    store float 0.000000e+00, float* [[A_77]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -122,6 +138,7 @@ define double @preserve_loop_info(%struct.zot* %arg) {
 ; CHECK-LABEL: @preserve_loop_info(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP:%.*]] = alloca [3 x double], align 16
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast [3 x double]* [[TMP]] to i8*
 ; CHECK-NEXT:    br label [[OUTER_HEADER:%.*]]
 ; CHECK:       outer.header:
 ; CHECK-NEXT:    br label [[INNER:%.*]]
@@ -133,14 +150,21 @@ define double @preserve_loop_info(%struct.zot* %arg) {
 ; CHECK-NEXT:    [[TMP5:%.*]] = load [3 x double]*, [3 x double]** undef, align 8
 ; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [3 x double], [3 x double]* [[TMP]], i64 0, i64 0
 ; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [3 x double], [3 x double]* [[TMP]], i64 0, i64 1
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr [3 x double], [3 x double]* [[TMP]], i64 0, i64 1
+; CHECK-NEXT:    [[SCEVGEP5:%.*]] = bitcast double* [[SCEVGEP]] to i8*
+; CHECK-NEXT:    [[SCEVGEP6:%.*]] = getelementptr [3 x double], [3 x double]* [[TMP5]], i64 undef, i64 1
+; CHECK-NEXT:    [[SCEVGEP67:%.*]] = bitcast double* [[SCEVGEP6]] to i8*
 ; CHECK-NEXT:    br label [[LOOP_3HEADER:%.*]]
 ; CHECK:       loop.3header:
-; CHECK-NEXT:    br i1 undef, label [[LOOP_3LATCH:%.*]], label [[BB9:%.*]]
-; CHECK:       bb9:
-; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [3 x double], [3 x double]* [[TMP5]], i64 undef, i64 1
+; CHECK-NEXT:    br i1 undef, label [[LOOP_3LATCH:%.*]], label [[BB9_SLPMEMCHECK:%.*]]
+; CHECK:       bb9.slpmemcheck:
+; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult i8* [[TMP4]], [[SCEVGEP67]]
+; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult i8* [[SCEVGEP67]], [[SCEVGEP5]]
+; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT:    [[TMP102:%.*]] = getelementptr inbounds [3 x double], [3 x double]* [[TMP5]], i64 undef, i64 1
 ; CHECK-NEXT:    store double undef, double* [[TMP6]], align 16
-; CHECK-NEXT:    [[TMP12:%.*]] = load double, double* [[TMP10]], align 8
-; CHECK-NEXT:    store double [[TMP12]], double* [[TMP7]], align 8
+; CHECK-NEXT:    [[TMP123:%.*]] = load double, double* [[TMP102]], align 8
+; CHECK-NEXT:    store double [[TMP123]], double* [[TMP7]], align 8
 ; CHECK-NEXT:    br label [[LOOP_3LATCH]]
 ; CHECK:       loop.3latch:
 ; CHECK-NEXT:    br i1 undef, label [[BB14:%.*]], label [[LOOP_3HEADER]]


        

