[llvm-branch-commits] [clang] 860a8bd - Merge branch 'main' into revert-178287-wasm-ld-crash-fix

via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue Feb 17 03:57:37 PST 2026


Author: Omair Javaid
Date: 2026-02-17T16:57:21+05:00
New Revision: 860a8bddfc8780e270622e2206a7f01d0e4d8402

URL: https://github.com/llvm/llvm-project/commit/860a8bddfc8780e270622e2206a7f01d0e4d8402
DIFF: https://github.com/llvm/llvm-project/commit/860a8bddfc8780e270622e2206a7f01d0e4d8402.diff

LOG: Merge branch 'main' into revert-178287-wasm-ld-crash-fix

Added: 
    clang/test/Sema/warn-lifetime-safety-cfg-bailout.cpp
    llvm/test/Transforms/LoopVectorize/VPlan/predicator.ll

Modified: 
    clang/include/clang/Analysis/Analyses/LifetimeSafety/LifetimeSafety.h
    clang/include/clang/Basic/DiagnosticASTKinds.td
    clang/include/clang/Basic/LangOptions.def
    clang/include/clang/Options/Options.td
    clang/lib/Analysis/LifetimeSafety/LifetimeSafety.cpp
    clang/unittests/Analysis/LifetimeSafetyTest.cpp
    llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
    llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
    llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
    llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
    llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
    llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
    llvm/lib/Transforms/Vectorize/VPlanTransforms.h
    llvm/test/CodeGen/X86/win_cst_pool.ll
    llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
    llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll
    llvm/test/Transforms/LoopVectorize/VPlan/vplan-print-after-all.ll

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Analysis/Analyses/LifetimeSafety/LifetimeSafety.h b/clang/include/clang/Analysis/Analyses/LifetimeSafety/LifetimeSafety.h
index 6148f86091110..7761a5c24c606 100644
--- a/clang/include/clang/Analysis/Analyses/LifetimeSafety/LifetimeSafety.h
+++ b/clang/include/clang/Analysis/Analyses/LifetimeSafety/LifetimeSafety.h
@@ -28,6 +28,7 @@
 #include "clang/Analysis/Analyses/LifetimeSafety/MovedLoans.h"
 #include "clang/Analysis/Analyses/LifetimeSafety/Origins.h"
 #include "clang/Analysis/AnalysisDeclContext.h"
+#include <cstdint>
 #include <memory>
 
 namespace clang::lifetimes {
@@ -39,6 +40,12 @@ enum class Confidence : uint8_t {
   Definite // Reported as a definite error (-Wlifetime-safety-permissive)
 };
 
+struct LifetimeSafetyOpts {
+  /// Maximum number of CFG blocks to analyze. Functions with larger CFGs will
+  /// be skipped.
+  size_t MaxCFGBlocks;
+};
+
 /// Enum to track functions visible across or within TU.
 enum class SuggestionScope {
   CrossTU, // For suggestions on declarations visible across Translation Units.
@@ -130,7 +137,8 @@ struct LifetimeFactory {
 class LifetimeSafetyAnalysis {
 public:
   LifetimeSafetyAnalysis(AnalysisDeclContext &AC,
-                         LifetimeSafetySemaHelper *SemaHelper);
+                         LifetimeSafetySemaHelper *SemaHelper,
+                         const LifetimeSafetyOpts &LSOpts);
 
   void run();
 
@@ -144,6 +152,7 @@ class LifetimeSafetyAnalysis {
 private:
   AnalysisDeclContext &AC;
   LifetimeSafetySemaHelper *SemaHelper;
+  const LifetimeSafetyOpts LSOpts;
   LifetimeFactory Factory;
   std::unique_ptr<FactManager> FactMgr;
   std::unique_ptr<LiveOriginsAnalysis> LiveOrigins;

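After this change every construction of LifetimeSafetyAnalysis must supply a
LifetimeSafetyOpts. A minimal sketch of the updated call shape, mirroring the
unittest hunk further down (the AnalysisCtx name and the null Sema helper are
taken from that hunk and stand in for whatever the caller has):

    LifetimeSafetyOpts LSOpts;
    LSOpts.MaxCFGBlocks = 0; // 0 disables the CFG-size bailout ("no limit")
    LifetimeSafetyAnalysis Analysis(AnalysisCtx, /*SemaHelper=*/nullptr, LSOpts);
    Analysis.run();
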
diff --git a/clang/include/clang/Basic/DiagnosticASTKinds.td b/clang/include/clang/Basic/DiagnosticASTKinds.td
index f36c02851a6a1..5446c0d89597b 100644
--- a/clang/include/clang/Basic/DiagnosticASTKinds.td
+++ b/clang/include/clang/Basic/DiagnosticASTKinds.td
@@ -404,10 +404,8 @@ def note_constexpr_infer_alloc_token_type_inference_failed : Note<
   "could not infer allocation type for __builtin_infer_alloc_token">;
 def note_constexpr_infer_alloc_token_no_metadata : Note<
   "could not get token metadata for inferred type">;
-def note_constexpr_infer_alloc_token_stateful_mode : Note<
-  "stateful alloc token mode not supported in constexpr">;
-def err_experimental_clang_interp_failed : Error<
-  "the experimental clang interpreter failed to evaluate an expression">;
+def note_constexpr_infer_alloc_token_stateful_mode
+    : Note<"stateful alloc token mode not supported in constexpr">;
 
 def warn_attribute_needs_aggregate : Warning<
   "%0 attribute is ignored in non-aggregate type %1">,

diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def
index 45e2777def4fa..08f102839b89e 100644
--- a/clang/include/clang/Basic/LangOptions.def
+++ b/clang/include/clang/Basic/LangOptions.def
@@ -504,6 +504,8 @@ LANGOPT(BoundsSafety, 1, 0, NotCompatible, "Bounds safety extension for C")
 
 LANGOPT(EnableLifetimeSafety, 1, 0, NotCompatible, "Lifetime safety analysis for C++")
 
+LANGOPT(LifetimeSafetyMaxCFGBlocks, 32, 0, NotCompatible, "Skip LifetimeSafety analysis for functions with CFG block count exceeding this threshold. Specify 0 for no limit")
+
 LANGOPT(EnableLifetimeSafetyInference, 1, 0, NotCompatible, "Lifetime safety inference analysis for C++")
 
 // TODO: Remove flag and default to end-of-TU analysis for lifetime safety after performance validation.

diff --git a/clang/include/clang/Options/Options.td b/clang/include/clang/Options/Options.td
index a274017953b1d..24b31fb3fefcc 100644
--- a/clang/include/clang/Options/Options.td
+++ b/clang/include/clang/Options/Options.td
@@ -1967,6 +1967,14 @@ defm lifetime_safety : BoolFOption<
   NegFlag<SetFalse, [], [CC1Option], "Disable">,
   BothFlags<[], [CC1Option], " lifetime safety for C++">>;
 
+def lifetime_safety_max_cfg_blocks
+    : Joined<["-"], "lifetime-safety-max-cfg-blocks=">,
+      Group<m_Group>,
+      Visibility<[ClangOption, CC1Option]>,
+      HelpText<"Skip LifetimeSafety analysis for functions with CFG block "
+               "count exceeding this threshold. Specify 0 for no limit.">,
+      MarshallingInfoInt<LangOpts<"LifetimeSafetyMaxCFGBlocks">>;
+
 defm lifetime_safety_inference
     : BoolFOption<"lifetime-safety-inference",
                   LangOpts<"EnableLifetimeSafetyInference">, DefaultFalse,

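The new flag marshals into the LifetimeSafetyMaxCFGBlocks lang option declared
in the LangOptions.def hunk above (default 0, i.e. no limit). The added lit
test below exercises it through cc1; its first RUN line:

    %clang_cc1 -fsyntax-only -Wlifetime-safety -lifetime-safety-max-cfg-blocks=3 -Wno-dangling -verify=bailout %s
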
diff --git a/clang/lib/Analysis/LifetimeSafety/LifetimeSafety.cpp b/clang/lib/Analysis/LifetimeSafety/LifetimeSafety.cpp
index a6bea74c50b49..714f979fa5ee7 100644
--- a/clang/lib/Analysis/LifetimeSafety/LifetimeSafety.cpp
+++ b/clang/lib/Analysis/LifetimeSafety/LifetimeSafety.cpp
@@ -49,13 +49,25 @@ static void DebugOnlyFunction(AnalysisDeclContext &AC, const CFG &Cfg,
 #endif
 
 LifetimeSafetyAnalysis::LifetimeSafetyAnalysis(
-    AnalysisDeclContext &AC, LifetimeSafetySemaHelper *SemaHelper)
-    : AC(AC), SemaHelper(SemaHelper) {}
+    AnalysisDeclContext &AC, LifetimeSafetySemaHelper *SemaHelper,
+    const LifetimeSafetyOpts &LSOpts)
+    : AC(AC), SemaHelper(SemaHelper), LSOpts(LSOpts) {}
 
 void LifetimeSafetyAnalysis::run() {
   llvm::TimeTraceScope TimeProfile("LifetimeSafetyAnalysis");
 
   const CFG &Cfg = *AC.getCFG();
+  if (LSOpts.MaxCFGBlocks > 0 && Cfg.getNumBlockIDs() > LSOpts.MaxCFGBlocks) {
+    DEBUG_WITH_TYPE(
+        "LifetimeSafety", std::string FuncName = "<unknown>";
+        if (const Decl *D = AC.getDecl()) if (const auto *ND =
+                                                  dyn_cast<NamedDecl>(D))
+            FuncName = ND->getQualifiedNameAsString();
+        llvm::dbgs() << "LifetimeSafety: Skipping function " << FuncName
+                     << " due to large CFG: " << Cfg.getNumBlockIDs()
+                     << " blocks (threshold: " << LSOpts.MaxCFGBlocks << ")\n");
+    return;
+  }
 
   FactMgr = std::make_unique<FactManager>(AC, Cfg);
 
@@ -111,7 +123,11 @@ void collectLifetimeStats(AnalysisDeclContext &AC, OriginManager &OM,
 void runLifetimeSafetyAnalysis(AnalysisDeclContext &AC,
                                LifetimeSafetySemaHelper *SemaHelper,
                                LifetimeSafetyStats &Stats, bool CollectStats) {
-  internal::LifetimeSafetyAnalysis Analysis(AC, SemaHelper);
+  LifetimeSafetyOpts LSOpts;
+  LSOpts.MaxCFGBlocks =
+      AC.getASTContext().getLangOpts().LifetimeSafetyMaxCFGBlocks;
+
+  internal::LifetimeSafetyAnalysis Analysis(AC, SemaHelper, LSOpts);
   Analysis.run();
   if (CollectStats)
     collectLifetimeStats(AC, Analysis.getFactManager().getOriginMgr(), Stats);

diff --git a/clang/test/Sema/warn-lifetime-safety-cfg-bailout.cpp b/clang/test/Sema/warn-lifetime-safety-cfg-bailout.cpp
new file mode 100644
index 0000000000000..7c5d61e23e710
--- /dev/null
+++ b/clang/test/Sema/warn-lifetime-safety-cfg-bailout.cpp
@@ -0,0 +1,48 @@
+// RUN: %clang_cc1 -fsyntax-only -Wlifetime-safety -lifetime-safety-max-cfg-blocks=3 -Wno-dangling -verify=bailout %s
+// RUN: %clang_cc1 -fsyntax-only -Wlifetime-safety -Wno-dangling -verify=bailout -verify=nobailout %s
+
+struct MyObj {
+  int id;
+  ~MyObj() {}  // Non-trivial destructor
+  MyObj operator+(MyObj);
+  void use() const;
+};
+
+struct [[gsl::Pointer()]] View {
+  View(const MyObj&); // Borrows from MyObj
+  View();
+  void use() const;
+};
+
+class TriviallyDestructedClass {
+  View a, b;
+};
+
+//===----------------------------------------------------------------------===//
+// Basic Definite Use-After-Free (-W...permissive)
+// These are cases where the pointer is guaranteed to be dangling at the use site.
+//===----------------------------------------------------------------------===//
+
+void single_block_cfg() {
+  MyObj* p;
+  {
+    MyObj s;
+    p = &s;     // bailout-warning {{object whose reference is captured does not live long enough}}
+  }             // bailout-note {{destroyed here}}
+  (void)*p;     // bailout-note {{later used here}}
+}
+
+void multiple_block_cfg() {
+  MyObj* p;
+  int a = 10;
+  MyObj safe;
+  {
+    if (a > 5) {
+      MyObj s;
+      p = &s;    // nobailout-warning {{object whose reference is captured does not live long enough}}
+    } else {     // nobailout-note {{destroyed here}}
+      p = &safe;
+    }     
+  }             
+  p->use();      // nobailout-note {{later used here}}
+}

diff --git a/clang/unittests/Analysis/LifetimeSafetyTest.cpp b/clang/unittests/Analysis/LifetimeSafetyTest.cpp
index 45611f856b3b2..a27f746fffb60 100644
--- a/clang/unittests/Analysis/LifetimeSafetyTest.cpp
+++ b/clang/unittests/Analysis/LifetimeSafetyTest.cpp
@@ -63,7 +63,10 @@ class LifetimeTestRunner {
     BuildOptions.AddLifetime = true;
 
     // Run the main analysis.
-    Analysis = std::make_unique<LifetimeSafetyAnalysis>(*AnalysisCtx, nullptr);
+    LifetimeSafetyOpts LSOpts;
+    LSOpts.MaxCFGBlocks = 0;
+    Analysis =
+        std::make_unique<LifetimeSafetyAnalysis>(*AnalysisCtx, nullptr, LSOpts);
     Analysis->run();
 
     AnnotationToPointMap = Analysis->getFactManager().getTestPoints();

diff --git a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index 47ee485a2cca9..83446e7d670f0 100644
--- a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -2126,9 +2126,25 @@ static std::string scalarConstantToHexString(const Constant *C) {
   if (isa<UndefValue>(C)) {
     return APIntToHexString(APInt::getZero(Ty->getPrimitiveSizeInBits()));
   } else if (const auto *CFP = dyn_cast<ConstantFP>(C)) {
-    return APIntToHexString(CFP->getValueAPF().bitcastToAPInt());
+    if (CFP->getType()->isFloatingPointTy())
+      return APIntToHexString(CFP->getValueAPF().bitcastToAPInt());
+
+    std::string HexString;
+    unsigned NumElements =
+        cast<FixedVectorType>(CFP->getType())->getNumElements();
+    for (unsigned I = 0; I < NumElements; ++I)
+      HexString += APIntToHexString(CFP->getValueAPF().bitcastToAPInt());
+    return HexString;
   } else if (const auto *CI = dyn_cast<ConstantInt>(C)) {
-    return APIntToHexString(CI->getValue());
+    if (CI->getType()->isIntegerTy())
+      return APIntToHexString(CI->getValue());
+
+    std::string HexString;
+    unsigned NumElements =
+        cast<FixedVectorType>(CI->getType())->getNumElements();
+    for (unsigned I = 0; I < NumElements; ++I)
+      HexString += APIntToHexString(CI->getValue());
+    return HexString;
   } else {
     unsigned NumElements;
     if (auto *VTy = dyn_cast<VectorType>(Ty))

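The two new branches cover ConstantFP/ConstantInt values of fixed-vector type
(splat constants): the element's hex encoding is emitted once per lane. A
stand-alone sketch of that repetition in plain C++, independent of the LLVM
APIs (repeatElementHex and the sample bit pattern are illustrative
assumptions, not part of the patch):

    #include <cstdio>
    #include <string>

    // Repeat a splat element's hex encoding once per vector lane, as the
    // patched scalarConstantToHexString now does for vector constants.
    static std::string repeatElementHex(const std::string &ElementHex,
                                        unsigned NumElements) {
      std::string HexString;
      for (unsigned I = 0; I < NumElements; ++I)
        HexString += ElementHex; // splat: every lane has the same bits
      return HexString;
    }

    int main() {
      // e.g. a <4 x float> splat of 1.0f (bit pattern 3f800000)
      std::printf("%s\n", repeatElementHex("3f800000", 4).c_str());
      return 0;
    }
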
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
index 8091bdc512421..3123a6f9ffe77 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
@@ -531,8 +531,9 @@ static Register buildBuiltinVariableLoad(
   return LoadedRegister;
 }
 
-/// Helper external function for assigning SPIRVType to a register, ensuring the
-/// register class and type are set in MRI. Defined in SPIRVPreLegalizer.cpp.
+/// Helper external function for assigning a SPIRV type to a register, ensuring
+/// the register class and type are set in MRI. Defined in
+/// SPIRVPreLegalizer.cpp.
 extern void updateRegType(Register Reg, Type *Ty, SPIRVTypeInst SpirvTy,
                           SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
                           MachineRegisterInfo &MRI);
@@ -3617,7 +3618,7 @@ lowerBuiltinType(const Type *OpaqueType,
     // "Lower" the BuiltinType into TargetType. The following get<...>Type
     // methods use the implementation details from TableGen records or
     // TargetExtType parameters to either create a new OpType<...> machine
-    // instruction or get an existing equivalent SPIRVType from
+    // instruction or get an existing equivalent SPIRV type from
     // GlobalRegistry.
 
     switch (TypeRecord->Opcode) {

diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
index 89b6975cdbdbd..3849a8b223b36 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
@@ -195,7 +195,7 @@ static void buildOpBitcast(SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
 // We lower G_BITCAST to OpBitcast here to avoid a MachineVerifier error.
 // The verifier checks if the source and destination LLTs of a G_BITCAST are
 // different, but this check is too strict for SPIR-V's typed pointers, which
-// may have the same LLT but different SPIRVType (e.g. pointers to different
+// may have the same LLT but different SPIRV type (e.g. pointers to different
 // pointee types). By lowering to OpBitcast here, we bypass the verifier's
 // check. See discussion in https://github.com/llvm/llvm-project/pull/110270
 // for more context.
@@ -295,9 +295,9 @@ static void insertBitcasts(MachineFunction &MF, SPIRVGlobalRegistry *GR,
 //  %1 = G_ZEXT %2
 //  G_MEMCPY ... %2 ...
 //
-// New registers have no SPIRVType and no register class info.
+// New registers have no SPIRV type and no register class info.
 //
-// Set SPIRVType for GV, propagate it from GV to other instructions,
+// Set SPIRV type for GV, propagate it from GV to other instructions,
 // also set register classes.
 static SPIRVTypeInst propagateSPIRVType(MachineInstr *MI,
                                         SPIRVGlobalRegistry *GR,
@@ -628,7 +628,7 @@ generateAssignInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR,
   }
 
   // Address the case when IRTranslator introduces instructions with new
-  // registers without SPIRVType associated.
+  // registers without associated SPIRV type.
   for (MachineBasicBlock &MBB : MF) {
     for (MachineInstr &MI : MBB) {
       switch (MI.getOpcode()) {

diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index e056f0c1f6390..b0a8d86a46578 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -989,88 +989,14 @@ struct DSEState {
   SmallVector<Instruction *> ToRemove;
 
   // Class contains self-reference, make sure it's not copied/moved.
-  DSEState(const DSEState &) = delete;
-  DSEState &operator=(const DSEState &) = delete;
-
   DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
            PostDominatorTree &PDT, const TargetLibraryInfo &TLI,
-           const LoopInfo &LI)
-      : F(F), AA(AA), EA(DT, &LI), BatchAA(AA, &EA), MSSA(MSSA), DT(DT),
-        PDT(PDT), TLI(TLI), DL(F.getDataLayout()), LI(LI) {
-    // Collect blocks with throwing instructions not modeled in MemorySSA and
-    // alloc-like objects.
-    unsigned PO = 0;
-    for (BasicBlock *BB : post_order(&F)) {
-      PostOrderNumbers[BB] = PO++;
-      for (Instruction &I : *BB) {
-        MemoryAccess *MA = MSSA.getMemoryAccess(&I);
-        if (I.mayThrow() && !MA)
-          ThrowingBlocks.insert(I.getParent());
-
-        auto *MD = dyn_cast_or_null<MemoryDef>(MA);
-        if (MD && MemDefs.size() < MemorySSADefsPerBlockLimit &&
-            (getLocForWrite(&I) || isMemTerminatorInst(&I) ||
-             (EnableInitializesImprovement && hasInitializesAttr(&I))))
-          MemDefs.push_back(MD);
-      }
-    }
-
-    // Treat byval, inalloca or dead on return arguments the same as Allocas,
-    // stores to them are dead at the end of the function.
-    for (Argument &AI : F.args()) {
-      if (AI.hasPassPointeeByValueCopyAttr()) {
-        InvisibleToCallerAfterRet.insert({&AI, true});
-        continue;
-      }
-
-      if (!AI.getType()->isPointerTy())
-        continue;
-
-      const DeadOnReturnInfo &Info = AI.getDeadOnReturnInfo();
-      if (Info.coversAllReachableMemory())
-        InvisibleToCallerAfterRet.insert({&AI, true});
-      else if (uint64_t DeadBytes = Info.getNumberOfDeadBytes())
-        InvisibleToCallerAfterRetBounded.insert({&AI, DeadBytes});
-    }
-
-    // Collect whether there is any irreducible control flow in the function.
-    ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI);
-
-    AnyUnreachableExit = any_of(PDT.roots(), [](const BasicBlock *E) {
-      return isa<UnreachableInst>(E->getTerminator());
-    });
-  }
-
-  static void pushMemUses(MemoryAccess *Acc,
-                          SmallVectorImpl<MemoryAccess *> &WorkList,
-                          SmallPtrSetImpl<MemoryAccess *> &Visited) {
-    for (Use &U : Acc->uses()) {
-      auto *MA = cast<MemoryAccess>(U.getUser());
-      if (Visited.insert(MA).second)
-        WorkList.push_back(MA);
-    }
-  };
+           const LoopInfo &LI);
+  DSEState(const DSEState &) = delete;
+  DSEState &operator=(const DSEState &) = delete;
 
   LocationSize strengthenLocationSize(const Instruction *I,
-                                      LocationSize Size) const {
-    if (auto *CB = dyn_cast<CallBase>(I)) {
-      LibFunc F;
-      if (TLI.getLibFunc(*CB, F) && TLI.has(F) &&
-          (F == LibFunc_memset_chk || F == LibFunc_memcpy_chk)) {
-        // Use the precise location size specified by the 3rd argument
-        // for determining KillingI overwrites DeadLoc if it is a memset_chk
-        // instruction. memset_chk will write either the amount specified as 3rd
-        // argument or the function will immediately abort and exit the program.
-        // NOTE: AA may determine NoAlias if it can prove that the access size
-        // is larger than the allocation size due to that being UB. To avoid
-        // returning potentially invalid NoAlias results by AA, limit the use of
-        // the precise location size to isOverwrite.
-        if (const auto *Len = dyn_cast<ConstantInt>(CB->getArgOperand(2)))
-          return LocationSize::precise(Len->getZExtValue());
-      }
-    }
-    return Size;
-  }
+                                      LocationSize Size) const;
 
   /// Return 'OW_Complete' if a store to the 'KillingLoc' location (by \p
   /// KillingI instruction) completely overwrites a store to the 'DeadLoc'
@@ -1084,375 +1010,49 @@ struct DSEState {
                               const Instruction *DeadI,
                               const MemoryLocation &KillingLoc,
                               const MemoryLocation &DeadLoc,
-                              int64_t &KillingOff, int64_t &DeadOff) {
-    // AliasAnalysis does not always account for loops. Limit overwrite checks
-    // to dependencies for which we can guarantee they are independent of any
-    // loops they are in.
-    if (!isGuaranteedLoopIndependent(DeadI, KillingI, DeadLoc))
-      return OW_Unknown;
-
-    LocationSize KillingLocSize =
-        strengthenLocationSize(KillingI, KillingLoc.Size);
-    const Value *DeadPtr = DeadLoc.Ptr->stripPointerCasts();
-    const Value *KillingPtr = KillingLoc.Ptr->stripPointerCasts();
-    const Value *DeadUndObj = getUnderlyingObject(DeadPtr);
-    const Value *KillingUndObj = getUnderlyingObject(KillingPtr);
-
-    // Check whether the killing store overwrites the whole object, in which
-    // case the size/offset of the dead store does not matter.
-    if (DeadUndObj == KillingUndObj && KillingLocSize.isPrecise() &&
-        isIdentifiedObject(KillingUndObj)) {
-      std::optional<TypeSize> KillingUndObjSize =
-          getPointerSize(KillingUndObj, DL, TLI, &F);
-      if (KillingUndObjSize && *KillingUndObjSize == KillingLocSize.getValue())
-        return OW_Complete;
-    }
-
-    // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
-    // get imprecise values here, though (except for unknown sizes).
-    if (!KillingLocSize.isPrecise() || !DeadLoc.Size.isPrecise()) {
-      // In case no constant size is known, try to use IR values for the number
-      // of bytes written and check if they match.
-      const auto *KillingMemI = dyn_cast<MemIntrinsic>(KillingI);
-      const auto *DeadMemI = dyn_cast<MemIntrinsic>(DeadI);
-      if (KillingMemI && DeadMemI) {
-        const Value *KillingV = KillingMemI->getLength();
-        const Value *DeadV = DeadMemI->getLength();
-        if (KillingV == DeadV && BatchAA.isMustAlias(DeadLoc, KillingLoc))
-          return OW_Complete;
-      }
-
-      // Masked stores have imprecise locations, but we can reason about them
-      // to some extent.
-      return isMaskedStoreOverwrite(KillingI, DeadI, BatchAA);
-    }
-
-    const TypeSize KillingSize = KillingLocSize.getValue();
-    const TypeSize DeadSize = DeadLoc.Size.getValue();
-    // Bail on doing Size comparison which depends on AA for now
-    // TODO: Remove AnyScalable once Alias Analysis deals with scalable vectors
-    const bool AnyScalable =
-        DeadSize.isScalable() || KillingLocSize.isScalable();
-
-    if (AnyScalable)
-      return OW_Unknown;
-    // Query the alias information
-    AliasResult AAR = BatchAA.alias(KillingLoc, DeadLoc);
-
-    // If the start pointers are the same, we just have to compare sizes to see if
-    // the killing store was larger than the dead store.
-    if (AAR == AliasResult::MustAlias) {
-      // Make sure that the KillingSize size is >= the DeadSize size.
-      if (KillingSize >= DeadSize)
-        return OW_Complete;
-    }
-
-    // If we hit a partial alias we may have a full overwrite
-    if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) {
-      int32_t Off = AAR.getOffset();
-      if (Off >= 0 && (uint64_t)Off + DeadSize <= KillingSize)
-        return OW_Complete;
-    }
-
-    // If we can't resolve the same pointers to the same object, then we can't
-    // analyze them at all.
-    if (DeadUndObj != KillingUndObj) {
-      // Non aliasing stores to different objects don't overlap. Note that
-      // if the killing store is known to overwrite whole object (out of
-      // bounds access overwrites whole object as well) then it is assumed to
-      // completely overwrite any store to the same object even if they don't
-      // actually alias (see next check).
-      if (AAR == AliasResult::NoAlias)
-        return OW_None;
-      return OW_Unknown;
-    }
-
-    // Okay, we have stores to two completely different pointers.  Try to
-    // decompose the pointer into a "base + constant_offset" form.  If the base
-    // pointers are equal, then we can reason about the two stores.
-    DeadOff = 0;
-    KillingOff = 0;
-    const Value *DeadBasePtr =
-        GetPointerBaseWithConstantOffset(DeadPtr, DeadOff, DL);
-    const Value *KillingBasePtr =
-        GetPointerBaseWithConstantOffset(KillingPtr, KillingOff, DL);
-
-    // If the base pointers still differ, we have two completely different
-    // stores.
-    if (DeadBasePtr != KillingBasePtr)
-      return OW_Unknown;
-
-    // The killing access completely overlaps the dead store if and only if
-    // both start and end of the dead one is "inside" the killing one:
-    //    |<->|--dead--|<->|
-    //    |-----killing------|
-    // Accesses may overlap if and only if start of one of them is "inside"
-    // another one:
-    //    |<->|--dead--|<-------->|
-    //    |-------killing--------|
-    //           OR
-    //    |-------dead-------|
-    //    |<->|---killing---|<----->|
-    //
-    // We have to be careful here as *Off is signed while *.Size is unsigned.
-
-    // Check if the dead access starts "not before" the killing one.
-    if (DeadOff >= KillingOff) {
-      // If the dead access ends "not after" the killing access then the
-      // dead one is completely overwritten by the killing one.
-      if (uint64_t(DeadOff - KillingOff) + DeadSize <= KillingSize)
-        return OW_Complete;
-      // If start of the dead access is "before" end of the killing access
-      // then accesses overlap.
-      else if ((uint64_t)(DeadOff - KillingOff) < KillingSize)
-        return OW_MaybePartial;
-    }
-    // If start of the killing access is "before" end of the dead access then
-    // accesses overlap.
-    else if ((uint64_t)(KillingOff - DeadOff) < DeadSize) {
-      return OW_MaybePartial;
-    }
-
-    // Can reach here only if accesses are known not to overlap.
-    return OW_None;
-  }
+                              int64_t &KillingOff, int64_t &DeadOff);
 
   bool isInvisibleToCallerAfterRet(const Value *V, const Value *Ptr,
-                                   const LocationSize StoreSize) {
-    if (isa<AllocaInst>(V))
-      return true;
-
-    auto IBounded = InvisibleToCallerAfterRetBounded.find(V);
-    if (IBounded != InvisibleToCallerAfterRetBounded.end()) {
-      int64_t ValueOffset;
-      [[maybe_unused]] const Value *BaseValue =
-          GetPointerBaseWithConstantOffset(Ptr, ValueOffset, DL);
-      // If we are not able to find a constant offset from the UO, we have to
-      // pessimistically assume that the store writes to memory out of the
-      // dead_on_return bounds.
-      if (BaseValue != V)
-        return false;
-      // This store is only invisible after return if we are in bounds of the
-      // range marked dead.
-      if (StoreSize.hasValue() &&
-          ValueOffset + StoreSize.getValue() <= IBounded->second &&
-          ValueOffset >= 0)
-        return true;
-    }
-    auto I = InvisibleToCallerAfterRet.insert({V, false});
-    if (I.second && isInvisibleToCallerOnUnwind(V) && isNoAliasCall(V))
-      I.first->second = capturesNothing(PointerMayBeCaptured(
-          V, /*ReturnCaptures=*/true, CaptureComponents::Provenance));
-    return I.first->second;
-  }
-
-  bool isInvisibleToCallerOnUnwind(const Value *V) {
-    bool RequiresNoCaptureBeforeUnwind;
-    if (!isNotVisibleOnUnwind(V, RequiresNoCaptureBeforeUnwind))
-      return false;
-    if (!RequiresNoCaptureBeforeUnwind)
-      return true;
-
-    auto I = CapturedBeforeReturn.insert({V, true});
-    if (I.second)
-      // NOTE: This could be made more precise by PointerMayBeCapturedBefore
-      // with the killing MemoryDef. But we refrain from doing so for now to
-      // limit compile-time and this does not cause any changes to the number
-      // of stores removed on a large test set in practice.
-      I.first->second = capturesAnything(PointerMayBeCaptured(
-          V, /*ReturnCaptures=*/false, CaptureComponents::Provenance));
-    return !I.first->second;
-  }
-
-  std::optional<MemoryLocation> getLocForWrite(Instruction *I) const {
-    if (!I->mayWriteToMemory())
-      return std::nullopt;
+                                   const LocationSize StoreSize);
 
-    if (auto *CB = dyn_cast<CallBase>(I))
-      return MemoryLocation::getForDest(CB, TLI);
+  bool isInvisibleToCallerOnUnwind(const Value *V);
 
-    return MemoryLocation::getOrNone(I);
-  }
+  std::optional<MemoryLocation> getLocForWrite(Instruction *I) const;
 
   // Returns a list of <MemoryLocation, bool> pairs written by I.
   // The bool means whether the write is from Initializes attr.
   SmallVector<std::pair<MemoryLocation, bool>, 1>
-  getLocForInst(Instruction *I, bool ConsiderInitializesAttr) {
-    SmallVector<std::pair<MemoryLocation, bool>, 1> Locations;
-    if (isMemTerminatorInst(I)) {
-      if (auto Loc = getLocForTerminator(I))
-        Locations.push_back(std::make_pair(Loc->first, false));
-      return Locations;
-    }
-
-    if (auto Loc = getLocForWrite(I))
-      Locations.push_back(std::make_pair(*Loc, false));
-
-    if (ConsiderInitializesAttr) {
-      for (auto &MemLoc : getInitializesArgMemLoc(I)) {
-        Locations.push_back(std::make_pair(MemLoc, true));
-      }
-    }
-    return Locations;
-  }
+  getLocForInst(Instruction *I, bool ConsiderInitializesAttr);
 
   /// Assuming this instruction has a dead analyzable write, can we delete
   /// this instruction?
-  bool isRemovable(Instruction *I) {
-    assert(getLocForWrite(I) && "Must have analyzable write");
-
-    // Don't remove volatile/atomic stores.
-    if (StoreInst *SI = dyn_cast<StoreInst>(I))
-      return SI->isUnordered();
-
-    if (auto *CB = dyn_cast<CallBase>(I)) {
-      // Don't remove volatile memory intrinsics.
-      if (auto *MI = dyn_cast<MemIntrinsic>(CB))
-        return !MI->isVolatile();
-
-      // Never remove dead lifetime intrinsics, e.g. because they are followed
-      // by a free.
-      if (CB->isLifetimeStartOrEnd())
-        return false;
-
-      return CB->use_empty() && CB->willReturn() && CB->doesNotThrow() &&
-             !CB->isTerminator();
-    }
-
-    return false;
-  }
+  bool isRemovable(Instruction *I);
 
   /// Returns true if \p UseInst completely overwrites \p DefLoc
   /// (stored by \p DefInst).
   bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst,
-                           Instruction *UseInst) {
-    // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
-    // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
-    // MemoryDef.
-    if (!UseInst->mayWriteToMemory())
-      return false;
-
-    if (auto *CB = dyn_cast<CallBase>(UseInst))
-      if (CB->onlyAccessesInaccessibleMemory())
-        return false;
-
-    int64_t InstWriteOffset, DepWriteOffset;
-    if (auto CC = getLocForWrite(UseInst))
-      return isOverwrite(UseInst, DefInst, *CC, DefLoc, InstWriteOffset,
-                         DepWriteOffset) == OW_Complete;
-    return false;
-  }
+                           Instruction *UseInst);
 
   /// Returns true if \p Def is not read before returning from the function.
-  bool isWriteAtEndOfFunction(MemoryDef *Def, const MemoryLocation &DefLoc) {
-    LLVM_DEBUG(dbgs() << "  Check if def " << *Def << " ("
-                      << *Def->getMemoryInst()
-                      << ") is at the end of the function\n");
-    SmallVector<MemoryAccess *, 4> WorkList;
-    SmallPtrSet<MemoryAccess *, 8> Visited;
-
-    pushMemUses(Def, WorkList, Visited);
-    for (unsigned I = 0; I < WorkList.size(); I++) {
-      if (WorkList.size() >= MemorySSAScanLimit) {
-        LLVM_DEBUG(dbgs() << "  ... hit exploration limit.\n");
-        return false;
-      }
-
-      MemoryAccess *UseAccess = WorkList[I];
-      if (isa<MemoryPhi>(UseAccess)) {
-        // AliasAnalysis does not account for loops. Limit elimination to
-        // candidates for which we can guarantee they always store to the same
-        // memory location.
-        if (!isGuaranteedLoopInvariant(DefLoc.Ptr))
-          return false;
-
-        pushMemUses(cast<MemoryPhi>(UseAccess), WorkList, Visited);
-        continue;
-      }
-      // TODO: Checking for aliasing is expensive. Consider reducing the amount
-      // of times this is called and/or caching it.
-      Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
-      if (isReadClobber(DefLoc, UseInst)) {
-        LLVM_DEBUG(dbgs() << "  ... hit read clobber " << *UseInst << ".\n");
-        return false;
-      }
-
-      if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
-        pushMemUses(UseDef, WorkList, Visited);
-    }
-    return true;
-  }
+  bool isWriteAtEndOfFunction(MemoryDef *Def, const MemoryLocation &DefLoc);
 
   /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
   /// pair with the MemoryLocation terminated by \p I and a boolean flag
   /// indicating whether \p I is a free-like call.
   std::optional<std::pair<MemoryLocation, bool>>
-  getLocForTerminator(Instruction *I) const {
-    if (auto *CB = dyn_cast<CallBase>(I)) {
-      if (CB->getIntrinsicID() == Intrinsic::lifetime_end)
-        return {
-            std::make_pair(MemoryLocation::getForArgument(CB, 0, &TLI), false)};
-      if (Value *FreedOp = getFreedOperand(CB, &TLI))
-        return {std::make_pair(MemoryLocation::getAfter(FreedOp), true)};
-    }
-
-    return std::nullopt;
-  }
+  getLocForTerminator(Instruction *I) const;
 
   /// Returns true if \p I is a memory terminator instruction like
   /// llvm.lifetime.end or free.
-  bool isMemTerminatorInst(Instruction *I) const {
-    auto *CB = dyn_cast<CallBase>(I);
-    return CB && (CB->getIntrinsicID() == Intrinsic::lifetime_end ||
-                  getFreedOperand(CB, &TLI) != nullptr);
-  }
+  bool isMemTerminatorInst(Instruction *I) const;
 
   /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from
   /// instruction \p AccessI.
   bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
-                       Instruction *MaybeTerm) {
-    std::optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
-        getLocForTerminator(MaybeTerm);
-
-    if (!MaybeTermLoc)
-      return false;
-
-    // If the terminator is a free-like call, all accesses to the underlying
-    // object can be considered terminated.
-    if (getUnderlyingObject(Loc.Ptr) !=
-        getUnderlyingObject(MaybeTermLoc->first.Ptr))
-      return false;
-
-    auto TermLoc = MaybeTermLoc->first;
-    if (MaybeTermLoc->second) {
-      const Value *LocUO = getUnderlyingObject(Loc.Ptr);
-      return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
-    }
-    int64_t InstWriteOffset = 0;
-    int64_t DepWriteOffset = 0;
-    return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, InstWriteOffset,
-                       DepWriteOffset) == OW_Complete;
-  }
+                       Instruction *MaybeTerm);
 
   // Returns true if \p Use may read from \p DefLoc.
-  bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) {
-    if (isNoopIntrinsic(UseInst))
-      return false;
-
-    // Monotonic or weaker atomic stores can be re-ordered and do not need to be
-    // treated as read clobber.
-    if (auto SI = dyn_cast<StoreInst>(UseInst))
-      return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);
-
-    if (!UseInst->mayReadFromMemory())
-      return false;
-
-    if (auto *CB = dyn_cast<CallBase>(UseInst))
-      if (CB->onlyAccessesInaccessibleMemory())
-        return false;
-
-    return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
-  }
+  bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst);
 
   /// Returns true if a dependency between \p Current and \p KillingDef is
   /// guaranteed to be loop invariant for the loops that they are in. Either
@@ -1461,36 +1061,12 @@ struct DSEState {
   /// during execution of the containing function.
   bool isGuaranteedLoopIndependent(const Instruction *Current,
                                    const Instruction *KillingDef,
-                                   const MemoryLocation &CurrentLoc) {
-    // If the dependency is within the same block or loop level (being careful
-    // of irreducible loops), we know that AA will return a valid result for the
-    // memory dependency. (Both at the function level, outside of any loop,
-    // would also be valid but we currently disable that to limit compile time).
-    if (Current->getParent() == KillingDef->getParent())
-      return true;
-    const Loop *CurrentLI = LI.getLoopFor(Current->getParent());
-    if (!ContainsIrreducibleLoops && CurrentLI &&
-        CurrentLI == LI.getLoopFor(KillingDef->getParent()))
-      return true;
-    // Otherwise check the memory location is invariant to any loops.
-    return isGuaranteedLoopInvariant(CurrentLoc.Ptr);
-  }
+                                   const MemoryLocation &CurrentLoc);
 
   /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
   /// loop. In particular, this guarantees that it only references a single
   /// MemoryLocation during execution of the containing function.
-  bool isGuaranteedLoopInvariant(const Value *Ptr) {
-    Ptr = Ptr->stripPointerCasts();
-    if (auto *GEP = dyn_cast<GEPOperator>(Ptr))
-      if (GEP->hasAllConstantIndices())
-        Ptr = GEP->getPointerOperand()->stripPointerCasts();
-
-    if (auto *I = dyn_cast<Instruction>(Ptr)) {
-      return I->getParent()->isEntryBlock() ||
-             (!ContainsIrreducibleLoops && !LI.getLoopFor(I->getParent()));
-    }
-    return true;
-  }
+  bool isGuaranteedLoopInvariant(const Value *Ptr);
 
   // Find a MemoryDef writing to \p KillingLoc and dominating \p StartAccess,
   // with no read access between them or on any other path to a function exit
@@ -1503,884 +1079,1375 @@ struct DSEState {
                   const MemoryLocation &KillingLoc, const Value *KillingUndObj,
                   unsigned &ScanLimit, unsigned &WalkerStepLimit,
                   bool IsMemTerm, unsigned &PartialLimit,
-                  bool IsInitializesAttrMemLoc) {
-    if (ScanLimit == 0 || WalkerStepLimit == 0) {
-      LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
-      return std::nullopt;
-    }
+                  bool IsInitializesAttrMemLoc);
 
-    MemoryAccess *Current = StartAccess;
-    Instruction *KillingI = KillingDef->getMemoryInst();
-    LLVM_DEBUG(dbgs() << "  trying to get dominating access\n");
-
-    // Only optimize defining access of KillingDef when directly starting at its
-    // defining access. The defining access also must only access KillingLoc. At
-    // the moment we only support instructions with a single write location, so
-    // it should be sufficient to disable optimizations for instructions that
-    // also read from memory.
-    bool CanOptimize = OptimizeMemorySSA &&
-                       KillingDef->getDefiningAccess() == StartAccess &&
-                       !KillingI->mayReadFromMemory();
-
-    // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
-    std::optional<MemoryLocation> CurrentLoc;
-    for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) {
-      LLVM_DEBUG({
-        dbgs() << "   visiting " << *Current;
-        if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))
-          dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()
-                 << ")";
-        dbgs() << "\n";
-      });
-
-      // Reached TOP.
-      if (MSSA.isLiveOnEntryDef(Current)) {
-        LLVM_DEBUG(dbgs() << "   ...  found LiveOnEntryDef\n");
-        if (CanOptimize && Current != KillingDef->getDefiningAccess())
-          // The first clobbering def is... none.
-          KillingDef->setOptimized(Current);
-        return std::nullopt;
-      }
+  /// Delete dead memory defs and recursively add their operands to ToRemove if
+  /// they became dead.
+  void
+  deleteDeadInstruction(Instruction *SI,
+                        SmallPtrSetImpl<MemoryAccess *> *Deleted = nullptr);
 
-      // Cost of a step. Accesses in the same block are more likely to be valid
-      // candidates for elimination, hence consider them cheaper.
-      unsigned StepCost = KillingDef->getBlock() == Current->getBlock()
-                              ? MemorySSASameBBStepCost
-                              : MemorySSAOtherBBStepCost;
-      if (WalkerStepLimit <= StepCost) {
-        LLVM_DEBUG(dbgs() << "   ...  hit walker step limit\n");
-        return std::nullopt;
-      }
-      WalkerStepLimit -= StepCost;
+  // Check for any extra throws between \p KillingI and \p DeadI that block
+  // DSE.  This only checks extra maythrows (those that aren't MemoryDefs).
+  // MemoryDefs that may throw are handled during the walk from one def to the
+  // next.
+  bool mayThrowBetween(Instruction *KillingI, Instruction *DeadI,
+                       const Value *KillingUndObj);
 
-      // Return for MemoryPhis. They cannot be eliminated directly and the
-      // caller is responsible for traversing them.
-      if (isa<MemoryPhi>(Current)) {
-        LLVM_DEBUG(dbgs() << "   ...  found MemoryPhi\n");
-        return Current;
-      }
+  // Check if \p DeadI acts as a DSE barrier for \p KillingI. The following
+  // instructions act as barriers:
+  //  * A memory instruction that may throw and \p KillingI accesses a non-stack
+  //  object.
+  //  * Atomic stores stronger than monotonic.
+  bool isDSEBarrier(const Value *KillingUndObj, Instruction *DeadI);
 
-      // Below, check if CurrentDef is a valid candidate to be eliminated by
-      // KillingDef. If it is not, check the next candidate.
-      MemoryDef *CurrentDef = cast<MemoryDef>(Current);
-      Instruction *CurrentI = CurrentDef->getMemoryInst();
+  /// Eliminate writes to objects that are not visible in the caller and are not
+  /// accessed before returning from the function.
+  bool eliminateDeadWritesAtEndOfFunction();
 
-      if (canSkipDef(CurrentDef, !isInvisibleToCallerOnUnwind(KillingUndObj))) {
-        CanOptimize = false;
-        continue;
-      }
+  /// If we have a zero initializing memset following a call to malloc,
+  /// try folding it into a call to calloc.
+  bool tryFoldIntoCalloc(MemoryDef *Def, const Value *DefUO);
 
-      // Before we try to remove anything, check for any extra throwing
-      // instructions that block us from DSEing
-      if (mayThrowBetween(KillingI, CurrentI, KillingUndObj)) {
-        LLVM_DEBUG(dbgs() << "  ... skip, may throw!\n");
-        return std::nullopt;
-      }
+  // Check if there is a dominating condition, that implies that the value
+  // being stored in a ptr is already present in the ptr.
+  bool dominatingConditionImpliesValue(MemoryDef *Def);
 
-      // Check for anything that looks like it will be a barrier to further
-      // removal
-      if (isDSEBarrier(KillingUndObj, CurrentI)) {
-        LLVM_DEBUG(dbgs() << "  ... skip, barrier\n");
-        return std::nullopt;
-      }
+  /// \returns true if \p Def is a no-op store, either because it
+  /// directly stores back a loaded value or stores zero to a calloced object.
+  bool storeIsNoop(MemoryDef *Def, const Value *DefUO);
 
-      // If Current is known to be on path that reads DefLoc or is a read
-      // clobber, bail out, as the path is not profitable. We skip this check
-      // for intrinsic calls, because the code knows how to handle memcpy
-      // intrinsics.
-      if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(KillingLoc, CurrentI))
-        return std::nullopt;
+  bool removePartiallyOverlappedStores(InstOverlapIntervalsTy &IOL);
 
-      // Quick check if there are direct uses that are read-clobbers.
-      if (any_of(Current->uses(), [this, &KillingLoc, StartAccess](Use &U) {
-            if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
-              return !MSSA.dominates(StartAccess, UseOrDef) &&
-                     isReadClobber(KillingLoc, UseOrDef->getMemoryInst());
-            return false;
-          })) {
-        LLVM_DEBUG(dbgs() << "   ...  found a read clobber\n");
-        return std::nullopt;
-      }
+  /// Eliminates writes to locations where the value that is being written
+  /// is already stored at the same location.
+  bool eliminateRedundantStoresOfExistingValues();
 
-      // If Current does not have an analyzable write location or is not
-      // removable, skip it.
-      CurrentLoc = getLocForWrite(CurrentI);
-      if (!CurrentLoc || !isRemovable(CurrentI)) {
-        CanOptimize = false;
-        continue;
-      }
+  // Return the locations written by the initializes attribute.
+  // Note that this function considers:
+  // 1. Unwind edge: use "initializes" attribute only if the callee has
+  //    "nounwind" attribute, or the argument has "dead_on_unwind" attribute,
+  //    or the argument is invisible to caller on unwind. That is, we don't
+  //    perform incorrect DSE on unwind edges in the current function.
+  // 2. Argument alias: for aliasing arguments, the "initializes" attribute is
+  //    the intersected range list of their "initializes" attributes.
+  SmallVector<MemoryLocation, 1> getInitializesArgMemLoc(const Instruction *I);
 
-      // AliasAnalysis does not account for loops. Limit elimination to
-      // candidates for which we can guarantee they always store to the same
-      // memory location and not located in different loops.
-      if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) {
-        LLVM_DEBUG(dbgs() << "  ... not guaranteed loop independent\n");
-        CanOptimize = false;
-        continue;
-      }
+  // Try to eliminate dead defs that access `KillingLocWrapper.MemLoc` and are
+  // killed by `KillingLocWrapper.MemDef`. Return whether
+  // any changes were made, and whether `KillingLocWrapper.DefInst` was deleted.
+  std::pair<bool, bool>
+  eliminateDeadDefs(const MemoryLocationWrapper &KillingLocWrapper);
 
-      if (IsMemTerm) {
-        // If the killing def is a memory terminator (e.g. lifetime.end), check
-        // the next candidate if the current Current does not write the same
-        // underlying object as the terminator.
-        if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) {
-          CanOptimize = false;
-          continue;
-        }
-      } else {
-        int64_t KillingOffset = 0;
-        int64_t DeadOffset = 0;
-        auto OR = isOverwrite(KillingI, CurrentI, KillingLoc, *CurrentLoc,
-                              KillingOffset, DeadOffset);
-        if (CanOptimize) {
-          // CurrentDef is the earliest write clobber of KillingDef. Use it as
-          // optimized access. Do not optimize if CurrentDef is already the
-          // defining access of KillingDef.
-          if (CurrentDef != KillingDef->getDefiningAccess() &&
-              (OR == OW_Complete || OR == OW_MaybePartial))
-            KillingDef->setOptimized(CurrentDef);
-
-          // Once a may-aliasing def is encountered do not set an optimized
-          // access.
-          if (OR != OW_None)
-            CanOptimize = false;
-        }
+  // Try to eliminate dead defs killed by `KillingDefWrapper` and return the
+  // change state: whether make any change.
+  bool eliminateDeadDefs(const MemoryDefWrapper &KillingDefWrapper);
+};
 
-        // If Current does not write to the same object as KillingDef, check
-        // the next candidate.
-        if (OR == OW_Unknown || OR == OW_None)
-          continue;
-        else if (OR == OW_MaybePartial) {
-          // If KillingDef only partially overwrites Current, check the next
-          // candidate if the partial step limit is exceeded. This aggressively
-          // limits the number of candidates for partial store elimination,
-          // which are less likely to be removable in the end.
-          if (PartialLimit <= 1) {
-            WalkerStepLimit -= 1;
-            LLVM_DEBUG(dbgs() << "   ... reached partial limit ... continue with next access\n");
-            continue;
-          }
-          PartialLimit -= 1;
-        }
-      }
-      break;
-    };
+} // end anonymous namespace
 
-    // Accesses to objects accessible after the function returns can only be
-    // eliminated if the access is dead along all paths to the exit. Collect
-    // the blocks with killing (=completely overwriting MemoryDefs) and check if
-    // they cover all paths from MaybeDeadAccess to any function exit.
-    SmallPtrSet<Instruction *, 16> KillingDefs;
-    KillingDefs.insert(KillingDef->getMemoryInst());
-    MemoryAccess *MaybeDeadAccess = Current;
-    MemoryLocation MaybeDeadLoc = *CurrentLoc;
-    Instruction *MaybeDeadI = cast<MemoryDef>(MaybeDeadAccess)->getMemoryInst();
-    LLVM_DEBUG(dbgs() << "  Checking for reads of " << *MaybeDeadAccess << " ("
-                      << *MaybeDeadI << ")\n");
-
-    SmallVector<MemoryAccess *, 32> WorkList;
-    SmallPtrSet<MemoryAccess *, 32> Visited;
-    pushMemUses(MaybeDeadAccess, WorkList, Visited);
-
-    // Check if DeadDef may be read.
-    for (unsigned I = 0; I < WorkList.size(); I++) {
-      MemoryAccess *UseAccess = WorkList[I];
+static void pushMemUses(MemoryAccess *Acc,
+                        SmallVectorImpl<MemoryAccess *> &WorkList,
+                        SmallPtrSetImpl<MemoryAccess *> &Visited) {
+  for (Use &U : Acc->uses()) {
+    auto *MA = cast<MemoryAccess>(U.getUser());
+    if (Visited.insert(MA).second)
+      WorkList.push_back(MA);
+  }
+}
 
-      LLVM_DEBUG(dbgs() << "   " << *UseAccess);
-      // Bail out if the number of accesses to check exceeds the scan limit.
-      if (ScanLimit < (WorkList.size() - I)) {
-        LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
-        return std::nullopt;
-      }
-      --ScanLimit;
-      NumDomMemDefChecks++;
-
-      if (isa<MemoryPhi>(UseAccess)) {
-        if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
-              return DT.properlyDominates(KI->getParent(),
-                                          UseAccess->getBlock());
-            })) {
-          LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
-          continue;
-        }
-        LLVM_DEBUG(dbgs() << "\n    ... adding PHI uses\n");
-        pushMemUses(UseAccess, WorkList, Visited);
-        continue;
-      }
+// Return true if "Arg" is function local and isn't captured before "CB".
+static bool isFuncLocalAndNotCaptured(Value *Arg, const CallBase *CB,
+                                      EarliestEscapeAnalysis &EA) {
+  const Value *UnderlyingObj = getUnderlyingObject(Arg);
+  return isIdentifiedFunctionLocal(UnderlyingObj) &&
+         capturesNothing(
+             EA.getCapturesBefore(UnderlyingObj, CB, /*OrAt*/ true));
+}
 
-      Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
-      LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
+DSEState::DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
+                   DominatorTree &DT, PostDominatorTree &PDT,
+                   const TargetLibraryInfo &TLI, const LoopInfo &LI)
+    : F(F), AA(AA), EA(DT, &LI), BatchAA(AA, &EA), MSSA(MSSA), DT(DT), PDT(PDT),
+      TLI(TLI), DL(F.getDataLayout()), LI(LI) {
+  // Collect blocks with throwing instructions not modeled in MemorySSA and
+  // alloc-like objects.
+  unsigned PO = 0;
+  for (BasicBlock *BB : post_order(&F)) {
+    PostOrderNumbers[BB] = PO++;
+    for (Instruction &I : *BB) {
+      MemoryAccess *MA = MSSA.getMemoryAccess(&I);
+      if (I.mayThrow() && !MA)
+        ThrowingBlocks.insert(I.getParent());
+
+      auto *MD = dyn_cast_or_null<MemoryDef>(MA);
+      if (MD && MemDefs.size() < MemorySSADefsPerBlockLimit &&
+          (getLocForWrite(&I) || isMemTerminatorInst(&I) ||
+           (EnableInitializesImprovement && hasInitializesAttr(&I))))
+        MemDefs.push_back(MD);
+    }
+  }
 
-      if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
-            return DT.dominates(KI, UseInst);
-          })) {
-        LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
-        continue;
-      }
+  // Treat byval, inalloca or dead on return arguments the same as Allocas,
+  // stores to them are dead at the end of the function.
+  for (Argument &AI : F.args()) {
+    if (AI.hasPassPointeeByValueCopyAttr()) {
+      InvisibleToCallerAfterRet.insert({&AI, true});
+      continue;
+    }
 
-      // A memory terminator kills all preceding MemoryDefs and all succeeding
-      // MemoryAccesses. We do not have to check its users.
-      if (isMemTerminator(MaybeDeadLoc, MaybeDeadI, UseInst)) {
-        LLVM_DEBUG(
-            dbgs()
-            << " ... skipping, memterminator invalidates following accesses\n");
-        continue;
-      }
+    if (!AI.getType()->isPointerTy())
+      continue;
 
-      if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
-        LLVM_DEBUG(dbgs() << "    ... adding uses of intrinsic\n");
-        pushMemUses(UseAccess, WorkList, Visited);
-        continue;
-      }
+    const DeadOnReturnInfo &Info = AI.getDeadOnReturnInfo();
+    if (Info.coversAllReachableMemory())
+      InvisibleToCallerAfterRet.insert({&AI, true});
+    else if (uint64_t DeadBytes = Info.getNumberOfDeadBytes())
+      InvisibleToCallerAfterRetBounded.insert({&AI, DeadBytes});
+  }
 
-      if (UseInst->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj)) {
-        LLVM_DEBUG(dbgs() << "  ... found throwing instruction\n");
-        return std::nullopt;
-      }
+  // Collect whether there is any irreducible control flow in the function.
+  ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI);
 
-      // Uses which may read the original MemoryDef mean we cannot eliminate the
-      // original MD. Stop walk.
-      // If KillingDef is a CallInst with "initializes" attribute, the reads in
-      // the callee would be dominated by initializations, so it should be safe.
-      bool IsKillingDefFromInitAttr = false;
-      if (IsInitializesAttrMemLoc) {
-        if (KillingI == UseInst &&
-            KillingUndObj == getUnderlyingObject(MaybeDeadLoc.Ptr))
-          IsKillingDefFromInitAttr = true;
-      }
+  AnyUnreachableExit = any_of(PDT.roots(), [](const BasicBlock *E) {
+    return isa<UnreachableInst>(E->getTerminator());
+  });
+}
 
-      if (isReadClobber(MaybeDeadLoc, UseInst) && !IsKillingDefFromInitAttr) {
-        LLVM_DEBUG(dbgs() << "    ... found read clobber\n");
-        return std::nullopt;
-      }
+LocationSize DSEState::strengthenLocationSize(const Instruction *I,
+                                              LocationSize Size) const {
+  if (auto *CB = dyn_cast<CallBase>(I)) {
+    LibFunc F;
+    if (TLI.getLibFunc(*CB, F) && TLI.has(F) &&
+        (F == LibFunc_memset_chk || F == LibFunc_memcpy_chk)) {
+      // Use the precise location size specified by the 3rd argument
+      // for determining KillingI overwrites DeadLoc if it is a memset_chk
+      // instruction. memset_chk will write either the amount specified as 3rd
+      // argument or the function will immediately abort and exit the program.
+      // NOTE: AA may determine NoAlias if it can prove that the access size
+      // is larger than the allocation size due to that being UB. To avoid
+      // returning potentially invalid NoAlias results by AA, limit the use of
+      // the precise location size to isOverwrite.
+      if (const auto *Len = dyn_cast<ConstantInt>(CB->getArgOperand(2)))
+        return LocationSize::precise(Len->getZExtValue());
+    }
+  }
+  return Size;
+}
 
-      // If this worklist walks back to the original memory access (and the
-      // pointer is not guaranteed loop invariant) then we cannot assume that a
-      // store kills itself.
-      if (MaybeDeadAccess == UseAccess &&
-          !isGuaranteedLoopInvariant(MaybeDeadLoc.Ptr)) {
-        LLVM_DEBUG(dbgs() << "    ... found not loop invariant self access\n");
-        return std::nullopt;
-      }
-      // Otherwise, for the KillingDef and MaybeDeadAccess we only have to check
-      // if it reads the memory location.
-      // TODO: It would probably be better to check for self-reads before
-      // calling the function.
-      if (KillingDef == UseAccess || MaybeDeadAccess == UseAccess) {
-        LLVM_DEBUG(dbgs() << "    ... skipping killing def/dom access\n");
-        continue;
-      }
+OverwriteResult DSEState::isOverwrite(const Instruction *KillingI,
+                                      const Instruction *DeadI,
+                                      const MemoryLocation &KillingLoc,
+                                      const MemoryLocation &DeadLoc,
+                                      int64_t &KillingOff, int64_t &DeadOff) {
+  // AliasAnalysis does not always account for loops. Limit overwrite checks
+  // to dependencies for which we can guarantee they are independent of any
+  // loops they are in.
+  if (!isGuaranteedLoopIndependent(DeadI, KillingI, DeadLoc))
+    return OW_Unknown;
 
-      // Check all uses for MemoryDefs, except for defs completely overwriting
-      // the original location. Otherwise we have to check uses of *all*
-      // MemoryDefs we discover, including non-aliasing ones. Otherwise we might
-      // miss cases like the following
-      //   1 = Def(LoE) ; <----- DeadDef stores [0,1]
-      //   2 = Def(1)   ; (2, 1) = NoAlias,   stores [2,3]
-      //   Use(2)       ; MayAlias 2 *and* 1, loads [0, 3].
-      //                  (The Use points to the *first* Def it may alias)
-      //   3 = Def(1)   ; <---- Current  (3, 2) = NoAlias, (3,1) = MayAlias,
-      //                  stores [0,1]
-      if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
-        if (isCompleteOverwrite(MaybeDeadLoc, MaybeDeadI, UseInst)) {
-          BasicBlock *MaybeKillingBlock = UseInst->getParent();
-          if (PostOrderNumbers.find(MaybeKillingBlock)->second <
-              PostOrderNumbers.find(MaybeDeadAccess->getBlock())->second) {
-            if (!isInvisibleToCallerAfterRet(KillingUndObj, KillingLoc.Ptr,
-                                             KillingLoc.Size)) {
-              LLVM_DEBUG(dbgs()
-                         << "    ... found killing def " << *UseInst << "\n");
-              KillingDefs.insert(UseInst);
-            }
-          } else {
-            LLVM_DEBUG(dbgs()
-                       << "    ... found preceeding def " << *UseInst << "\n");
-            return std::nullopt;
-          }
-        } else
-          pushMemUses(UseDef, WorkList, Visited);
-      }
+  LocationSize KillingLocSize =
+      strengthenLocationSize(KillingI, KillingLoc.Size);
+  const Value *DeadPtr = DeadLoc.Ptr->stripPointerCasts();
+  const Value *KillingPtr = KillingLoc.Ptr->stripPointerCasts();
+  const Value *DeadUndObj = getUnderlyingObject(DeadPtr);
+  const Value *KillingUndObj = getUnderlyingObject(KillingPtr);
+
+  // Check whether the killing store overwrites the whole object, in which
+  // case the size/offset of the dead store does not matter.
+  if (DeadUndObj == KillingUndObj && KillingLocSize.isPrecise() &&
+      isIdentifiedObject(KillingUndObj)) {
+    std::optional<TypeSize> KillingUndObjSize =
+        getPointerSize(KillingUndObj, DL, TLI, &F);
+    if (KillingUndObjSize && *KillingUndObjSize == KillingLocSize.getValue())
+      return OW_Complete;
+  }
+
+  // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
+  // get imprecise values here, though (except for unknown sizes).
+  if (!KillingLocSize.isPrecise() || !DeadLoc.Size.isPrecise()) {
+    // In case no constant size is known, try to use the IR values for the
+    // number of bytes written and check if they match.
+    const auto *KillingMemI = dyn_cast<MemIntrinsic>(KillingI);
+    const auto *DeadMemI = dyn_cast<MemIntrinsic>(DeadI);
+    if (KillingMemI && DeadMemI) {
+      const Value *KillingV = KillingMemI->getLength();
+      const Value *DeadV = DeadMemI->getLength();
+      if (KillingV == DeadV && BatchAA.isMustAlias(DeadLoc, KillingLoc))
+        return OW_Complete;
     }
 
-    // For accesses to locations visible after the function returns, make sure
-    // that the location is dead (=overwritten) along all paths from
-    // MaybeDeadAccess to the exit.
-    if (!isInvisibleToCallerAfterRet(KillingUndObj, KillingLoc.Ptr,
-                                     KillingLoc.Size)) {
-      SmallPtrSet<BasicBlock *, 16> KillingBlocks;
-      for (Instruction *KD : KillingDefs)
-        KillingBlocks.insert(KD->getParent());
-      assert(!KillingBlocks.empty() &&
-             "Expected at least a single killing block");
-
-      // Find the common post-dominator of all killing blocks.
-      BasicBlock *CommonPred = *KillingBlocks.begin();
-      for (BasicBlock *BB : llvm::drop_begin(KillingBlocks)) {
-        if (!CommonPred)
-          break;
-        CommonPred = PDT.findNearestCommonDominator(CommonPred, BB);
-      }
+    // Masked stores have imprecise locations, but we can reason about them
+    // to some extent.
+    return isMaskedStoreOverwrite(KillingI, DeadI, BatchAA);
+  }
 
-      // If the common post-dominator does not post-dominate MaybeDeadAccess,
-      // there is a path from MaybeDeadAccess to an exit not going through a
-      // killing block.
-      if (!PDT.dominates(CommonPred, MaybeDeadAccess->getBlock())) {
-        if (!AnyUnreachableExit)
-          return std::nullopt;
+  const TypeSize KillingSize = KillingLocSize.getValue();
+  const TypeSize DeadSize = DeadLoc.Size.getValue();
+  // Bail out on size comparisons that depend on AA for now.
+  // TODO: Remove AnyScalable once AliasAnalysis can deal with scalable
+  // vectors.
+  const bool AnyScalable = DeadSize.isScalable() || KillingLocSize.isScalable();
 
-        // Fall back to CFG scan starting at all non-unreachable roots if not
-        // all paths to the exit go through CommonPred.
-        CommonPred = nullptr;
-      }
+  if (AnyScalable)
+    return OW_Unknown;
+  // Query the alias information
+  AliasResult AAR = BatchAA.alias(KillingLoc, DeadLoc);
+
+  // If the start pointers are the same, we just have to compare sizes to see if
+  // the killing store was larger than the dead store.
+  if (AAR == AliasResult::MustAlias) {
+    // Make sure that KillingSize is >= DeadSize.
+    if (KillingSize >= DeadSize)
+      return OW_Complete;
+  }
 
-      // If CommonPred itself is in the set of killing blocks, we're done.
-      if (KillingBlocks.count(CommonPred))
-        return {MaybeDeadAccess};
-
-      SetVector<BasicBlock *> WorkList;
-      // If CommonPred is null, there are multiple exits from the function.
-      // They all have to be added to the worklist.
-      if (CommonPred)
-        WorkList.insert(CommonPred);
-      else
-        for (BasicBlock *R : PDT.roots()) {
-          if (!isa<UnreachableInst>(R->getTerminator()))
-            WorkList.insert(R);
-        }
+  // If we hit a partial alias we may have a full overwrite
+  if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) {
+    int32_t Off = AAR.getOffset();
+    if (Off >= 0 && (uint64_t)Off + DeadSize <= KillingSize)
+      return OW_Complete;
+  }
 
-      NumCFGTries++;
-      // Check if all paths starting from an exit node go through one of the
-      // killing blocks before reaching MaybeDeadAccess.
-      for (unsigned I = 0; I < WorkList.size(); I++) {
-        NumCFGChecks++;
-        BasicBlock *Current = WorkList[I];
-        if (KillingBlocks.count(Current))
-          continue;
-        if (Current == MaybeDeadAccess->getBlock())
-          return std::nullopt;
+  // If we can't resolve the same pointers to the same object, then we can't
+  // analyze them at all.
+  if (DeadUndObj != KillingUndObj) {
+    // Non-aliasing stores to different objects don't overlap. Note that if
+    // the killing store is known to overwrite the whole object (an
+    // out-of-bounds access overwrites the whole object as well), then it is
+    // assumed to completely overwrite any store to the same object, even if
+    // they don't actually alias (see next check).
+    if (AAR == AliasResult::NoAlias)
+      return OW_None;
+    return OW_Unknown;
+  }
 
-        // MaybeDeadAccess is reachable from the entry, so we don't have to
-        // explore unreachable blocks further.
-        if (!DT.isReachableFromEntry(Current))
-          continue;
+  // Okay, we have stores to two completely different pointers.  Try to
+  // decompose the pointer into a "base + constant_offset" form.  If the base
+  // pointers are equal, then we can reason about the two stores.
+  DeadOff = 0;
+  KillingOff = 0;
+  const Value *DeadBasePtr =
+      GetPointerBaseWithConstantOffset(DeadPtr, DeadOff, DL);
+  const Value *KillingBasePtr =
+      GetPointerBaseWithConstantOffset(KillingPtr, KillingOff, DL);
+
+  // If the base pointers still differ, we have two completely different
+  // stores.
+  if (DeadBasePtr != KillingBasePtr)
+    return OW_Unknown;
 
-        WorkList.insert_range(predecessors(Current));
+  // The killing access completely overlaps the dead store if and only if
+  // both start and end of the dead one is "inside" the killing one:
+  //    |<->|--dead--|<->|
+  //    |-----killing------|
+  // Accesses may overlap if and only if start of one of them is "inside"
+  // another one:
+  //    |<->|--dead--|<-------->|
+  //    |-------killing--------|
+  //           OR
+  //    |-------dead-------|
+  //    |<->|---killing---|<----->|
+  //
+  // We have to be careful here as *Off is signed while *.Size is unsigned.
 
-        if (WorkList.size() >= MemorySSAPathCheckLimit)
-          return std::nullopt;
-      }
-      NumCFGSuccess++;
-    }
+  // Check if the dead access starts "not before" the killing one.
+  if (DeadOff >= KillingOff) {
+    // If the dead access ends "not after" the killing access then the
+    // dead one is completely overwritten by the killing one.
+    if (uint64_t(DeadOff - KillingOff) + DeadSize <= KillingSize)
+      return OW_Complete;
+    // If start of the dead access is "before" end of the killing access
+    // then accesses overlap.
+    else if ((uint64_t)(DeadOff - KillingOff) < KillingSize)
+      return OW_MaybePartial;
+  }
+  // If start of the killing access is "before" end of the dead access then
+  // accesses overlap.
+  else if ((uint64_t)(KillingOff - DeadOff) < DeadSize) {
+    return OW_MaybePartial;
+  }
 
-    // No aliasing MemoryUses of MaybeDeadAccess found, MaybeDeadAccess is
-    // potentially dead.
-    return {MaybeDeadAccess};
+  // Can reach here only if accesses are known not to overlap.
+  return OW_None;
+}
+
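
A concrete instance of the interval arithmetic above, assuming both pointers
decompose to the same base: the dead access at offset 4 lies inside the
killing interval [0, 8), so the result is OW_Complete; a wider store starting
at offset 6 would only be OW_MaybePartial.

  #include <string.h>

  void f(char *p) {
    p[4] = 1;        // DeadOff = 4, DeadSize = 1: inside [0, 8)
    memset(p, 0, 8); // KillingOff = 0, KillingSize = 8 -> OW_Complete
  }
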
+bool DSEState::isInvisibleToCallerAfterRet(const Value *V, const Value *Ptr,
+                                           const LocationSize StoreSize) {
+  if (isa<AllocaInst>(V))
+    return true;
+
+  auto IBounded = InvisibleToCallerAfterRetBounded.find(V);
+  if (IBounded != InvisibleToCallerAfterRetBounded.end()) {
+    int64_t ValueOffset;
+    [[maybe_unused]] const Value *BaseValue =
+        GetPointerBaseWithConstantOffset(Ptr, ValueOffset, DL);
+    // If we are not able to find a constant offset from the UO, we have to
+    // pessimistically assume that the store writes to memory out of the
+    // dead_on_return bounds.
+    if (BaseValue != V)
+      return false;
+    // This store is only invisible after return if we are in bounds of the
+    // range marked dead.
+    if (StoreSize.hasValue() &&
+        ValueOffset + StoreSize.getValue() <= IBounded->second &&
+        ValueOffset >= 0)
+      return true;
   }
+  auto I = InvisibleToCallerAfterRet.insert({V, false});
+  if (I.second && isInvisibleToCallerOnUnwind(V) && isNoAliasCall(V))
+    I.first->second = capturesNothing(PointerMayBeCaptured(
+        V, /*ReturnCaptures=*/true, CaptureComponents::Provenance));
+  return I.first->second;
+}
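
A minimal standalone sketch of the bounds test above (the helper name is
hypothetical): the store must start at a non-negative constant offset from
the underlying object and end within the byte range known to be dead on
return.

  #include <stdbool.h>
  #include <stdint.h>

  // Is the store wholly inside the [0, DeadBytes) range that is dead after
  // the function returns?
  bool storeInDeadRange(int64_t Offset, uint64_t StoreSize,
                        uint64_t DeadBytes) {
    return Offset >= 0 && (uint64_t)Offset + StoreSize <= DeadBytes;
  }
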
 
-  /// Delete dead memory defs and recursively add their operands to ToRemove if
-  /// they became dead.
-  void
-  deleteDeadInstruction(Instruction *SI,
-                        SmallPtrSetImpl<MemoryAccess *> *Deleted = nullptr) {
-    MemorySSAUpdater Updater(&MSSA);
-    SmallVector<Instruction *, 32> NowDeadInsts;
-    NowDeadInsts.push_back(SI);
-    --NumFastOther;
-
-    while (!NowDeadInsts.empty()) {
-      Instruction *DeadInst = NowDeadInsts.pop_back_val();
-      ++NumFastOther;
-
-      // Try to preserve debug information attached to the dead instruction.
-      salvageDebugInfo(*DeadInst);
-      salvageKnowledge(DeadInst);
-
-      // Remove the Instruction from MSSA.
-      MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst);
-      bool IsMemDef = MA && isa<MemoryDef>(MA);
-      if (MA) {
-        if (IsMemDef) {
-          auto *MD = cast<MemoryDef>(MA);
-          SkipStores.insert(MD);
-          if (Deleted)
-            Deleted->insert(MD);
-          if (auto *SI = dyn_cast<StoreInst>(MD->getMemoryInst())) {
-            if (SI->getValueOperand()->getType()->isPointerTy()) {
-              const Value *UO = getUnderlyingObject(SI->getValueOperand());
-              if (CapturedBeforeReturn.erase(UO))
-                ShouldIterateEndOfFunctionDSE = true;
-              InvisibleToCallerAfterRet.erase(UO);
-              InvisibleToCallerAfterRetBounded.erase(UO);
-            }
-          }
-        }
+bool DSEState::isInvisibleToCallerOnUnwind(const Value *V) {
+  bool RequiresNoCaptureBeforeUnwind;
+  if (!isNotVisibleOnUnwind(V, RequiresNoCaptureBeforeUnwind))
+    return false;
+  if (!RequiresNoCaptureBeforeUnwind)
+    return true;
 
-        Updater.removeMemoryAccess(MA);
-      }
+  auto I = CapturedBeforeReturn.insert({V, true});
+  if (I.second)
+    // NOTE: This could be made more precise by PointerMayBeCapturedBefore
+    // with the killing MemoryDef. But we refrain from doing so for now to
+    // limit compile-time and this does not cause any changes to the number
+    // of stores removed on a large test set in practice.
+    I.first->second = capturesAnything(PointerMayBeCaptured(
+        V, /*ReturnCaptures=*/false, CaptureComponents::Provenance));
+  return !I.first->second;
+}
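
Illustrating the capture check (g and h stand for arbitrary external
functions): once the noalias allocation escapes, stores to it are visible to
an unwinding caller and can no longer be treated as invisible on unwind.

  #include <stdlib.h>

  void g(char *); // may stash the pointer somewhere reachable by the caller
  void h(void);   // may unwind (e.g. a callee compiled with exceptions)

  void f(void) {
    char *p = malloc(4); // noalias result: initially invisible on unwind
    if (!p) return;
    g(p);                // p is captured before return
    p[0] = 1;            // kept: visible to the caller if h() unwinds
    h();
    p[0] = 2;
    free(p);
  }
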
 
-      auto I = IOLs.find(DeadInst->getParent());
-      if (I != IOLs.end())
-        I->second.erase(DeadInst);
-      // Remove its operands
-      for (Use &O : DeadInst->operands())
-        if (Instruction *OpI = dyn_cast<Instruction>(O)) {
-          O.set(PoisonValue::get(O->getType()));
-          if (isInstructionTriviallyDead(OpI, &TLI))
-            NowDeadInsts.push_back(OpI);
-        }
+std::optional<MemoryLocation> DSEState::getLocForWrite(Instruction *I) const {
+  if (!I->mayWriteToMemory())
+    return std::nullopt;
 
-      EA.removeInstruction(DeadInst);
-      // Remove memory defs directly if they don't produce results, but only
-      // queue other dead instructions for later removal. They may have been
-      // used as memory locations that have been cached by BatchAA. Removing
-      // them here may lead to newly created instructions to be allocated at the
-      // same address, yielding stale cache entries.
-      if (IsMemDef && DeadInst->getType()->isVoidTy())
-        DeadInst->eraseFromParent();
-      else
-        ToRemove.push_back(DeadInst);
-    }
-  }
+  if (auto *CB = dyn_cast<CallBase>(I))
+    return MemoryLocation::getForDest(CB, TLI);
 
-  // Check for any extra throws between \p KillingI and \p DeadI that block
-  // DSE.  This only checks extra maythrows (those that aren't MemoryDef's).
-  // MemoryDef that may throw are handled during the walk from one def to the
-  // next.
-  bool mayThrowBetween(Instruction *KillingI, Instruction *DeadI,
-                       const Value *KillingUndObj) {
-    // First see if we can ignore it by using the fact that KillingI is an
-    // alloca/alloca like object that is not visible to the caller during
-    // execution of the function.
-    if (KillingUndObj && isInvisibleToCallerOnUnwind(KillingUndObj))
-      return false;
+  return MemoryLocation::getOrNone(I);
+}
 
-    if (KillingI->getParent() == DeadI->getParent())
-      return ThrowingBlocks.count(KillingI->getParent());
-    return !ThrowingBlocks.empty();
+SmallVector<std::pair<MemoryLocation, bool>, 1>
+DSEState::getLocForInst(Instruction *I, bool ConsiderInitializesAttr) {
+  SmallVector<std::pair<MemoryLocation, bool>, 1> Locations;
+  if (isMemTerminatorInst(I)) {
+    if (auto Loc = getLocForTerminator(I))
+      Locations.push_back(std::make_pair(Loc->first, false));
+    return Locations;
   }
 
-  // Check if \p DeadI acts as a DSE barrier for \p KillingI. The following
-  // instructions act as barriers:
-  //  * A memory instruction that may throw and \p KillingI accesses a non-stack
-  //  object.
-  //  * Atomic stores stronger that monotonic.
-  bool isDSEBarrier(const Value *KillingUndObj, Instruction *DeadI) {
-    // If DeadI may throw it acts as a barrier, unless we are to an
-    // alloca/alloca like object that does not escape.
-    if (DeadI->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj))
-      return true;
+  if (auto Loc = getLocForWrite(I))
+    Locations.push_back(std::make_pair(*Loc, false));
 
-    // If DeadI is an atomic load/store stronger than monotonic, do not try to
-    // eliminate/reorder it.
-    if (DeadI->isAtomic()) {
-      if (auto *LI = dyn_cast<LoadInst>(DeadI))
-        return isStrongerThanMonotonic(LI->getOrdering());
-      if (auto *SI = dyn_cast<StoreInst>(DeadI))
-        return isStrongerThanMonotonic(SI->getOrdering());
-      if (auto *ARMW = dyn_cast<AtomicRMWInst>(DeadI))
-        return isStrongerThanMonotonic(ARMW->getOrdering());
-      if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(DeadI))
-        return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
-               isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
-      llvm_unreachable("other instructions should be skipped in MemorySSA");
+  if (ConsiderInitializesAttr) {
+    for (auto &MemLoc : getInitializesArgMemLoc(I)) {
+      Locations.push_back(std::make_pair(MemLoc, true));
     }
-    return false;
   }
+  return Locations;
+}
 
-  /// Eliminate writes to objects that are not visible in the caller and are not
-  /// accessed before returning from the function.
-  bool eliminateDeadWritesAtEndOfFunction() {
-    bool MadeChange = false;
-    LLVM_DEBUG(
-        dbgs()
-        << "Trying to eliminate MemoryDefs at the end of the function\n");
-    do {
-      ShouldIterateEndOfFunctionDSE = false;
-      for (MemoryDef *Def : llvm::reverse(MemDefs)) {
-        if (SkipStores.contains(Def))
-          continue;
+bool DSEState::isRemovable(Instruction *I) {
+  assert(getLocForWrite(I) && "Must have analyzable write");
 
-        Instruction *DefI = Def->getMemoryInst();
-        auto DefLoc = getLocForWrite(DefI);
-        if (!DefLoc || !isRemovable(DefI)) {
-          LLVM_DEBUG(dbgs() << "  ... could not get location for write or "
-                               "instruction not removable.\n");
-          continue;
-        }
+  // Don't remove volatile stores or atomic stores stronger than unordered.
+  if (StoreInst *SI = dyn_cast<StoreInst>(I))
+    return SI->isUnordered();
 
-        // NOTE: Currently eliminating writes at the end of a function is
-        // limited to MemoryDefs with a single underlying object, to save
-        // compile-time. In practice it appears the case with multiple
-        // underlying objects is very uncommon. If it turns out to be important,
-        // we can use getUnderlyingObjects here instead.
-        const Value *UO = getUnderlyingObject(DefLoc->Ptr);
-        if (!isInvisibleToCallerAfterRet(UO, DefLoc->Ptr, DefLoc->Size))
-          continue;
+  if (auto *CB = dyn_cast<CallBase>(I)) {
+    // Don't remove volatile memory intrinsics.
+    if (auto *MI = dyn_cast<MemIntrinsic>(CB))
+      return !MI->isVolatile();
 
-        if (isWriteAtEndOfFunction(Def, *DefLoc)) {
-          // See through pointer-to-pointer bitcasts
-          LLVM_DEBUG(dbgs() << "   ... MemoryDef is not accessed until the end "
-                               "of the function\n");
-          deleteDeadInstruction(DefI);
-          ++NumFastStores;
-          MadeChange = true;
-        }
-      }
-    } while (ShouldIterateEndOfFunctionDSE);
-    return MadeChange;
+    // Never remove dead lifetime intrinsics, e.g. because they are followed
+    // by a free.
+    if (CB->isLifetimeStartOrEnd())
+      return false;
+
+    return CB->use_empty() && CB->willReturn() && CB->doesNotThrow() &&
+           !CB->isTerminator();
   }
 
-  /// If we have a zero initializing memset following a call to malloc,
-  /// try folding it into a call to calloc.
-  bool tryFoldIntoCalloc(MemoryDef *Def, const Value *DefUO) {
-    Instruction *DefI = Def->getMemoryInst();
-    MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI);
-    if (!MemSet)
-      // TODO: Could handle zero store to small allocation as well.
-      return false;
-    Constant *StoredConstant = dyn_cast<Constant>(MemSet->getValue());
-    if (!StoredConstant || !StoredConstant->isNullValue())
-      return false;
+  return false;
+}
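
Source-level intuition for the predicate above: plain and unordered stores
are removal candidates, while volatile accesses and ordered atomics are
always kept.

  #include <stdatomic.h>

  void f(int *p, volatile int *q, _Atomic int *a) {
    *p = 1;                                            // removable candidate
    *q = 1;                                            // volatile: kept
    atomic_store_explicit(a, 1, memory_order_release); // ordered atomic: kept
  }
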
 
-    if (!isRemovable(DefI))
-      // The memset might be volatile..
-      return false;
+bool DSEState::isCompleteOverwrite(const MemoryLocation &DefLoc,
+                                   Instruction *DefInst, Instruction *UseInst) {
+  // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
+  // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
+  // MemoryDef.
+  if (!UseInst->mayWriteToMemory())
+    return false;
 
-    if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
-        F.hasFnAttribute(Attribute::SanitizeAddress) ||
-        F.hasFnAttribute(Attribute::SanitizeHWAddress) ||
-        F.getName() == "calloc")
-      return false;
-    auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUO));
-    if (!Malloc)
-      return false;
-    auto *InnerCallee = Malloc->getCalledFunction();
-    if (!InnerCallee)
+  if (auto *CB = dyn_cast<CallBase>(UseInst))
+    if (CB->onlyAccessesInaccessibleMemory())
       return false;
-    LibFunc Func = NotLibFunc;
-    StringRef ZeroedVariantName;
-    if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) ||
-        Func != LibFunc_malloc) {
-      Attribute Attr = Malloc->getFnAttr("alloc-variant-zeroed");
-      if (!Attr.isValid())
-        return false;
-      ZeroedVariantName = Attr.getValueAsString();
-      if (ZeroedVariantName.empty())
-        return false;
-    }
 
-    // Gracefully handle malloc with unexpected memory attributes.
-    auto *MallocDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(Malloc));
-    if (!MallocDef)
+  int64_t InstWriteOffset, DepWriteOffset;
+  if (auto CC = getLocForWrite(UseInst))
+    return isOverwrite(UseInst, DefInst, *CC, DefLoc, InstWriteOffset,
+                       DepWriteOffset) == OW_Complete;
+  return false;
+}
+
+bool DSEState::isWriteAtEndOfFunction(MemoryDef *Def,
+                                      const MemoryLocation &DefLoc) {
+  LLVM_DEBUG(dbgs() << "  Check if def " << *Def << " ("
+                    << *Def->getMemoryInst()
+                    << ") is at the end the function \n");
+  SmallVector<MemoryAccess *, 4> WorkList;
+  SmallPtrSet<MemoryAccess *, 8> Visited;
+
+  pushMemUses(Def, WorkList, Visited);
+  for (unsigned I = 0; I < WorkList.size(); I++) {
+    if (WorkList.size() >= MemorySSAScanLimit) {
+      LLVM_DEBUG(dbgs() << "  ... hit exploration limit.\n");
       return false;
+    }
 
-    auto shouldCreateCalloc = [](CallInst *Malloc, CallInst *Memset) {
-      // Check for br(icmp ptr, null), truebb, falsebb) pattern at the end
-      // of malloc block
-      auto *MallocBB = Malloc->getParent(),
-        *MemsetBB = Memset->getParent();
-      if (MallocBB == MemsetBB)
-        return true;
-      auto *Ptr = Memset->getArgOperand(0);
-      auto *TI = MallocBB->getTerminator();
-      BasicBlock *TrueBB, *FalseBB;
-      if (!match(TI, m_Br(m_SpecificICmp(ICmpInst::ICMP_EQ, m_Specific(Ptr),
-                                         m_Zero()),
-                          TrueBB, FalseBB)))
-        return false;
-      if (MemsetBB != FalseBB)
+    MemoryAccess *UseAccess = WorkList[I];
+    if (isa<MemoryPhi>(UseAccess)) {
+      // AliasAnalysis does not account for loops. Limit elimination to
+      // candidates for which we can guarantee they always store to the same
+      // memory location.
+      if (!isGuaranteedLoopInvariant(DefLoc.Ptr))
         return false;
-      return true;
-    };
 
-    if (Malloc->getOperand(0) != MemSet->getLength())
-      return false;
-    if (!shouldCreateCalloc(Malloc, MemSet) || !DT.dominates(Malloc, MemSet) ||
-        !memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT))
-      return false;
-    IRBuilder<> IRB(Malloc);
-    assert(Func == LibFunc_malloc || !ZeroedVariantName.empty());
-    Value *Calloc = nullptr;
-    if (!ZeroedVariantName.empty()) {
-      LLVMContext &Ctx = Malloc->getContext();
-      AttributeList Attrs = InnerCallee->getAttributes();
-      AllocFnKind AllocKind =
-          Attrs.getFnAttr(Attribute::AllocKind).getAllocKind() |
-          AllocFnKind::Zeroed;
-      AllocKind &= ~AllocFnKind::Uninitialized;
-      Attrs =
-          Attrs.addFnAttribute(Ctx, Attribute::getWithAllocKind(Ctx, AllocKind))
-              .removeFnAttribute(Ctx, "alloc-variant-zeroed");
-      FunctionCallee ZeroedVariant = Malloc->getModule()->getOrInsertFunction(
-          ZeroedVariantName, InnerCallee->getFunctionType(), Attrs);
-      cast<Function>(ZeroedVariant.getCallee())
-          ->setCallingConv(Malloc->getCallingConv());
-      SmallVector<Value *, 3> Args;
-      Args.append(Malloc->arg_begin(), Malloc->arg_end());
-      CallInst *CI = IRB.CreateCall(ZeroedVariant, Args, ZeroedVariantName);
-      CI->setCallingConv(Malloc->getCallingConv());
-      Calloc = CI;
-    } else {
-      Type *SizeTTy = Malloc->getArgOperand(0)->getType();
-      Calloc =
-          emitCalloc(ConstantInt::get(SizeTTy, 1), Malloc->getArgOperand(0),
-                     IRB, TLI, Malloc->getType()->getPointerAddressSpace());
+      pushMemUses(cast<MemoryPhi>(UseAccess), WorkList, Visited);
+      continue;
     }
-    if (!Calloc)
+    // TODO: Checking for aliasing is expensive. Consider reducing the amount
+    // of times this is called and/or caching it.
+    Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
+    if (isReadClobber(DefLoc, UseInst)) {
+      LLVM_DEBUG(dbgs() << "  ... hit read clobber " << *UseInst << ".\n");
       return false;
+    }
 
-    MemorySSAUpdater Updater(&MSSA);
-    auto *NewAccess =
-      Updater.createMemoryAccessAfter(cast<Instruction>(Calloc), nullptr,
-                                      MallocDef);
-    auto *NewAccessMD = cast<MemoryDef>(NewAccess);
-    Updater.insertDef(NewAccessMD, /*RenameUses=*/true);
-    Malloc->replaceAllUsesWith(Calloc);
-    deleteDeadInstruction(Malloc);
-    return true;
+    if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
+      pushMemUses(UseDef, WorkList, Visited);
   }
+  return true;
+}
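
The shape of write this scan is looking for, sketched at the source level
(init and use are placeholder externals): the final store to a local that
nothing reads again before returning.

  void init(char *);
  int use(char *);

  int f(void) {
    char buf[16]; // invisible to the caller after return
    init(buf);
    int r = use(buf);
    buf[0] = 0;   // dead: no later read of buf on any path to the exit
    return r;
  }
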
 
-  // Check if there is a dominating condition, that implies that the value
-  // being stored in a ptr is already present in the ptr.
-  bool dominatingConditionImpliesValue(MemoryDef *Def) {
-    auto *StoreI = cast<StoreInst>(Def->getMemoryInst());
-    BasicBlock *StoreBB = StoreI->getParent();
-    Value *StorePtr = StoreI->getPointerOperand();
-    Value *StoreVal = StoreI->getValueOperand();
-
-    DomTreeNode *IDom = DT.getNode(StoreBB)->getIDom();
-    if (!IDom)
-      return false;
+std::optional<std::pair<MemoryLocation, bool>>
+DSEState::getLocForTerminator(Instruction *I) const {
+  if (auto *CB = dyn_cast<CallBase>(I)) {
+    if (CB->getIntrinsicID() == Intrinsic::lifetime_end)
+      return {
+          std::make_pair(MemoryLocation::getForArgument(CB, 0, &TLI), false)};
+    if (Value *FreedOp = getFreedOperand(CB, &TLI))
+      return {std::make_pair(MemoryLocation::getAfter(FreedOp), true)};
+  }
 
-    auto *BI = dyn_cast<BranchInst>(IDom->getBlock()->getTerminator());
-    if (!BI || !BI->isConditional())
-      return false;
+  return std::nullopt;
+}
 
-    // In case both blocks are the same, it is not possible to determine
-    // if optimization is possible. (We would not want to optimize a store
-    // in the FalseBB if condition is true and vice versa.)
-    if (BI->getSuccessor(0) == BI->getSuccessor(1))
-      return false;
+bool DSEState::isMemTerminatorInst(Instruction *I) const {
+  auto *CB = dyn_cast<CallBase>(I);
+  return CB && (CB->getIntrinsicID() == Intrinsic::lifetime_end ||
+                getFreedOperand(CB, &TLI) != nullptr);
+}
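
A memory-terminator example at the source level: the free call ends the
lifetime of the whole allocation, so a preceding store with no intervening
read is dead.

  #include <stdlib.h>

  void f(void) {
    char *p = malloc(8);
    if (!p) return;
    p[0] = 1; // dead: no read before the terminator below
    free(p);  // terminates all accesses to the underlying object
  }
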
 
-    Instruction *ICmpL;
-    CmpPredicate Pred;
-    if (!match(BI->getCondition(),
-               m_c_ICmp(Pred,
-                        m_CombineAnd(m_Load(m_Specific(StorePtr)),
-                                     m_Instruction(ICmpL)),
-                        m_Specific(StoreVal))) ||
-        !ICmpInst::isEquality(Pred))
-      return false;
+bool DSEState::isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
+                               Instruction *MaybeTerm) {
+  std::optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
+      getLocForTerminator(MaybeTerm);
 
-    // In case the else blocks also branches to the if block or the other way
-    // around it is not possible to determine if the optimization is possible.
-    if (Pred == ICmpInst::ICMP_EQ &&
-        !DT.dominates(BasicBlockEdge(BI->getParent(), BI->getSuccessor(0)),
-                      StoreBB))
-      return false;
+  if (!MaybeTermLoc)
+    return false;
+
+  // If the terminator is a free-like call, all accesses to the underlying
+  // object can be considered terminated.
+  if (getUnderlyingObject(Loc.Ptr) !=
+      getUnderlyingObject(MaybeTermLoc->first.Ptr))
+    return false;
+
+  auto TermLoc = MaybeTermLoc->first;
+  if (MaybeTermLoc->second) {
+    const Value *LocUO = getUnderlyingObject(Loc.Ptr);
+    return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
+  }
+  int64_t InstWriteOffset = 0;
+  int64_t DepWriteOffset = 0;
+  return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, InstWriteOffset,
+                     DepWriteOffset) == OW_Complete;
+}
+
+bool DSEState::isReadClobber(const MemoryLocation &DefLoc,
+                             Instruction *UseInst) {
+  if (isNoopIntrinsic(UseInst))
+    return false;
+
+  // Monotonic or weaker atomic stores can be re-ordered and do not need to be
+  // treated as a read clobber.
+  if (auto SI = dyn_cast<StoreInst>(UseInst))
+    return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);
+
+  if (!UseInst->mayReadFromMemory())
+    return false;
 
-    if (Pred == ICmpInst::ICMP_NE &&
-        !DT.dominates(BasicBlockEdge(BI->getParent(), BI->getSuccessor(1)),
-                      StoreBB))
+  if (auto *CB = dyn_cast<CallBase>(UseInst))
+    if (CB->onlyAccessesInaccessibleMemory())
       return false;
 
-    MemoryAccess *LoadAcc = MSSA.getMemoryAccess(ICmpL);
-    MemoryAccess *ClobAcc =
-        MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def, BatchAA);
+  return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
+}
+
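
What a read clobber looks like in practice (use is a placeholder external):
the intervening load may observe the first store, so the first store cannot
be killed by the second.

  void use(int);

  void f(int *p) {
    *p = 1;  // kept: the load below is a read clobber for this location
    use(*p); // reads the stored value
    *p = 2;
  }
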
+bool DSEState::isGuaranteedLoopIndependent(const Instruction *Current,
+                                           const Instruction *KillingDef,
+                                           const MemoryLocation &CurrentLoc) {
+  // If the dependency is within the same block or loop level (being careful
+  // of irreducible loops), we know that AA will return a valid result for the
+  // memory dependency. (Both at the function level, outside of any loop,
+  // would also be valid but we currently disable that to limit compile time).
+  if (Current->getParent() == KillingDef->getParent())
+    return true;
+  const Loop *CurrentLI = LI.getLoopFor(Current->getParent());
+  if (!ContainsIrreducibleLoops && CurrentLI &&
+      CurrentLI == LI.getLoopFor(KillingDef->getParent()))
+    return true;
+  // Otherwise check the memory location is invariant to any loops.
+  return isGuaranteedLoopInvariant(CurrentLoc.Ptr);
+}
 
-    return MSSA.dominates(ClobAcc, LoadAcc);
+bool DSEState::isGuaranteedLoopInvariant(const Value *Ptr) {
+  Ptr = Ptr->stripPointerCasts();
+  if (auto *GEP = dyn_cast<GEPOperator>(Ptr))
+    if (GEP->hasAllConstantIndices())
+      Ptr = GEP->getPointerOperand()->stripPointerCasts();
+
+  if (auto *I = dyn_cast<Instruction>(Ptr)) {
+    return I->getParent()->isEntryBlock() ||
+           (!ContainsIrreducibleLoops && !LI.getLoopFor(I->getParent()));
   }
+  return true;
+}
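
Why the invariance check matters, in source form: when the stored-to address
changes across iterations, a store in one iteration must not be treated as
overwriting the store of an earlier iteration.

  void f(int **pp, int n) {
    for (int i = 0; i < n; ++i) {
      int *q = pp[i]; // defined inside the loop: not guaranteed invariant
      *q = 0;         // may target a different location each iteration
    }
  }
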
 
-  /// \returns true if \p Def is a no-op store, either because it
-  /// directly stores back a loaded value or stores zero to a calloced object.
-  bool storeIsNoop(MemoryDef *Def, const Value *DefUO) {
-    Instruction *DefI = Def->getMemoryInst();
-    StoreInst *Store = dyn_cast<StoreInst>(DefI);
-    MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI);
-    Constant *StoredConstant = nullptr;
-    if (Store)
-      StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
-    else if (MemSet)
-      StoredConstant = dyn_cast<Constant>(MemSet->getValue());
-    else
-      return false;
+std::optional<MemoryAccess *> DSEState::getDomMemoryDef(
+    MemoryDef *KillingDef, MemoryAccess *StartAccess,
+    const MemoryLocation &KillingLoc, const Value *KillingUndObj,
+    unsigned &ScanLimit, unsigned &WalkerStepLimit, bool IsMemTerm,
+    unsigned &PartialLimit, bool IsInitializesAttrMemLoc) {
+  if (ScanLimit == 0 || WalkerStepLimit == 0) {
+    LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
+    return std::nullopt;
+  }
 
-    if (!isRemovable(DefI))
-      return false;
+  MemoryAccess *Current = StartAccess;
+  Instruction *KillingI = KillingDef->getMemoryInst();
+  LLVM_DEBUG(dbgs() << "  trying to get dominating access\n");
+
+  // Only optimize defining access of KillingDef when directly starting at its
+  // defining access. The defining access also must only access KillingLoc. At
+  // the moment we only support instructions with a single write location, so
+  // it should be sufficient to disable optimizations for instructions that
+  // also read from memory.
+  bool CanOptimize = OptimizeMemorySSA &&
+                     KillingDef->getDefiningAccess() == StartAccess &&
+                     !KillingI->mayReadFromMemory();
+
+  // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
+  std::optional<MemoryLocation> CurrentLoc;
+  for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) {
+    LLVM_DEBUG({
+      dbgs() << "   visiting " << *Current;
+      if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))
+        dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()
+               << ")";
+      dbgs() << "\n";
+    });
+
+    // Reached TOP.
+    if (MSSA.isLiveOnEntryDef(Current)) {
+      LLVM_DEBUG(dbgs() << "   ...  found LiveOnEntryDef\n");
+      if (CanOptimize && Current != KillingDef->getDefiningAccess())
+        // The first clobbering def is... none.
+        KillingDef->setOptimized(Current);
+      return std::nullopt;
+    }
 
-    if (StoredConstant) {
-      Constant *InitC =
-          getInitialValueOfAllocation(DefUO, &TLI, StoredConstant->getType());
-      // If the clobbering access is LiveOnEntry, no instructions between them
-      // can modify the memory location.
-      if (InitC && InitC == StoredConstant)
-        return MSSA.isLiveOnEntryDef(
-            MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def, BatchAA));
+    // Cost of a step. Accesses in the same block are more likely to be valid
+    // candidates for elimination, hence consider them cheaper.
+    unsigned StepCost = KillingDef->getBlock() == Current->getBlock()
+                            ? MemorySSASameBBStepCost
+                            : MemorySSAOtherBBStepCost;
+    if (WalkerStepLimit <= StepCost) {
+      LLVM_DEBUG(dbgs() << "   ...  hit walker step limit\n");
+      return std::nullopt;
     }
+    WalkerStepLimit -= StepCost;
 
-    if (!Store)
-      return false;
+    // Return for MemoryPhis. They cannot be eliminated directly and the
+    // caller is responsible for traversing them.
+    if (isa<MemoryPhi>(Current)) {
+      LLVM_DEBUG(dbgs() << "   ...  found MemoryPhi\n");
+      return Current;
+    }
 
-    if (dominatingConditionImpliesValue(Def))
-      return true;
+    // Below, check if CurrentDef is a valid candidate to be eliminated by
+    // KillingDef. If it is not, check the next candidate.
+    MemoryDef *CurrentDef = cast<MemoryDef>(Current);
+    Instruction *CurrentI = CurrentDef->getMemoryInst();
 
-    if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
-      if (LoadI->getPointerOperand() == Store->getOperand(1)) {
-        // Get the defining access for the load.
-        auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
-        // Fast path: the defining accesses are the same.
-        if (LoadAccess == Def->getDefiningAccess())
-          return true;
-
-        // Look through phi accesses. Recursively scan all phi accesses by
-        // adding them to a worklist. Bail when we run into a memory def that
-        // does not match LoadAccess.
-        SetVector<MemoryAccess *> ToCheck;
-        MemoryAccess *Current =
-            MSSA.getWalker()->getClobberingMemoryAccess(Def, BatchAA);
-        // We don't want to bail when we run into the store memory def. But,
-        // the phi access may point to it. So, pretend like we've already
-        // checked it.
-        ToCheck.insert(Def);
-        ToCheck.insert(Current);
-        // Start at current (1) to simulate already having checked Def.
-        for (unsigned I = 1; I < ToCheck.size(); ++I) {
-          Current = ToCheck[I];
-          if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
-            // Check all the operands.
-            for (auto &Use : PhiAccess->incoming_values())
-              ToCheck.insert(cast<MemoryAccess>(&Use));
-            continue;
-          }
+    if (canSkipDef(CurrentDef, !isInvisibleToCallerOnUnwind(KillingUndObj))) {
+      CanOptimize = false;
+      continue;
+    }
 
-          // If we found a memory def, bail. This happens when we have an
-          // unrelated write in between an otherwise noop store.
-          assert(isa<MemoryDef>(Current) &&
-                 "Only MemoryDefs should reach here.");
-          // TODO: Skip no alias MemoryDefs that have no aliasing reads.
-          // We are searching for the definition of the store's destination.
-          // So, if that is the same definition as the load, then this is a
-          // noop. Otherwise, fail.
-          if (LoadAccess != Current)
-            return false;
+    // Before we try to remove anything, check for any extra throwing
+    // instructions that block us from DSEing
+    if (mayThrowBetween(KillingI, CurrentI, KillingUndObj)) {
+      LLVM_DEBUG(dbgs() << "  ... skip, may throw!\n");
+      return std::nullopt;
+    }
+
+    // Check for anything that looks like it will be a barrier to further
+    // removal
+    if (isDSEBarrier(KillingUndObj, CurrentI)) {
+      LLVM_DEBUG(dbgs() << "  ... skip, barrier\n");
+      return std::nullopt;
+    }
+
+    // If Current is known to be on a path that reads DefLoc or is a read
+    // clobber, bail out, as the path is not profitable. We skip this check
+    // for intrinsic calls, because the code knows how to handle memcpy
+    // intrinsics.
+    if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(KillingLoc, CurrentI))
+      return std::nullopt;
+
+    // Quick check if there are direct uses that are read-clobbers.
+    if (any_of(Current->uses(), [this, &KillingLoc, StartAccess](Use &U) {
+          if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
+            return !MSSA.dominates(StartAccess, UseOrDef) &&
+                   isReadClobber(KillingLoc, UseOrDef->getMemoryInst());
+          return false;
+        })) {
+      LLVM_DEBUG(dbgs() << "   ...  found a read clobber\n");
+      return std::nullopt;
+    }
+
+    // If Current does not have an analyzable write location or is not
+    // removable, skip it.
+    CurrentLoc = getLocForWrite(CurrentI);
+    if (!CurrentLoc || !isRemovable(CurrentI)) {
+      CanOptimize = false;
+      continue;
+    }
+
+    // AliasAnalysis does not account for loops. Limit elimination to
+    // candidates for which we can guarantee they always store to the same
+    // memory location and are not located in different loops.
+    if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) {
+      LLVM_DEBUG(dbgs() << "  ... not guaranteed loop independent\n");
+      CanOptimize = false;
+      continue;
+    }
+
+    if (IsMemTerm) {
+      // If the killing def is a memory terminator (e.g. lifetime.end), check
+      // the next candidate if the current Current does not write the same
+      // underlying object as the terminator.
+      if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) {
+        CanOptimize = false;
+        continue;
+      }
+    } else {
+      int64_t KillingOffset = 0;
+      int64_t DeadOffset = 0;
+      auto OR = isOverwrite(KillingI, CurrentI, KillingLoc, *CurrentLoc,
+                            KillingOffset, DeadOffset);
+      if (CanOptimize) {
+        // CurrentDef is the earliest write clobber of KillingDef. Use it as
+        // optimized access. Do not optimize if CurrentDef is already the
+        // defining access of KillingDef.
+        if (CurrentDef != KillingDef->getDefiningAccess() &&
+            (OR == OW_Complete || OR == OW_MaybePartial))
+          KillingDef->setOptimized(CurrentDef);
+
+        // Once a may-aliasing def is encountered do not set an optimized
+        // access.
+        if (OR != OW_None)
+          CanOptimize = false;
+      }
+
+      // If Current does not write to the same object as KillingDef, check
+      // the next candidate.
+      if (OR == OW_Unknown || OR == OW_None)
+        continue;
+      else if (OR == OW_MaybePartial) {
+        // If KillingDef only partially overwrites Current, check the next
+        // candidate if the partial step limit is exceeded. This aggressively
+        // limits the number of candidates for partial store elimination,
+        // which are less likely to be removable in the end.
+        if (PartialLimit <= 1) {
+          WalkerStepLimit -= 1;
+          LLVM_DEBUG(dbgs() << "   ... reached partial limit ... continue with "
+                               "next access\n");
+          continue;
         }
-        return true;
+        PartialLimit -= 1;
       }
     }
+    break;
+  }
 
-    return false;
-  }
+  // Accesses to objects accessible after the function returns can only be
+  // eliminated if the access is dead along all paths to the exit. Collect
+  // the blocks with killing (= completely overwriting) MemoryDefs and check
+  // if they cover all paths from MaybeDeadAccess to any function exit.
+  // they cover all paths from MaybeDeadAccess to any function exit.
+  SmallPtrSet<Instruction *, 16> KillingDefs;
+  KillingDefs.insert(KillingDef->getMemoryInst());
+  MemoryAccess *MaybeDeadAccess = Current;
+  MemoryLocation MaybeDeadLoc = *CurrentLoc;
+  Instruction *MaybeDeadI = cast<MemoryDef>(MaybeDeadAccess)->getMemoryInst();
+  LLVM_DEBUG(dbgs() << "  Checking for reads of " << *MaybeDeadAccess << " ("
+                    << *MaybeDeadI << ")\n");
+
+  SmallVector<MemoryAccess *, 32> WorkList;
+  SmallPtrSet<MemoryAccess *, 32> Visited;
+  pushMemUses(MaybeDeadAccess, WorkList, Visited);
+
+  // Check if DeadDef may be read.
+  for (unsigned I = 0; I < WorkList.size(); I++) {
+    MemoryAccess *UseAccess = WorkList[I];
+
+    LLVM_DEBUG(dbgs() << "   " << *UseAccess);
+    // Bail out if the number of accesses to check exceeds the scan limit.
+    if (ScanLimit < (WorkList.size() - I)) {
+      LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
+      return std::nullopt;
+    }
+    --ScanLimit;
+    NumDomMemDefChecks++;
 
-  bool removePartiallyOverlappedStores(InstOverlapIntervalsTy &IOL) {
-    bool Changed = false;
-    for (auto OI : IOL) {
-      Instruction *DeadI = OI.first;
-      MemoryLocation Loc = *getLocForWrite(DeadI);
-      assert(isRemovable(DeadI) && "Expect only removable instruction");
-
-      const Value *Ptr = Loc.Ptr->stripPointerCasts();
-      int64_t DeadStart = 0;
-      uint64_t DeadSize = Loc.Size.getValue();
-      GetPointerBaseWithConstantOffset(Ptr, DeadStart, DL);
-      OverlapIntervalsTy &IntervalMap = OI.second;
-      Changed |= tryToShortenEnd(DeadI, IntervalMap, DeadStart, DeadSize);
-      if (IntervalMap.empty())
+    if (isa<MemoryPhi>(UseAccess)) {
+      if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
+            return DT.properlyDominates(KI->getParent(), UseAccess->getBlock());
+          })) {
+        LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
         continue;
-      Changed |= tryToShortenBegin(DeadI, IntervalMap, DeadStart, DeadSize);
+      }
+      LLVM_DEBUG(dbgs() << "\n    ... adding PHI uses\n");
+      pushMemUses(UseAccess, WorkList, Visited);
+      continue;
+    }
+
+    Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
+    LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
+
+    if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
+          return DT.dominates(KI, UseInst);
+        })) {
+      LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
+      continue;
+    }
+
+    // A memory terminator kills all preceding MemoryDefs and all succeeding
+    // MemoryAccesses. We do not have to check its users.
+    if (isMemTerminator(MaybeDeadLoc, MaybeDeadI, UseInst)) {
+      LLVM_DEBUG(
+          dbgs()
+          << " ... skipping, memterminator invalidates following accesses\n");
+      continue;
+    }
+
+    if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
+      LLVM_DEBUG(dbgs() << "    ... adding uses of intrinsic\n");
+      pushMemUses(UseAccess, WorkList, Visited);
+      continue;
+    }
+
+    if (UseInst->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj)) {
+      LLVM_DEBUG(dbgs() << "  ... found throwing instruction\n");
+      return std::nullopt;
+    }
+
+    // Uses which may read the original MemoryDef mean we cannot eliminate the
+    // original MD. Stop walk.
+    // If KillingDef is a CallInst with "initializes" attribute, the reads in
+    // the callee would be dominated by initializations, so it should be safe.
+    bool IsKillingDefFromInitAttr = false;
+    if (IsInitializesAttrMemLoc) {
+      if (KillingI == UseInst &&
+          KillingUndObj == getUnderlyingObject(MaybeDeadLoc.Ptr))
+        IsKillingDefFromInitAttr = true;
+    }
+
+    if (isReadClobber(MaybeDeadLoc, UseInst) && !IsKillingDefFromInitAttr) {
+      LLVM_DEBUG(dbgs() << "    ... found read clobber\n");
+      return std::nullopt;
+    }
+
+    // If this worklist walks back to the original memory access (and the
+    // pointer is not guaranteed loop invariant) then we cannot assume that a
+    // store kills itself.
+    if (MaybeDeadAccess == UseAccess &&
+        !isGuaranteedLoopInvariant(MaybeDeadLoc.Ptr)) {
+      LLVM_DEBUG(dbgs() << "    ... found not loop invariant self access\n");
+      return std::nullopt;
+    }
+    // Otherwise, for the KillingDef and MaybeDeadAccess we only have to check
+    // if it reads the memory location.
+    // TODO: It would probably be better to check for self-reads before
+    // calling the function.
+    if (KillingDef == UseAccess || MaybeDeadAccess == UseAccess) {
+      LLVM_DEBUG(dbgs() << "    ... skipping killing def/dom access\n");
+      continue;
+    }
+
+    // Check all uses for MemoryDefs, except for defs completely overwriting
+    // the original location. Otherwise we have to check uses of *all*
+    // MemoryDefs we discover, including non-aliasing ones. Otherwise we might
+    // miss cases like the following
+    //   1 = Def(LoE) ; <----- DeadDef stores [0,1]
+    //   2 = Def(1)   ; (2, 1) = NoAlias,   stores [2,3]
+    //   Use(2)       ; MayAlias 2 *and* 1, loads [0, 3].
+    //                  (The Use points to the *first* Def it may alias)
+    //   3 = Def(1)   ; <---- Current  (3, 2) = NoAlias, (3,1) = MayAlias,
+    //                  stores [0,1]
+    if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
+      if (isCompleteOverwrite(MaybeDeadLoc, MaybeDeadI, UseInst)) {
+        BasicBlock *MaybeKillingBlock = UseInst->getParent();
+        if (PostOrderNumbers.find(MaybeKillingBlock)->second <
+            PostOrderNumbers.find(MaybeDeadAccess->getBlock())->second) {
+          if (!isInvisibleToCallerAfterRet(KillingUndObj, KillingLoc.Ptr,
+                                           KillingLoc.Size)) {
+            LLVM_DEBUG(dbgs()
+                       << "    ... found killing def " << *UseInst << "\n");
+            KillingDefs.insert(UseInst);
+          }
+        } else {
+          LLVM_DEBUG(dbgs()
+                     << "    ... found preceeding def " << *UseInst << "\n");
+          return std::nullopt;
+        }
+      } else
+        pushMemUses(UseDef, WorkList, Visited);
     }
-    return Changed;
   }
 
-  /// Eliminates writes to locations where the value that is being written
-  /// is already stored at the same location.
-  bool eliminateRedundantStoresOfExistingValues() {
-    bool MadeChange = false;
-    LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs that write the "
-                         "already existing value\n");
-    for (auto *Def : MemDefs) {
-      if (SkipStores.contains(Def) || MSSA.isLiveOnEntryDef(Def))
-        continue;
+  // For accesses to locations visible after the function returns, make sure
+  // that the location is dead (=overwritten) along all paths from
+  // MaybeDeadAccess to the exit.
+  if (!isInvisibleToCallerAfterRet(KillingUndObj, KillingLoc.Ptr,
+                                   KillingLoc.Size)) {
+    SmallPtrSet<BasicBlock *, 16> KillingBlocks;
+    for (Instruction *KD : KillingDefs)
+      KillingBlocks.insert(KD->getParent());
+    assert(!KillingBlocks.empty() &&
+           "Expected at least a single killing block");
+
+    // Find the common post-dominator of all killing blocks.
+    BasicBlock *CommonPred = *KillingBlocks.begin();
+    for (BasicBlock *BB : llvm::drop_begin(KillingBlocks)) {
+      if (!CommonPred)
+        break;
+      CommonPred = PDT.findNearestCommonDominator(CommonPred, BB);
+    }
 
-      Instruction *DefInst = Def->getMemoryInst();
-      auto MaybeDefLoc = getLocForWrite(DefInst);
-      if (!MaybeDefLoc || !isRemovable(DefInst))
+    // If the common post-dominator does not post-dominate MaybeDeadAccess,
+    // there is a path from MaybeDeadAccess to an exit not going through a
+    // killing block.
+    if (!PDT.dominates(CommonPred, MaybeDeadAccess->getBlock())) {
+      if (!AnyUnreachableExit)
+        return std::nullopt;
+
+      // Fall back to CFG scan starting at all non-unreachable roots if not
+      // all paths to the exit go through CommonPred.
+      CommonPred = nullptr;
+    }
+
+    // If CommonPred itself is in the set of killing blocks, we're done.
+    if (KillingBlocks.count(CommonPred))
+      return {MaybeDeadAccess};
+
+    SetVector<BasicBlock *> WorkList;
+    // If CommonPred is null, there are multiple exits from the function.
+    // They all have to be added to the worklist.
+    if (CommonPred)
+      WorkList.insert(CommonPred);
+    else
+      for (BasicBlock *R : PDT.roots()) {
+        if (!isa<UnreachableInst>(R->getTerminator()))
+          WorkList.insert(R);
+      }
+
+    NumCFGTries++;
+    // Check if all paths starting from an exit node go through one of the
+    // killing blocks before reaching MaybeDeadAccess.
+    for (unsigned I = 0; I < WorkList.size(); I++) {
+      NumCFGChecks++;
+      BasicBlock *Current = WorkList[I];
+      if (KillingBlocks.count(Current))
         continue;
+      if (Current == MaybeDeadAccess->getBlock())
+        return std::nullopt;
 
-      MemoryDef *UpperDef;
-      // To conserve compile-time, we avoid walking to the next clobbering def.
-      // Instead, we just try to get the optimized access, if it exists. DSE
-      // will try to optimize defs during the earlier traversal.
-      if (Def->isOptimized())
-        UpperDef = dyn_cast<MemoryDef>(Def->getOptimized());
-      else
-        UpperDef = dyn_cast<MemoryDef>(Def->getDefiningAccess());
-      if (!UpperDef || MSSA.isLiveOnEntryDef(UpperDef))
+      // MaybeDeadAccess is reachable from the entry, so we don't have to
+      // explore unreachable blocks further.
+      if (!DT.isReachableFromEntry(Current))
         continue;
 
-      Instruction *UpperInst = UpperDef->getMemoryInst();
-      auto IsRedundantStore = [&]() {
-        // We don't care about differences in call attributes here.
-        if (DefInst->isIdenticalToWhenDefined(UpperInst,
-                                              /*IntersectAttrs=*/true))
-          return true;
-        if (auto *MemSetI = dyn_cast<MemSetInst>(UpperInst)) {
-          if (auto *SI = dyn_cast<StoreInst>(DefInst)) {
-            // MemSetInst must have a write location.
-            auto UpperLoc = getLocForWrite(UpperInst);
-            if (!UpperLoc)
-              return false;
-            int64_t InstWriteOffset = 0;
-            int64_t DepWriteOffset = 0;
-            auto OR = isOverwrite(UpperInst, DefInst, *UpperLoc, *MaybeDefLoc,
-                                  InstWriteOffset, DepWriteOffset);
-            Value *StoredByte = isBytewiseValue(SI->getValueOperand(), DL);
-            return StoredByte && StoredByte == MemSetI->getOperand(1) &&
-                   OR == OW_Complete;
+      WorkList.insert_range(predecessors(Current));
+
+      if (WorkList.size() >= MemorySSAPathCheckLimit)
+        return std::nullopt;
+    }
+    NumCFGSuccess++;
+  }
+
+  // No aliasing MemoryUses of MaybeDeadAccess found, MaybeDeadAccess is
+  // potentially dead.
+  return {MaybeDeadAccess};
+}
+
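
The path condition this function enforces, in source form: for a
caller-visible location, the early store is dead only if every path to an
exit passes through a killing (completely overwriting) store.

  void f(int *p, int c) { // *p is visible to the caller
    *p = 1;               // kept: the early return skips the overwrite
    if (c)
      return;             // exit path on which *p == 1 is observable
    *p = 2;               // killing store, but not on all paths
  }
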
+void DSEState::deleteDeadInstruction(Instruction *SI,
+                                     SmallPtrSetImpl<MemoryAccess *> *Deleted) {
+  MemorySSAUpdater Updater(&MSSA);
+  SmallVector<Instruction *, 32> NowDeadInsts;
+  NowDeadInsts.push_back(SI);
+  --NumFastOther;
+
+  while (!NowDeadInsts.empty()) {
+    Instruction *DeadInst = NowDeadInsts.pop_back_val();
+    ++NumFastOther;
+
+    // Try to preserve debug information attached to the dead instruction.
+    salvageDebugInfo(*DeadInst);
+    salvageKnowledge(DeadInst);
+
+    // Remove the Instruction from MSSA.
+    MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst);
+    bool IsMemDef = MA && isa<MemoryDef>(MA);
+    if (MA) {
+      if (IsMemDef) {
+        auto *MD = cast<MemoryDef>(MA);
+        SkipStores.insert(MD);
+        if (Deleted)
+          Deleted->insert(MD);
+        if (auto *SI = dyn_cast<StoreInst>(MD->getMemoryInst())) {
+          if (SI->getValueOperand()->getType()->isPointerTy()) {
+            const Value *UO = getUnderlyingObject(SI->getValueOperand());
+            if (CapturedBeforeReturn.erase(UO))
+              ShouldIterateEndOfFunctionDSE = true;
+            InvisibleToCallerAfterRet.erase(UO);
+            InvisibleToCallerAfterRetBounded.erase(UO);
           }
         }
-        return false;
-      };
+      }
 
-      if (!IsRedundantStore() || isReadClobber(*MaybeDefLoc, DefInst))
+      Updater.removeMemoryAccess(MA);
+    }
+
+    auto I = IOLs.find(DeadInst->getParent());
+    if (I != IOLs.end())
+      I->second.erase(DeadInst);
+    // Remove its operands
+    for (Use &O : DeadInst->operands())
+      if (Instruction *OpI = dyn_cast<Instruction>(O)) {
+        O.set(PoisonValue::get(O->getType()));
+        if (isInstructionTriviallyDead(OpI, &TLI))
+          NowDeadInsts.push_back(OpI);
+      }
+
+    EA.removeInstruction(DeadInst);
+    // Remove memory defs directly if they don't produce results, but only
+    // queue other dead instructions for later removal. They may have been
+    // used as memory locations that have been cached by BatchAA. Removing
+    // them here may lead to newly created instructions to be allocated at the
+    // same address, yielding stale cache entries.
+    if (IsMemDef && DeadInst->getType()->isVoidTy())
+      DeadInst->eraseFromParent();
+    else
+      ToRemove.push_back(DeadInst);
+  }
+}
+
+bool DSEState::mayThrowBetween(Instruction *KillingI, Instruction *DeadI,
+                               const Value *KillingUndObj) {
+  // First see if we can ignore it by using the fact that KillingUndObj is an
+  // alloca/alloca-like object that is not visible to the caller during
+  // execution of the function.
+  if (KillingUndObj && isInvisibleToCallerOnUnwind(KillingUndObj))
+    return false;
+
+  if (KillingI->getParent() == DeadI->getParent())
+    return ThrowingBlocks.count(KillingI->getParent());
+  return !ThrowingBlocks.empty();
+}
+
+bool DSEState::isDSEBarrier(const Value *KillingUndObj, Instruction *DeadI) {
+  // If DeadI may throw it acts as a barrier, unless we are writing to an
+  // alloca/alloca-like object that does not escape.
+  if (DeadI->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj))
+    return true;
+
+  // If DeadI is an atomic load/store stronger than monotonic, do not try to
+  // eliminate/reorder it.
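+  // E.g. (illustrative) a seq_cst or release access is treated as a barrier
+  // here, while a monotonic or unordered one is not.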
+  if (DeadI->isAtomic()) {
+    if (auto *LI = dyn_cast<LoadInst>(DeadI))
+      return isStrongerThanMonotonic(LI->getOrdering());
+    if (auto *SI = dyn_cast<StoreInst>(DeadI))
+      return isStrongerThanMonotonic(SI->getOrdering());
+    if (auto *ARMW = dyn_cast<AtomicRMWInst>(DeadI))
+      return isStrongerThanMonotonic(ARMW->getOrdering());
+    if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(DeadI))
+      return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
+             isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
+    llvm_unreachable("other instructions should be skipped in MemorySSA");
+  }
+  return false;
+}
+
+bool DSEState::eliminateDeadWritesAtEndOfFunction() {
+  bool MadeChange = false;
+  LLVM_DEBUG(
+      dbgs() << "Trying to eliminate MemoryDefs at the end of the function\n");
+  do {
+    ShouldIterateEndOfFunctionDSE = false;
+    for (MemoryDef *Def : llvm::reverse(MemDefs)) {
+      if (SkipStores.contains(Def))
         continue;
-      LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n  DEAD: " << *DefInst
-                        << '\n');
-      deleteDeadInstruction(DefInst);
-      NumRedundantStores++;
-      MadeChange = true;
+
+      Instruction *DefI = Def->getMemoryInst();
+      auto DefLoc = getLocForWrite(DefI);
+      if (!DefLoc || !isRemovable(DefI)) {
+        LLVM_DEBUG(dbgs() << "  ... could not get location for write or "
+                             "instruction not removable.\n");
+        continue;
+      }
+
+      // NOTE: Currently eliminating writes at the end of a function is
+      // limited to MemoryDefs with a single underlying object, to save
+      // compile-time. In practice it appears the case with multiple
+      // underlying objects is very uncommon. If it turns out to be important,
+      // we can use getUnderlyingObjects here instead.
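+      // E.g. (illustrative) a pointer with two underlying objects that is
+      // skipped by this limitation:
+      //   %p = select i1 %c, ptr %a, ptr %b
+      //   store i32 0, ptr %p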
+      const Value *UO = getUnderlyingObject(DefLoc->Ptr);
+      if (!isInvisibleToCallerAfterRet(UO, DefLoc->Ptr, DefLoc->Size))
+        continue;
+
+      if (isWriteAtEndOfFunction(Def, *DefLoc)) {
+        // See through pointer-to-pointer bitcasts
+        LLVM_DEBUG(dbgs() << "   ... MemoryDef is not accessed until the end "
+                             "of the function\n");
+        deleteDeadInstruction(DefI);
+        ++NumFastStores;
+        MadeChange = true;
+      }
     }
-    return MadeChange;
+  } while (ShouldIterateEndOfFunctionDSE);
+  return MadeChange;
+}
+
+bool DSEState::tryFoldIntoCalloc(MemoryDef *Def, const Value *DefUO) {
+  Instruction *DefI = Def->getMemoryInst();
+  MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI);
+  if (!MemSet)
+    // TODO: Could handle a zero store to a small allocation as well.
+    return false;
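+  // Shape of the fold, as a minimal sketch (assuming a plain malloc; the
+  // alloc-variant-zeroed path below substitutes the named variant instead):
+  //   %p = call ptr @malloc(i64 %n)
+  //   call void @llvm.memset.p0.i64(ptr %p, i8 0, i64 %n, i1 false)
+  // ==>
+  //   %p = call ptr @calloc(i64 1, i64 %n)
+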
+  Constant *StoredConstant = dyn_cast<Constant>(MemSet->getValue());
+  if (!StoredConstant || !StoredConstant->isNullValue())
+    return false;
+
+  if (!isRemovable(DefI))
+    // The memset might be volatile.
+    return false;
+
+  if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
+      F.hasFnAttribute(Attribute::SanitizeAddress) ||
+      F.hasFnAttribute(Attribute::SanitizeHWAddress) || F.getName() == "calloc")
+    return false;
+  auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUO));
+  if (!Malloc)
+    return false;
+  auto *InnerCallee = Malloc->getCalledFunction();
+  if (!InnerCallee)
+    return false;
+  LibFunc Func = NotLibFunc;
+  StringRef ZeroedVariantName;
+  if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) ||
+      Func != LibFunc_malloc) {
+    Attribute Attr = Malloc->getFnAttr("alloc-variant-zeroed");
+    if (!Attr.isValid())
+      return false;
+    ZeroedVariantName = Attr.getValueAsString();
+    if (ZeroedVariantName.empty())
+      return false;
   }
 
-  // Return the locations written by the initializes attribute.
-  // Note that this function considers:
-  // 1. Unwind edge: use "initializes" attribute only if the callee has
-  //    "nounwind" attribute, or the argument has "dead_on_unwind" attribute,
-  //    or the argument is invisible to caller on unwind. That is, we don't
-  //    perform incorrect DSE on unwind edges in the current function.
-  // 2. Argument alias: for aliasing arguments, the "initializes" attribute is
-  //    the intersected range list of their "initializes" attributes.
-  SmallVector<MemoryLocation, 1> getInitializesArgMemLoc(const Instruction *I);
+  // Gracefully handle malloc with unexpected memory attributes.
+  auto *MallocDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(Malloc));
+  if (!MallocDef)
+    return false;
 
-  // Try to eliminate dead defs that access `KillingLocWrapper.MemLoc` and are
-  // killed by `KillingLocWrapper.MemDef`. Return whether
-  // any changes were made, and whether `KillingLocWrapper.DefInst` was deleted.
-  std::pair<bool, bool>
-  eliminateDeadDefs(const MemoryLocationWrapper &KillingLocWrapper);
+  auto shouldCreateCalloc = [](CallInst *Malloc, CallInst *Memset) {
+    // Check for the br(icmp(ptr, null), truebb, falsebb) pattern at the
+    // end of the malloc block.
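+    // Sketch of the accepted CFG (illustrative):
+    //   mallocbb:
+    //     %p = call ptr @malloc(i64 %n)
+    //     %isnull = icmp eq ptr %p, null
+    //     br i1 %isnull, label %truebb, label %falsebb
+    //   falsebb:                  ; the memset must live here
+    //     call void @llvm.memset.p0.i64(ptr %p, i8 0, i64 %n, i1 false)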
+    auto *MallocBB = Malloc->getParent(), *MemsetBB = Memset->getParent();
+    if (MallocBB == MemsetBB)
+      return true;
+    auto *Ptr = Memset->getArgOperand(0);
+    auto *TI = MallocBB->getTerminator();
+    BasicBlock *TrueBB, *FalseBB;
+    if (!match(TI, m_Br(m_SpecificICmp(ICmpInst::ICMP_EQ, m_Specific(Ptr),
+                                       m_Zero()),
+                        TrueBB, FalseBB)))
+      return false;
+    if (MemsetBB != FalseBB)
+      return false;
+    return true;
+  };
 
-  // Try to eliminate dead defs killed by `KillingDefWrapper` and return the
-  // change state: whether make any change.
-  bool eliminateDeadDefs(const MemoryDefWrapper &KillingDefWrapper);
-};
-} // namespace
+  if (Malloc->getOperand(0) != MemSet->getLength())
+    return false;
+  if (!shouldCreateCalloc(Malloc, MemSet) || !DT.dominates(Malloc, MemSet) ||
+      !memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT))
+    return false;
+  IRBuilder<> IRB(Malloc);
+  assert(Func == LibFunc_malloc || !ZeroedVariantName.empty());
+  Value *Calloc = nullptr;
+  if (!ZeroedVariantName.empty()) {
+    LLVMContext &Ctx = Malloc->getContext();
+    AttributeList Attrs = InnerCallee->getAttributes();
+    AllocFnKind AllocKind =
+        Attrs.getFnAttr(Attribute::AllocKind).getAllocKind() |
+        AllocFnKind::Zeroed;
+    AllocKind &= ~AllocFnKind::Uninitialized;
+    Attrs =
+        Attrs.addFnAttribute(Ctx, Attribute::getWithAllocKind(Ctx, AllocKind))
+            .removeFnAttribute(Ctx, "alloc-variant-zeroed");
+    FunctionCallee ZeroedVariant = Malloc->getModule()->getOrInsertFunction(
+        ZeroedVariantName, InnerCallee->getFunctionType(), Attrs);
+    cast<Function>(ZeroedVariant.getCallee())
+        ->setCallingConv(Malloc->getCallingConv());
+    SmallVector<Value *, 3> Args;
+    Args.append(Malloc->arg_begin(), Malloc->arg_end());
+    CallInst *CI = IRB.CreateCall(ZeroedVariant, Args, ZeroedVariantName);
+    CI->setCallingConv(Malloc->getCallingConv());
+    Calloc = CI;
+  } else {
+    Type *SizeTTy = Malloc->getArgOperand(0)->getType();
+    Calloc = emitCalloc(ConstantInt::get(SizeTTy, 1), Malloc->getArgOperand(0),
+                        IRB, TLI, Malloc->getType()->getPointerAddressSpace());
+  }
+  if (!Calloc)
+    return false;
 
-// Return true if "Arg" is function local and isn't captured before "CB".
-static bool isFuncLocalAndNotCaptured(Value *Arg, const CallBase *CB,
-                                      EarliestEscapeAnalysis &EA) {
-  const Value *UnderlyingObj = getUnderlyingObject(Arg);
-  return isIdentifiedFunctionLocal(UnderlyingObj) &&
-         capturesNothing(
-             EA.getCapturesBefore(UnderlyingObj, CB, /*OrAt*/ true));
+  MemorySSAUpdater Updater(&MSSA);
+  auto *NewAccess = Updater.createMemoryAccessAfter(cast<Instruction>(Calloc),
+                                                    nullptr, MallocDef);
+  auto *NewAccessMD = cast<MemoryDef>(NewAccess);
+  Updater.insertDef(NewAccessMD, /*RenameUses=*/true);
+  Malloc->replaceAllUsesWith(Calloc);
+  deleteDeadInstruction(Malloc);
+  return true;
+}
+
+bool DSEState::dominatingConditionImpliesValue(MemoryDef *Def) {
+  auto *StoreI = cast<StoreInst>(Def->getMemoryInst());
+  BasicBlock *StoreBB = StoreI->getParent();
+  Value *StorePtr = StoreI->getPointerOperand();
+  Value *StoreVal = StoreI->getValueOperand();
+
+  DomTreeNode *IDom = DT.getNode(StoreBB)->getIDom();
+  if (!IDom)
+    return false;
+
+  auto *BI = dyn_cast<BranchInst>(IDom->getBlock()->getTerminator());
+  if (!BI || !BI->isConditional())
+    return false;
+
+  // In case both successors are the same block, it is not possible to
+  // determine whether the optimization is safe. (We would not want to
+  // optimize a store in the FalseBB if the condition is true, and vice
+  // versa.)
+  if (BI->getSuccessor(0) == BI->getSuccessor(1))
+    return false;
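+
+  // The pattern being recognized, as a C-like sketch (names illustrative):
+  //   if (*p == v) {
+  //     ...           // nothing clobbers *p in between
+  //     *p = v;       // no-op: the dominating condition implies the value
+  //   }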
+
+  Instruction *ICmpL;
+  CmpPredicate Pred;
+  if (!match(BI->getCondition(),
+             m_c_ICmp(Pred,
+                      m_CombineAnd(m_Load(m_Specific(StorePtr)),
+                                   m_Instruction(ICmpL)),
+                      m_Specific(StoreVal))) ||
+      !ICmpInst::isEquality(Pred))
+    return false;
+
+  // In case the else block also branches to the if block, or the other way
+  // around, it is not possible to determine if the optimization is safe.
+  if (Pred == ICmpInst::ICMP_EQ &&
+      !DT.dominates(BasicBlockEdge(BI->getParent(), BI->getSuccessor(0)),
+                    StoreBB))
+    return false;
+
+  if (Pred == ICmpInst::ICMP_NE &&
+      !DT.dominates(BasicBlockEdge(BI->getParent(), BI->getSuccessor(1)),
+                    StoreBB))
+    return false;
+
+  MemoryAccess *LoadAcc = MSSA.getMemoryAccess(ICmpL);
+  MemoryAccess *ClobAcc =
+      MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def, BatchAA);
+
+  return MSSA.dominates(ClobAcc, LoadAcc);
+}
+
+bool DSEState::storeIsNoop(MemoryDef *Def, const Value *DefUO) {
+  Instruction *DefI = Def->getMemoryInst();
+  StoreInst *Store = dyn_cast<StoreInst>(DefI);
+  MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI);
+  Constant *StoredConstant = nullptr;
+  if (Store)
+    StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
+  else if (MemSet)
+    StoredConstant = dyn_cast<Constant>(MemSet->getValue());
+  else
+    return false;
+
+  if (!isRemovable(DefI))
+    return false;
+
+  if (StoredConstant) {
+    Constant *InitC =
+        getInitialValueOfAllocation(DefUO, &TLI, StoredConstant->getType());
+    // If the clobbering access is LiveOnEntry, no instructions between them
+    // can modify the memory location.
+    if (InitC && InitC == StoredConstant)
+      return MSSA.isLiveOnEntryDef(
+          MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def, BatchAA));
+  }
+
+  if (!Store)
+    return false;
+
+  if (dominatingConditionImpliesValue(Def))
+    return true;
+
+  if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
+    if (LoadI->getPointerOperand() == Store->getOperand(1)) {
+      // Get the defining access for the load.
+      auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
+      // Fast path: the defining accesses are the same.
+      if (LoadAccess == Def->getDefiningAccess())
+        return true;
+
+      // Look through phi accesses. Recursively scan all phi accesses by
+      // adding them to a worklist. Bail when we run into a memory def that
+      // does not match LoadAccess.
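+      // Sketch of the shape this handles (illustrative):
+      //   %v = load i32, ptr %p        ; LoadAccess
+      //   br ...                       ; paths rejoin at MemoryPhis
+      //   store i32 %v, ptr %p         ; no-op iff every phi input
+      //                                ; resolves back to LoadAccess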
+      SetVector<MemoryAccess *> ToCheck;
+      MemoryAccess *Current =
+          MSSA.getWalker()->getClobberingMemoryAccess(Def, BatchAA);
+      // We don't want to bail when we run into the store's memory def, but
+      // a phi access may point to it. So, pretend we've already checked it.
+      ToCheck.insert(Def);
+      ToCheck.insert(Current);
+      // Start at current (1) to simulate already having checked Def.
+      for (unsigned I = 1; I < ToCheck.size(); ++I) {
+        Current = ToCheck[I];
+        if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
+          // Check all the operands.
+          for (auto &Use : PhiAccess->incoming_values())
+            ToCheck.insert(cast<MemoryAccess>(&Use));
+          continue;
+        }
+
+        // If we found a memory def, bail. This happens when there is an
+        // unrelated write between the load and the otherwise no-op store.
+        assert(isa<MemoryDef>(Current) && "Only MemoryDefs should reach here.");
+        // TODO: Skip no-alias MemoryDefs that have no aliasing reads.
+        // We are searching for the definition of the store's destination.
+        // So, if that is the same definition as the load, then this is a
+        // noop. Otherwise, fail.
+        if (LoadAccess != Current)
+          return false;
+      }
+      return true;
+    }
+  }
+
+  return false;
+}
+
+bool DSEState::removePartiallyOverlappedStores(InstOverlapIntervalsTy &IOL) {
+  bool Changed = false;
+  for (auto OI : IOL) {
+    Instruction *DeadI = OI.first;
+    MemoryLocation Loc = *getLocForWrite(DeadI);
+    assert(isRemovable(DeadI) && "Expect only removable instruction");
+
+    const Value *Ptr = Loc.Ptr->stripPointerCasts();
+    int64_t DeadStart = 0;
+    uint64_t DeadSize = Loc.Size.getValue();
+    GetPointerBaseWithConstantOffset(Ptr, DeadStart, DL);
+    OverlapIntervalsTy &IntervalMap = OI.second;
+    Changed |= tryToShortenEnd(DeadI, IntervalMap, DeadStart, DeadSize);
+    if (IntervalMap.empty())
+      continue;
+    Changed |= tryToShortenBegin(DeadI, IntervalMap, DeadStart, DeadSize);
+  }
+  return Changed;
+}
+
+bool DSEState::eliminateRedundantStoresOfExistingValues() {
+  bool MadeChange = false;
+  LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs that write the "
+                       "already existing value\n");
+  for (auto *Def : MemDefs) {
+    if (SkipStores.contains(Def) || MSSA.isLiveOnEntryDef(Def))
+      continue;
+
+    Instruction *DefInst = Def->getMemoryInst();
+    auto MaybeDefLoc = getLocForWrite(DefInst);
+    if (!MaybeDefLoc || !isRemovable(DefInst))
+      continue;
+
+    MemoryDef *UpperDef;
+    // To conserve compile-time, we avoid walking to the next clobbering def.
+    // Instead, we just try to get the optimized access, if it exists. DSE
+    // will try to optimize defs during the earlier traversal.
+    if (Def->isOptimized())
+      UpperDef = dyn_cast<MemoryDef>(Def->getOptimized());
+    else
+      UpperDef = dyn_cast<MemoryDef>(Def->getDefiningAccess());
+    if (!UpperDef || MSSA.isLiveOnEntryDef(UpperDef))
+      continue;
+
+    Instruction *UpperInst = UpperDef->getMemoryInst();
+    auto IsRedundantStore = [&]() {
+      // We don't care about differences in call attributes here.
+      if (DefInst->isIdenticalToWhenDefined(UpperInst,
+                                            /*IntersectAttrs=*/true))
+        return true;
+      if (auto *MemSetI = dyn_cast<MemSetInst>(UpperInst)) {
+        if (auto *SI = dyn_cast<StoreInst>(DefInst)) {
+          // MemSetInst must have a write location.
+          auto UpperLoc = getLocForWrite(UpperInst);
+          if (!UpperLoc)
+            return false;
+          int64_t InstWriteOffset = 0;
+          int64_t DepWriteOffset = 0;
+          auto OR = isOverwrite(UpperInst, DefInst, *UpperLoc, *MaybeDefLoc,
+                                InstWriteOffset, DepWriteOffset);
+          Value *StoredByte = isBytewiseValue(SI->getValueOperand(), DL);
+          return StoredByte && StoredByte == MemSetI->getOperand(1) &&
+                 OR == OW_Complete;
+        }
+      }
+      return false;
+    };
+
+    if (!IsRedundantStore() || isReadClobber(*MaybeDefLoc, DefInst))
+      continue;
+    LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n  DEAD: " << *DefInst
+                      << '\n');
+    deleteDeadInstruction(DefInst);
+    NumRedundantStores++;
+    MadeChange = true;
+  }
+  return MadeChange;
 }
 
 SmallVector<MemoryLocation, 1>

diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 72400e1055427..d58b5561aeeb2 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -374,6 +374,11 @@ cl::opt<bool> llvm::VPlanPrintAfterAll(
 cl::list<std::string> llvm::VPlanPrintAfterPasses(
     "vplan-print-after", cl::Hidden,
     cl::desc("Print VPlans after specified VPlan transformations (regexp)."));
+
+cl::opt<bool> llvm::VPlanPrintVectorRegionScope(
+    "vplan-print-vector-region-scope", cl::init(false), cl::Hidden,
+    cl::desc("Limit VPlan printing to the vector loop region in "
+             "`-vplan-print-after*` if the plan has one."));
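+// Typical use (see the predicator.ll test added in this commit): combine
+// with -vplan-print-after=<pass> to restrict the dump to the <x1> vector
+// loop region rather than the full plan.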
 #endif
 
 // This flag enables the stress testing of the VPlan H-CFG construction in the
@@ -8231,7 +8236,8 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
   // ---------------------------------------------------------------------------
   // Predicate and linearize the top-level loop region.
   // ---------------------------------------------------------------------------
-  VPlanTransforms::introduceMasksAndLinearize(*Plan, CM.foldTailByMasking());
+  RUN_VPLAN_PASS_NO_VERIFY(VPlanTransforms::introduceMasksAndLinearize, *Plan,
+                           CM.foldTailByMasking());
 
   // ---------------------------------------------------------------------------
   // Construct wide recipes and apply predication for original scalar

diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index bc9fe1eb81416..3304282068b55 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -2065,7 +2065,9 @@ static bool simplifyBranchConditionForVFAndUF(VPlan &Plan, ElementCount BestVF,
   VPBasicBlock *ExitingVPBB = VectorRegion->getExitingBasicBlock();
   auto *Term = &ExitingVPBB->back();
   VPValue *Cond;
-  if (match(Term, m_BranchOnCount()) ||
+  if (match(Term,
+            m_BranchOnCount(m_Add(m_VPValue(), m_Specific(&Plan.getVFxUF())),
+                            m_VPValue())) ||
       match(Term, m_BranchOnCond(m_Not(m_ActiveLaneMask(
                       m_VPValue(), m_VPValue(), m_VPValue()))))) {
     // Try to simplify the branch condition if VectorTC <= VF * UF when the

diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
index b76fde2bd1217..423bc1b0cf6ed 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h
@@ -41,6 +41,7 @@ LLVM_ABI_FOR_TEST extern cl::opt<bool> EnableWideActiveLaneMask;
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
 LLVM_ABI_FOR_TEST extern cl::opt<bool> VPlanPrintAfterAll;
 LLVM_ABI_FOR_TEST extern cl::list<std::string> VPlanPrintAfterPasses;
+LLVM_ABI_FOR_TEST extern cl::opt<bool> VPlanPrintVectorRegionScope;
 #endif
 
 struct VPlanTransforms {
@@ -63,7 +64,10 @@ struct VPlanTransforms {
             << "VPlan for loop in '"
             << Plan.getScalarHeader()->getIRBasicBlock()->getParent()->getName()
             << "' after " << PassName << '\n';
-        dbgs() << Plan << '\n';
+        if (VPlanPrintVectorRegionScope && Plan.getVectorLoopRegion())
+          Plan.getVectorLoopRegion()->print(dbgs());
+        else
+          dbgs() << Plan << '\n';
       }
 #endif
       if (VerifyEachVPlan && EnableVerify)

diff --git a/llvm/test/CodeGen/X86/win_cst_pool.ll b/llvm/test/CodeGen/X86/win_cst_pool.ll
index 1fc05b26fddb5..097fe2a39abb6 100644
--- a/llvm/test/CodeGen/X86/win_cst_pool.ll
+++ b/llvm/test/CodeGen/X86/win_cst_pool.ll
@@ -2,6 +2,7 @@
 ; RUN: llc < %s -mattr=sse2 -mattr=avx | FileCheck %s
 ; RUN: llc < %s -mtriple=x86_64-win32 -mattr=sse2 -mattr=avx | FileCheck %s
 ; RUN: llc < %s -mtriple=x86_64-windows-msvc -mattr=sse2 -mattr=avx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-windows-msvc -mattr=sse2 -mattr=avx -use-constant-int-for-fixed-length-splat -use-constant-fp-for-fixed-length-splat | FileCheck %s
 ; GNU environment.
 ; RUN: llc < %s -mtriple=x86_64-win32-gnu -mattr=sse2 -mattr=avx | FileCheck -check-prefix=MINGW %s
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -101,4 +102,32 @@ entry:
 ; CHECK: 	.quad	8589934593              # 0x200000001
 ; CHECK: 	.quad	17179869187             # 0x400000003
 ; CHECK: 	.quad	8589934593              # 0x200000001
-; CHECK: 	.quad	17179869187
+; CHECK: 	.quad	17179869187             # 0x400000003
+
+define <4 x i64> @ymm_splat() {
+entry:
+  ret <4 x i64> splat(i64 8589934593)
+}
+
+; CHECK:	.globl	__ymm@0000000200000001000000020000000100000002000000010000000200000001
+; CHECK:	.section	.rdata,"dr",discard,__ymm@0000000200000001000000020000000100000002000000010000000200000001
+; CHECK:	.p2align	5
+; CHECK: __ymm@0000000200000001000000020000000100000002000000010000000200000001
+; CHECK: 	.quad	8589934593             # 0x200000001
+; CHECK: 	.quad	8589934593             # 0x200000001
+; CHECK: 	.quad	8589934593             # 0x200000001
+; CHECK: 	.quad	8589934593             # 0x200000001
+
+define <4 x double> @ymm_splat_double() {
+entry:
+  ret <4 x double> splat(double 0x0000000000800000)
+}
+
+; CHECK:	.globl	__ymm@0000000000800000000000000080000000000000008000000000000000800000
+; CHECK:	.section	.rdata,"dr",discard,__ymm@0000000000800000000000000080000000000000008000000000000000800000
+; CHECK:	.p2align	5, 0x0
+; CHECK: __ymm@0000000000800000000000000080000000000000008000000000000000800000:
+; CHECK:	.quad	0x0000000000800000              # double 4.1445230292290475E-317
+; CHECK:	.quad	0x0000000000800000              # double 4.1445230292290475E-317
+; CHECK:	.quad	0x0000000000800000              # double 4.1445230292290475E-317
+; CHECK:	.quad	0x0000000000800000              # double 4.1445230292290475E-317

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
index 2865495954140..7d77f2f6b5b9c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
@@ -13,9 +13,14 @@ define void @load_store_interleave_group_tc_2(ptr noalias %data) {
 ; VF2:       [[VECTOR_PH]]:
 ; VF2-NEXT:    br label %[[VECTOR_BODY:.*]]
 ; VF2:       [[VECTOR_BODY]]:
-; VF2-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[DATA]], align 8
-; VF2-NEXT:    store <2 x i64> [[WIDE_LOAD]], ptr [[DATA]], align 8
-; VF2-NEXT:    br label %[[MIDDLE_BLOCK:.*]]
+; VF2-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF2-NEXT:    [[TMP0:%.*]] = shl nsw i64 [[INDEX]], 1
+; VF2-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP0]]
+; VF2-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP1]], align 8
+; VF2-NEXT:    store <2 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8
+; VF2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 1
+; VF2-NEXT:    [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 2
+; VF2-NEXT:    br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; VF2:       [[MIDDLE_BLOCK]]:
 ; VF2-NEXT:    br label %[[EXIT:.*]]
 ; VF2:       [[EXIT]]:
@@ -200,7 +205,7 @@ define void @test_complex_add_float_tc_4(ptr %res, ptr noalias %A, ptr noalias %
 ; VF2-NEXT:    store <4 x float> [[INTERLEAVED_VEC]], ptr [[TMP5]], align 4
 ; VF2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
 ; VF2-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4
-; VF2-NEXT:    br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; VF2-NEXT:    br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
 ; VF2:       [[MIDDLE_BLOCK]]:
 ; VF2-NEXT:    br label %[[EXIT:.*]]
 ; VF2:       [[EXIT]]:

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll
index 57d0534872118..fa60f8eec28f1 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll
@@ -104,6 +104,58 @@ exit:
   ret void
 }
 
+; Test that after narrowInterleaveGroups, the vector loop is not incorrectly
+; removed when VectorTC <= VF * UF. The narrowed plan has a smaller step
+; (vscale * UF instead of VF * UF), so it needs multiple iterations.
+define void @narrow_interleave_tc_16_vf_4_if_4(ptr noalias %data) vscale_range(2,2) {
+; CHECK-LABEL: define void @narrow_interleave_tc_16_vf_4_if_4(
+; CHECK-SAME: ptr noalias [[DATA:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 16, [[TMP3]]
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 16, [[N_MOD_VF]]
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = shl nsw i64 [[INDEX]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds float, ptr [[DATA]], i64 [[TMP0]]
+; CHECK-NEXT:    store <vscale x 4 x float> splat (float 1.000000e+00), ptr [[TMP1]], align 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 16, [[N_VEC]]
+; CHECK-NEXT:    br i1 [[CMP_N]], [[EXIT1:label %.*]], label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %mul.4 = shl nsw i64 %iv, 2
+  %data.0 = getelementptr inbounds float, ptr %data, i64 %mul.4
+  store float 1.0, ptr %data.0, align 4
+  %idx.1 = or disjoint i64 %mul.4, 1
+  %data.1 = getelementptr inbounds float, ptr %data, i64 %idx.1
+  store float 1.0, ptr %data.1, align 4
+  %idx.2 = or disjoint i64 %mul.4, 2
+  %data.2 = getelementptr inbounds float, ptr %data, i64 %idx.2
+  store float 1.0, ptr %data.2, align 4
+  %idx.3 = or disjoint i64 %mul.4, 3
+  %data.3 = getelementptr inbounds float, ptr %data, i64 %idx.3
+  store float 1.0, ptr %data.3, align 4
+  %iv.next = add nuw nsw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, 16
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret void
+}
+
 define void @test_masked_interleave_group(i32 %N, ptr %mask, ptr %src, ptr %dst) {
 ; IC1-LABEL: define void @test_masked_interleave_group(
 ; IC1-SAME: i32 [[N:%.*]], ptr [[MASK:%.*]], ptr [[SRC:%.*]], ptr [[DST:%.*]]) #[[ATTR0]] {
@@ -213,10 +265,10 @@ define void @test_masked_interleave_group(i32 %N, ptr %mask, ptr %src, ptr %dst)
 ; CHECK-NEXT:    [[TMP26:%.*]] = mul i64 [[INDEX1]], 16
 ; CHECK-NEXT:    [[NEXT_GEP9:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP26]]
 ; CHECK-NEXT:    [[NEXT_GEP10:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[INDEX1]]
-; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[NEXT_GEP10]], align 1, !alias.scope [[META6:![0-9]+]]
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[NEXT_GEP10]], align 1, !alias.scope [[META8:![0-9]+]]
 ; CHECK-NEXT:    [[TMP27:%.*]] = icmp eq <vscale x 16 x i8> [[WIDE_LOAD]], zeroinitializer
 ; CHECK-NEXT:    [[INTERLEAVED_MASK:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP27]], <vscale x 16 x i1> [[TMP27]], <vscale x 16 x i1> [[TMP27]], <vscale x 16 x i1> [[TMP27]])
-; CHECK-NEXT:    [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x float> @llvm.masked.load.nxv64f32.p0(ptr align 4 [[NEXT_GEP9]], <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x float> poison), !alias.scope [[META9:![0-9]+]]
+; CHECK-NEXT:    [[WIDE_MASKED_VEC:%.*]] = call <vscale x 64 x float> @llvm.masked.load.nxv64f32.p0(ptr align 4 [[NEXT_GEP9]], <vscale x 64 x i1> [[INTERLEAVED_MASK]], <vscale x 64 x float> poison), !alias.scope [[META11:![0-9]+]]
 ; CHECK-NEXT:    [[STRIDED_VEC:%.*]] = call { <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float> } @llvm.vector.deinterleave4.nxv64f32(<vscale x 64 x float> [[WIDE_MASKED_VEC]])
 ; CHECK-NEXT:    [[TMP28:%.*]] = extractvalue { <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float> } [[STRIDED_VEC]], 0
 ; CHECK-NEXT:    [[TMP16:%.*]] = extractvalue { <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float> } [[STRIDED_VEC]], 1
@@ -224,10 +276,10 @@ define void @test_masked_interleave_group(i32 %N, ptr %mask, ptr %src, ptr %dst)
 ; CHECK-NEXT:    [[TMP18:%.*]] = extractvalue { <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float> } [[STRIDED_VEC]], 3
 ; CHECK-NEXT:    [[INTERLEAVED_VEC:%.*]] = call <vscale x 64 x float> @llvm.vector.interleave4.nxv64f32(<vscale x 16 x float> [[TMP28]], <vscale x 16 x float> [[TMP16]], <vscale x 16 x float> [[TMP17]], <vscale x 16 x float> [[TMP18]])
 ; CHECK-NEXT:    [[INTERLEAVED_MASK9:%.*]] = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> [[TMP27]], <vscale x 16 x i1> [[TMP27]], <vscale x 16 x i1> [[TMP27]], <vscale x 16 x i1> [[TMP27]])
-; CHECK-NEXT:    call void @llvm.masked.store.nxv64f32.p0(<vscale x 64 x float> [[INTERLEAVED_VEC]], ptr align 4 [[NEXT_GEP1]], <vscale x 64 x i1> [[INTERLEAVED_MASK9]]), !alias.scope [[META11:![0-9]+]], !noalias [[META13:![0-9]+]]
+; CHECK-NEXT:    call void @llvm.masked.store.nxv64f32.p0(<vscale x 64 x float> [[INTERLEAVED_VEC]], ptr align 4 [[NEXT_GEP1]], <vscale x 64 x i1> [[INTERLEAVED_MASK9]]), !alias.scope [[META13:![0-9]+]], !noalias [[META15:![0-9]+]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP9]]
 ; CHECK-NEXT:    [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT:    br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT:    br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
 ; CHECK:       [[MIDDLE_BLOCK]]:
 ; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
 ; CHECK-NEXT:    br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
@@ -239,7 +291,7 @@ define void @test_masked_interleave_group(i32 %N, ptr %mask, ptr %src, ptr %dst)
 ; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP13]]
 ; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[N_VEC]]
 ; CHECK-NEXT:    [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], [[UMAX]]
-; CHECK-NEXT:    br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF15:![0-9]+]]
+; CHECK-NEXT:    br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]], !prof [[PROF17:![0-9]+]]
 ; CHECK:       [[VEC_EPILOG_PH]]:
 ; CHECK-NEXT:    [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
 ; CHECK-NEXT:    [[TMP22:%.*]] = call i64 @llvm.vscale.i64()
@@ -260,10 +312,10 @@ define void @test_masked_interleave_group(i32 %N, ptr %mask, ptr %src, ptr %dst)
 ; CHECK-NEXT:    [[OFFSET_IDX14:%.*]] = mul i64 [[INDEX12]], 16
 ; CHECK-NEXT:    [[NEXT_GEP15:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX14]]
 ; CHECK-NEXT:    [[NEXT_GEP16:%.*]] = getelementptr i8, ptr [[MASK]], i64 [[INDEX12]]
-; CHECK-NEXT:    [[WIDE_LOAD17:%.*]] = load <vscale x 8 x i8>, ptr [[NEXT_GEP16]], align 1, !alias.scope [[META6]]
+; CHECK-NEXT:    [[WIDE_LOAD17:%.*]] = load <vscale x 8 x i8>, ptr [[NEXT_GEP16]], align 1, !alias.scope [[META8]]
 ; CHECK-NEXT:    [[TMP30:%.*]] = icmp eq <vscale x 8 x i8> [[WIDE_LOAD17]], zeroinitializer
 ; CHECK-NEXT:    [[INTERLEAVED_MASK18:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave4.nxv32i1(<vscale x 8 x i1> [[TMP30]], <vscale x 8 x i1> [[TMP30]], <vscale x 8 x i1> [[TMP30]], <vscale x 8 x i1> [[TMP30]])
-; CHECK-NEXT:    [[WIDE_MASKED_VEC19:%.*]] = call <vscale x 32 x float> @llvm.masked.load.nxv32f32.p0(ptr align 4 [[NEXT_GEP15]], <vscale x 32 x i1> [[INTERLEAVED_MASK18]], <vscale x 32 x float> poison), !alias.scope [[META9]]
+; CHECK-NEXT:    [[WIDE_MASKED_VEC19:%.*]] = call <vscale x 32 x float> @llvm.masked.load.nxv32f32.p0(ptr align 4 [[NEXT_GEP15]], <vscale x 32 x i1> [[INTERLEAVED_MASK18]], <vscale x 32 x float> poison), !alias.scope [[META11]]
 ; CHECK-NEXT:    [[STRIDED_VEC20:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float> } @llvm.vector.deinterleave4.nxv32f32(<vscale x 32 x float> [[WIDE_MASKED_VEC19]])
 ; CHECK-NEXT:    [[TMP31:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float> } [[STRIDED_VEC20]], 0
 ; CHECK-NEXT:    [[TMP32:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float> } [[STRIDED_VEC20]], 1
@@ -271,10 +323,10 @@ define void @test_masked_interleave_group(i32 %N, ptr %mask, ptr %src, ptr %dst)
 ; CHECK-NEXT:    [[TMP34:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float> } [[STRIDED_VEC20]], 3
 ; CHECK-NEXT:    [[INTERLEAVED_VEC21:%.*]] = call <vscale x 32 x float> @llvm.vector.interleave4.nxv32f32(<vscale x 8 x float> [[TMP31]], <vscale x 8 x float> [[TMP32]], <vscale x 8 x float> [[TMP33]], <vscale x 8 x float> [[TMP34]])
 ; CHECK-NEXT:    [[INTERLEAVED_MASK22:%.*]] = call <vscale x 32 x i1> @llvm.vector.interleave4.nxv32i1(<vscale x 8 x i1> [[TMP30]], <vscale x 8 x i1> [[TMP30]], <vscale x 8 x i1> [[TMP30]], <vscale x 8 x i1> [[TMP30]])
-; CHECK-NEXT:    call void @llvm.masked.store.nxv32f32.p0(<vscale x 32 x float> [[INTERLEAVED_VEC21]], ptr align 4 [[NEXT_GEP13]], <vscale x 32 x i1> [[INTERLEAVED_MASK22]]), !alias.scope [[META11]], !noalias [[META13]]
+; CHECK-NEXT:    call void @llvm.masked.store.nxv32f32.p0(<vscale x 32 x float> [[INTERLEAVED_VEC21]], ptr align 4 [[NEXT_GEP13]], <vscale x 32 x i1> [[INTERLEAVED_MASK22]]), !alias.scope [[META13]], !noalias [[META15]]
 ; CHECK-NEXT:    [[INDEX_NEXT23]] = add nuw i64 [[INDEX12]], [[TMP23]]
 ; CHECK-NEXT:    [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT23]], [[INDEX]]
-; CHECK-NEXT:    br i1 [[TMP35]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT:    br i1 [[TMP35]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
 ; CHECK:       [[VEC_EPILOG_MIDDLE_BLOCK]]:
 ; CHECK-NEXT:    [[CMP_N24:%.*]] = icmp eq i64 [[TMP1]], [[INDEX]]
 ; CHECK-NEXT:    br i1 [[CMP_N24]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
@@ -314,7 +366,7 @@ define void @test_masked_interleave_group(i32 %N, ptr %mask, ptr %src, ptr %dst)
 ; CHECK-NEXT:    [[SRC_IV_NEXT]] = getelementptr i8, ptr [[SRC_IV]], i64 16
 ; CHECK-NEXT:    [[DST_IV_NEXT]] = getelementptr i8, ptr [[DST_IV]], i64 16
 ; CHECK-NEXT:    [[EC:%.*]] = icmp eq i32 [[IV]], [[N]]
-; CHECK-NEXT:    br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT:    br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP19:![0-9]+]]
 ; CHECK:       [[EXIT]]:
 ; CHECK-NEXT:    ret void
 ;

diff --git a/llvm/test/Transforms/LoopVectorize/VPlan/predicator.ll b/llvm/test/Transforms/LoopVectorize/VPlan/predicator.ll
new file mode 100644
index 0000000000000..e630095f25ef0
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/VPlan/predicator.ll
@@ -0,0 +1,389 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -disable-output < %s -p loop-vectorize -vplan-print-after=introduceMasksAndLinearize -vplan-print-vector-region-scope 2>&1 | FileCheck %s
+
+define void @diamond_phi(ptr %a) {
+; CHECK-LABEL: 'diamond_phi'
+; CHECK-NEXT:  <x1> vector loop: {
+; CHECK-NEXT:    vector.body:
+; CHECK-NEXT:      EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT:      ir<%iv> = WIDEN-INDUCTION nuw nsw ir<0>, ir<1>, vp<[[VP0:%[0-9]+]]>
+; CHECK-NEXT:      EMIT ir<%gep> = getelementptr ir<%a>, ir<%iv>
+; CHECK-NEXT:      EMIT ir<%c0> = icmp sle ir<%iv>, ir<0>
+; CHECK-NEXT:    Successor(s): bb2
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb2:
+; CHECK-NEXT:      EMIT vp<[[VP4:%[0-9]+]]> = not ir<%c0>
+; CHECK-NEXT:      EMIT ir<%add2> = add ir<%iv>, ir<2>, vp<[[VP4]]>
+; CHECK-NEXT:    Successor(s): bb1
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb1:
+; CHECK-NEXT:      EMIT ir<%add1> = add ir<%iv>, ir<1>, ir<%c0>
+; CHECK-NEXT:    Successor(s): bb4
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb4:
+; CHECK-NEXT:      EMIT vp<[[VP5:%[0-9]+]]> = or vp<[[VP4]]>, ir<%c0>
+; CHECK-NEXT:      BLEND ir<%phi4> = ir<%add2>/vp<[[VP4]]> ir<%add1>/ir<%c0>
+; CHECK-NEXT:      EMIT store ir<%phi4>, ir<%gep>, vp<[[VP5]]>
+; CHECK-NEXT:      EMIT ir<%iv.next> = add nuw nsw ir<%iv>, ir<1>, vp<[[VP5]]>
+; CHECK-NEXT:      EMIT ir<%ec> = icmp eq ir<%iv.next>, ir<128>, vp<[[VP5]]>
+; CHECK-NEXT:      EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1:%[0-9]+]]>
+; CHECK-NEXT:      EMIT branch-on-count vp<%index.next>, vp<[[VP2:%[0-9]+]]>
+; CHECK-NEXT:    No successors
+; CHECK-NEXT:  }
+; CHECK-NEXT:  Successor(s): middle.block
+;
+entry:
+  br label %bb0
+
+bb0:
+;          bb0
+;         /  \
+;       bb1  bb2
+;         \  /
+;          bb4
+; TODO: bb4 should be unmasked.
+  %iv = phi i64 [0, %entry], [%iv.next, %bb4]
+  %gep = getelementptr i64, ptr %a, i64 %iv
+  %c0 = icmp sle i64 %iv, 0
+  br i1 %c0, label %bb1, label %bb2
+
+bb1:
+  %add1 = add i64 %iv, 1
+  br label %bb4
+
+bb2:
+  %add2 = add i64 %iv, 2
+  br label %bb4
+
+bb4:
+  %phi4 = phi i64 [%add1, %bb1], [%add2, %bb2]
+  store i64 %phi4, ptr %gep
+  %iv.next = add nsw nuw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, 128
+  br i1 %ec, label %exit, label %bb0
+
+exit:
+  ret void
+}
+
+define void @mask_reuse(ptr %a) {
+; CHECK-LABEL: 'mask_reuse'
+; CHECK-NEXT:  <x1> vector loop: {
+; CHECK-NEXT:    vector.body:
+; CHECK-NEXT:      EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT:      ir<%iv> = WIDEN-INDUCTION nuw nsw ir<0>, ir<1>, vp<[[VP0:%[0-9]+]]>
+; CHECK-NEXT:      EMIT ir<%gep> = getelementptr ir<%a>, ir<%iv>
+; CHECK-NEXT:      EMIT ir<%c0> = icmp sle ir<%iv>, ir<0>
+; CHECK-NEXT:      EMIT ir<%add0> = add ir<%iv>, ir<0>
+; CHECK-NEXT:    Successor(s): bb1
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb1:
+; CHECK-NEXT:      EMIT ir<%add1> = add ir<%iv>, ir<1>, ir<%c0>
+; CHECK-NEXT:      EMIT ir<%c1> = icmp sle ir<%iv>, ir<1>, ir<%c0>
+; CHECK-NEXT:    Successor(s): bb2
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb2:
+; CHECK-NEXT:      EMIT vp<[[VP4:%[0-9]+]]> = logical-and ir<%c0>, ir<%c1>
+; CHECK-NEXT:      EMIT ir<%add2> = add ir<%iv>, ir<2>, vp<[[VP4]]>
+; CHECK-NEXT:    Successor(s): bb3
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb3:
+; CHECK-NEXT:      EMIT vp<[[VP5:%[0-9]+]]> = not ir<%c1>
+; CHECK-NEXT:      EMIT vp<[[VP6:%[0-9]+]]> = logical-and ir<%c0>, vp<[[VP5]]>
+; CHECK-NEXT:      EMIT vp<[[VP7:%[0-9]+]]> = or vp<[[VP4]]>, vp<[[VP6]]>
+; CHECK-NEXT:      BLEND ir<%phi3> = ir<%add2>/vp<[[VP4]]> ir<%add1>/vp<[[VP6]]>
+; CHECK-NEXT:      EMIT ir<%add3> = add ir<%iv>, ir<3>, vp<[[VP7]]>
+; CHECK-NEXT:    Successor(s): bb4
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb4:
+; CHECK-NEXT:      EMIT vp<[[VP8:%[0-9]+]]> = not ir<%c0>
+; CHECK-NEXT:      EMIT vp<[[VP9:%[0-9]+]]> = or vp<[[VP7]]>, vp<[[VP8]]>
+; CHECK-NEXT:      BLEND ir<%phi4> = ir<%add3>/vp<[[VP7]]> ir<%add0>/vp<[[VP8]]>
+; CHECK-NEXT:      EMIT store ir<%phi4>, ir<%gep>, vp<[[VP9]]>
+; CHECK-NEXT:      EMIT ir<%iv.next> = add nuw nsw ir<%iv>, ir<1>, vp<[[VP9]]>
+; CHECK-NEXT:      EMIT ir<%ec> = icmp eq ir<%iv.next>, ir<128>, vp<[[VP9]]>
+; CHECK-NEXT:      EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1:%[0-9]+]]>
+; CHECK-NEXT:      EMIT branch-on-count vp<%index.next>, vp<[[VP2:%[0-9]+]]>
+; CHECK-NEXT:    No successors
+; CHECK-NEXT:  }
+; CHECK-NEXT:  Successor(s): middle.block
+;
+entry:
+  br label %bb0
+
+bb0:
+;         bb0:
+;         / \
+;      bb1   \
+;       /\    \
+;     bb2 |   |
+;      \  |   |
+;       bb3  /
+;         \ /
+;         bb4
+; TODO: bb3 can reuse bb1's mask and bb4 should be unmasked.
+  %iv = phi i64 [0, %entry], [%iv.next, %bb4]
+  %gep = getelementptr i64, ptr %a, i64 %iv
+  %c0 = icmp sle i64 %iv, 0
+  %add0 = add i64 %iv, 0
+  br i1 %c0, label %bb1, label %bb4
+
+bb1:
+  %add1 = add i64 %iv, 1
+  %c1 = icmp sle i64 %iv, 1
+  br i1 %c1, label %bb2, label %bb3
+
+bb2:
+  %add2 = add i64 %iv, 2
+  br label %bb3
+
+bb3:
+  %phi3 = phi i64 [%add1, %bb1], [%add2, %bb2]
+  %add3 = add i64 %iv, 3
+  br label %bb4
+
+bb4:
+  %phi4 = phi i64 [%add3, %bb3], [%add0, %bb0]
+  store i64 %phi4, ptr %gep
+  %iv.next = add nsw nuw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, 128
+  br i1 %ec, label %exit, label %bb0
+
+exit:
+  ret void
+}
+
+define void @optimized_mask(ptr %a) {
+; CHECK-LABEL: 'optimized_mask'
+; CHECK-NEXT:  <x1> vector loop: {
+; CHECK-NEXT:    vector.body:
+; CHECK-NEXT:      EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT:      ir<%iv> = WIDEN-INDUCTION nuw nsw ir<0>, ir<1>, vp<[[VP0:%[0-9]+]]>
+; CHECK-NEXT:      EMIT ir<%gep> = getelementptr ir<%a>, ir<%iv>
+; CHECK-NEXT:      EMIT ir<%c0> = icmp sle ir<%iv>, ir<0>
+; CHECK-NEXT:    Successor(s): bb6
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb6:
+; CHECK-NEXT:      EMIT vp<[[VP4:%[0-9]+]]> = not ir<%c0>
+; CHECK-NEXT:      EMIT ir<%add6> = add ir<%iv>, ir<6>, vp<[[VP4]]>
+; CHECK-NEXT:      EMIT ir<%c6> = icmp sle ir<%iv>, ir<6>, vp<[[VP4]]>
+; CHECK-NEXT:    Successor(s): bb1
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb1:
+; CHECK-NEXT:      EMIT ir<%add1> = add ir<%iv>, ir<1>, ir<%c0>
+; CHECK-NEXT:      EMIT ir<%c1> = icmp sle ir<%iv>, ir<1>, ir<%c0>
+; CHECK-NEXT:    Successor(s): bb3
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb3:
+; CHECK-NEXT:      EMIT vp<[[VP5:%[0-9]+]]> = not ir<%c1>
+; CHECK-NEXT:      EMIT vp<[[VP6:%[0-9]+]]> = logical-and ir<%c0>, vp<[[VP5]]>
+; CHECK-NEXT:      EMIT ir<%add3> = add ir<%iv>, ir<3>, vp<[[VP6]]>
+; CHECK-NEXT:      EMIT ir<%c3> = icmp sle ir<%iv>, ir<3>, vp<[[VP6]]>
+; CHECK-NEXT:    Successor(s): bb2
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb2:
+; CHECK-NEXT:      EMIT vp<[[VP7:%[0-9]+]]> = logical-and ir<%c0>, ir<%c1>
+; CHECK-NEXT:      EMIT ir<%add2> = add ir<%iv>, ir<2>, vp<[[VP7]]>
+; CHECK-NEXT:    Successor(s): bb4
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb4:
+; CHECK-NEXT:      EMIT vp<[[VP8:%[0-9]+]]> = logical-and vp<[[VP6]]>, ir<%c3>
+; CHECK-NEXT:      EMIT vp<[[VP9:%[0-9]+]]> = or vp<[[VP8]]>, vp<[[VP7]]>
+; CHECK-NEXT:      BLEND ir<%phi4> = ir<%add3>/vp<[[VP8]]> ir<%add2>/vp<[[VP7]]>
+; CHECK-NEXT:      EMIT ir<%add4> = add ir<%iv>, ir<4>, vp<[[VP9]]>
+; CHECK-NEXT:    Successor(s): bb5
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb5:
+; CHECK-NEXT:      EMIT vp<[[VP10:%[0-9]+]]> = logical-and vp<[[VP4]]>, ir<%c6>
+; CHECK-NEXT:      EMIT vp<[[VP11:%[0-9]+]]> = or vp<[[VP10]]>, vp<[[VP9]]>
+; CHECK-NEXT:      EMIT vp<[[VP12:%[0-9]+]]> = not ir<%c3>
+; CHECK-NEXT:      EMIT vp<[[VP13:%[0-9]+]]> = logical-and vp<[[VP6]]>, vp<[[VP12]]>
+; CHECK-NEXT:      EMIT vp<[[VP14:%[0-9]+]]> = or vp<[[VP11]]>, vp<[[VP13]]>
+; CHECK-NEXT:      BLEND ir<%phi5> = ir<%add6>/vp<[[VP10]]> ir<%add4>/vp<[[VP9]]> ir<%add3>/vp<[[VP13]]>
+; CHECK-NEXT:      EMIT ir<%add5> = add ir<%iv>, ir<5>, vp<[[VP14]]>
+; CHECK-NEXT:    Successor(s): bb7
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb7:
+; CHECK-NEXT:      EMIT vp<[[VP15:%[0-9]+]]> = not ir<%c6>
+; CHECK-NEXT:      EMIT vp<[[VP16:%[0-9]+]]> = logical-and vp<[[VP4]]>, vp<[[VP15]]>
+; CHECK-NEXT:      EMIT vp<[[VP17:%[0-9]+]]> = or vp<[[VP16]]>, vp<[[VP14]]>
+; CHECK-NEXT:      BLEND ir<%phi7> = ir<%add6>/vp<[[VP16]]> ir<%add5>/vp<[[VP14]]>
+; CHECK-NEXT:      EMIT store ir<%phi7>, ir<%gep>, vp<[[VP17]]>
+; CHECK-NEXT:      EMIT ir<%iv.next> = add nuw nsw ir<%iv>, ir<1>, vp<[[VP17]]>
+; CHECK-NEXT:      EMIT ir<%ec> = icmp eq ir<%iv.next>, ir<128>, vp<[[VP17]]>
+; CHECK-NEXT:      EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1:%[0-9]+]]>
+; CHECK-NEXT:      EMIT branch-on-count vp<%index.next>, vp<[[VP2:%[0-9]+]]>
+; CHECK-NEXT:    No successors
+; CHECK-NEXT:  }
+; CHECK-NEXT:  Successor(s): middle.block
+;
+entry:
+  br label %bb0
+
+bb0:
+;               bb0:
+;              /   \
+;             /     \
+;          bb1       bb6
+;          /  \      / |
+;        bb2  bb3   /  /
+;          \  /|   /  /
+;           bb4|  /  /
+;            \ | /  /
+;             bb5  /
+;               \ /
+;               bb7
+; TODO: bb5's mask shouldn't depend on c1/c3.
+  %iv = phi i64 [0, %entry], [%iv.next, %bb7]
+  %gep = getelementptr i64, ptr %a, i64 %iv
+  %c0 = icmp sle i64 %iv, 0
+  br i1 %c0, label %bb1, label %bb6
+
+bb1:
+  %add1 = add i64 %iv, 1
+  %c1 = icmp sle i64 %iv, 1
+  br i1 %c1, label %bb2, label %bb3
+
+bb2:
+  %add2 = add i64 %iv, 2
+  br label %bb4
+
+bb3:
+  %add3 = add i64 %iv, 3
+  %c3 = icmp sle i64 %iv, 3
+  br i1 %c3, label %bb4, label %bb5
+
+bb4:
+  %phi4 = phi i64 [%add2, %bb2], [%add3, %bb3]
+  %add4 = add i64 %iv, 4
+  br label %bb5
+
+bb5:
+  %phi5 = phi i64 [%add4, %bb4], [%add3, %bb3], [%add6, %bb6]
+  %add5 = add i64 %iv, 5
+  br label %bb7
+
+bb6:
+  %add6 = add i64 %iv, 6
+  %c6 = icmp sle i64 %iv, 6
+  br i1 %c6, label %bb5, label %bb7
+
+bb7:
+  %phi7 = phi i64 [%add5, %bb5], [%add6, %bb6]
+  store i64 %phi7, ptr %gep
+  %iv.next = add nsw nuw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, 128
+  br i1 %ec, label %exit, label %bb0
+
+exit:
+  ret void
+}
+
+define void @switch(ptr %a) {
+; CHECK-LABEL: 'switch'
+; CHECK-NEXT:  <x1> vector loop: {
+; CHECK-NEXT:    vector.body:
+; CHECK-NEXT:      EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT:      ir<%iv> = WIDEN-INDUCTION nuw nsw ir<0>, ir<1>, vp<[[VP0:%[0-9]+]]>
+; CHECK-NEXT:      EMIT ir<%gep> = getelementptr ir<%a>, ir<%iv>
+; CHECK-NEXT:      EMIT ir<%c0> = icmp sle ir<%iv>, ir<0>
+; CHECK-NEXT:      EMIT ir<%add0> = add ir<%iv>, ir<0>
+; CHECK-NEXT:    Successor(s): bb2
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb2:
+; CHECK-NEXT:      EMIT vp<[[VP4:%[0-9]+]]> = not ir<%c0>
+; CHECK-NEXT:      EMIT ir<%add2> = add ir<%iv>, ir<2>, vp<[[VP4]]>
+; CHECK-NEXT:      EMIT ir<%c2> = icmp sle ir<%iv>, ir<2>, vp<[[VP4]]>
+; CHECK-NEXT:    Successor(s): bb1
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb1:
+; CHECK-NEXT:      EMIT ir<%add1> = add ir<%iv>, ir<1>, ir<%c0>
+; CHECK-NEXT:    Successor(s): bb3
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb3:
+; CHECK-NEXT:      EMIT vp<[[VP5:%[0-9]+]]> = logical-and vp<[[VP4]]>, ir<%c2>
+; CHECK-NEXT:      EMIT vp<[[VP6:%[0-9]+]]> = icmp eq ir<%iv>, ir<1>
+; CHECK-NEXT:      EMIT vp<[[VP7:%[0-9]+]]> = icmp eq ir<%iv>, ir<2>
+; CHECK-NEXT:      EMIT vp<[[VP8:%[0-9]+]]> = icmp eq ir<%iv>, ir<3>
+; CHECK-NEXT:      EMIT vp<[[VP9:%[0-9]+]]> = logical-and ir<%c0>, vp<[[VP6]]>
+; CHECK-NEXT:      EMIT vp<[[VP10:%[0-9]+]]> = or vp<[[VP7]]>, vp<[[VP8]]>
+; CHECK-NEXT:      EMIT vp<[[VP11:%[0-9]+]]> = logical-and ir<%c0>, vp<[[VP10]]>
+; CHECK-NEXT:      EMIT vp<[[VP12:%[0-9]+]]> = or vp<[[VP9]]>, vp<[[VP11]]>
+; CHECK-NEXT:      EMIT vp<[[VP13:%[0-9]+]]> = not vp<[[VP12]]>
+; CHECK-NEXT:      EMIT vp<[[VP14:%[0-9]+]]> = logical-and ir<%c0>, vp<[[VP13]]>
+; CHECK-NEXT:      EMIT vp<[[VP15:%[0-9]+]]> = or vp<[[VP5]]>, vp<[[VP11]]>
+; CHECK-NEXT:      BLEND ir<%phi3> = ir<%add2>/vp<[[VP5]]> ir<%add1>/vp<[[VP11]]> ir<%add1>/vp<[[VP11]]>
+; CHECK-NEXT:      EMIT ir<%add3> = add ir<%iv>, ir<3>, vp<[[VP15]]>
+; CHECK-NEXT:    Successor(s): bb4
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb4:
+; CHECK-NEXT:      EMIT ir<%add4> = add ir<%iv>, ir<4>, vp<[[VP9]]>
+; CHECK-NEXT:    Successor(s): bb5
+; CHECK-EMPTY:
+; CHECK-NEXT:    bb5:
+; CHECK-NEXT:      EMIT vp<[[VP16:%[0-9]+]]> = or vp<[[VP9]]>, vp<[[VP15]]>
+; CHECK-NEXT:      EMIT vp<[[VP17:%[0-9]+]]> = not ir<%c2>
+; CHECK-NEXT:      EMIT vp<[[VP18:%[0-9]+]]> = logical-and vp<[[VP4]]>, vp<[[VP17]]>
+; CHECK-NEXT:      EMIT vp<[[VP19:%[0-9]+]]> = or vp<[[VP16]]>, vp<[[VP18]]>
+; CHECK-NEXT:      EMIT vp<[[VP20:%[0-9]+]]> = or vp<[[VP19]]>, vp<[[VP14]]>
+; CHECK-NEXT:      BLEND ir<%phi5> = ir<%add4>/vp<[[VP9]]> ir<%add3>/vp<[[VP15]]> ir<%add2>/vp<[[VP18]]> ir<%add1>/vp<[[VP14]]>
+; CHECK-NEXT:      EMIT store ir<%phi5>, ir<%gep>, vp<[[VP20]]>
+; CHECK-NEXT:      EMIT ir<%iv.next> = add nuw nsw ir<%iv>, ir<1>, vp<[[VP20]]>
+; CHECK-NEXT:      EMIT ir<%ec> = icmp eq ir<%iv.next>, ir<128>, vp<[[VP20]]>
+; CHECK-NEXT:      EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1:%[0-9]+]]>
+; CHECK-NEXT:      EMIT branch-on-count vp<%index.next>, vp<[[VP2:%[0-9]+]]>
+; CHECK-NEXT:    No successors
+; CHECK-NEXT:  }
+; CHECK-NEXT:  Successor(s): middle.block
+;
+entry:
+  br label %bb0
+
+bb0:
+;         bb0:
+;         / \
+;      bb1-+ bb2
+;     / | \| /\
+;     \bb4 bb3|
+;      \ \ | /
+;       +>bb5
+; Test for blends at switch destinations, including multiple edges from switch
+; to a single block (bb3).
+  %iv = phi i64 [0, %entry], [%iv.next, %bb5]
+  %gep = getelementptr i64, ptr %a, i64 %iv
+  %c0 = icmp sle i64 %iv, 0
+  %add0 = add i64 %iv, 0
+  br i1 %c0, label %bb1, label %bb2
+
+bb1:
+  %add1 = add i64 %iv, 1
+  switch i64 %iv, label %bb5 [
+    i64 1, label %bb4
+    i64 2, label %bb3
+    i64 3, label %bb3
+  ]
+
+bb2:
+  %add2 = add i64 %iv, 2
+  %c2 = icmp sle i64 %iv, 2
+  br i1 %c2, label %bb3, label %bb5
+
+bb3:
+  %phi3 = phi i64 [%add1, %bb1], [%add1, %bb1], [%add2, %bb2]
+  %add3 = add i64 %iv, 3
+  br label %bb5
+
+bb4:
+  %add4 = add i64 %iv, 4
+  br label %bb5
+
+bb5:
+  %phi5 = phi i64 [%add1, %bb1], [%add2, %bb2], [%add3, %bb3], [%add4, %bb4]
+  store i64 %phi5, ptr %gep
+  %iv.next = add nsw nuw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, 128
+  br i1 %ec, label %exit, label %bb0
+
+exit:
+  ret void
+}

diff --git a/llvm/test/Transforms/LoopVectorize/VPlan/vplan-print-after-all.ll b/llvm/test/Transforms/LoopVectorize/VPlan/vplan-print-after-all.ll
index 478fa82ccd218..bc9367942ac27 100644
--- a/llvm/test/Transforms/LoopVectorize/VPlan/vplan-print-after-all.ll
+++ b/llvm/test/Transforms/LoopVectorize/VPlan/vplan-print-after-all.ll
@@ -4,6 +4,7 @@
 ; Verify that `-vplan-print-after-all` option works.
 
 ; CHECK: VPlan for loop in 'foo' after printAfterInitialConstruction
+; CHECK: VPlan for loop in 'foo' after VPlanTransforms::introduceMasksAndLinearize
 ; CHECK: VPlan for loop in 'foo' after VPlanTransforms::clearReductionWrapFlags
 ; CHECK: VPlan for loop in 'foo' after VPlanTransforms::optimizeFindIVReductions
 ; CHECK: VPlan for loop in 'foo' after VPlanTransforms::handleMultiUseReductions


        

