[llvm] [LoadStoreVectorizer] Fill gaps in load/store chains to enable vectorization (PR #159388)

Drew Kersnar via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 21 15:42:56 PST 2025


https://github.com/dakersnar updated https://github.com/llvm/llvm-project/pull/159388

From 81b05344e6143ed9b901c8fba945b2c1a140d499 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Wed, 17 Sep 2025 15:32:39 +0000
Subject: [PATCH 01/24] [LoadStoreVectorizer] Fill gaps in loads/stores to
 enable vectorization

---
 .../llvm/Analysis/TargetTransformInfo.h       |   6 +
 .../llvm/Analysis/TargetTransformInfoImpl.h   |   2 +
 llvm/lib/Analysis/TargetTransformInfo.cpp     |   4 +
 .../Target/NVPTX/NVPTXTargetTransformInfo.h   |   2 +
 .../Vectorize/LoadStoreVectorizer.cpp         | 435 ++++++++++++--
 .../test/CodeGen/NVPTX/LoadStoreVectorizer.ll |  40 +-
 .../CodeGen/NVPTX/param-vectorize-device.ll   |   6 +-
 llvm/test/CodeGen/NVPTX/variadics-backend.ll  |   2 +-
 .../LoadStoreVectorizer/NVPTX/extend-chain.ll |  81 +++
 .../NVPTX/gap-fill-cleanup.ll                 |  37 ++
 .../NVPTX/gap-fill-invariant.ll               |  83 +++
 .../NVPTX/gap-fill-vectors.ll                 | 186 ++++++
 .../LoadStoreVectorizer/NVPTX/gap-fill.ll     | 194 +++++++
 .../LoadStoreVectorizer/NVPTX/masked-store.ll | 541 ++++++++++++++++++
 .../LoadStoreVectorizer/NVPTX/vectorize_i8.ll |   3 +-
 15 files changed, 1544 insertions(+), 78 deletions(-)
 create mode 100644 llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll
 create mode 100644 llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-cleanup.ll
 create mode 100644 llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-invariant.ll
 create mode 100644 llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll
 create mode 100644 llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill.ll
 create mode 100644 llvm/test/Transforms/LoadStoreVectorizer/NVPTX/masked-store.ll
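
At a high level, the patch lets the LSV synthesize the missing element of an
almost-contiguous chain so the whole chain can be vectorized. A minimal sketch
in LLVM IR of the load case (based on the gap-fill.ll test added below; value
names are illustrative, not taken verbatim from the pass output):

  ; Before: i32 loads at byte offsets 0, 4, and 12; offset 8 is unused.
  %ld0 = load i32, ptr %p, align 16
  %gep1 = getelementptr inbounds i8, ptr %p, i32 4
  %ld1 = load i32, ptr %gep1, align 4
  %gep3 = getelementptr inbounds i8, ptr %p, i32 12
  %ld3 = load i32, ptr %gep3, align 4

  ; After: the pass inserts a "GapFill" load for offset 8 and vectorizes.
  %vec = load <4 x i32>, ptr %p, align 16
  %ld0 = extractelement <4 x i32> %vec, i32 0
  %ld1 = extractelement <4 x i32> %vec, i32 1
  %gapfill = extractelement <4 x i32> %vec, i32 2
  %ld3 = extractelement <4 x i32> %vec, i32 3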

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 7b7dc1b46dd80..45355d1732c83 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -823,6 +823,12 @@ class TargetTransformInfo {
   LLVM_ABI bool isLegalMaskedLoad(Type *DataType, Align Alignment,
                                   unsigned AddressSpace) const;
 
+  /// Return true if it is legal to widen loads beyond their current width,
+  /// assuming the result is still well-aligned. For example, converting a load
+  /// i32 to a load i64, or vectorizing three contiguous load i32s into a load
+  /// <4 x i32>.
+  LLVM_ABI bool isLegalToWidenLoads() const;
+
   /// Return true if the target supports nontemporal store.
   LLVM_ABI bool isLegalNTStore(Type *DataType, Align Alignment) const;
   /// Return true if the target supports nontemporal load.
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 4cd607c0d0c8d..979d20c2ec299 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -318,6 +318,8 @@ class TargetTransformInfoImplBase {
     return false;
   }
 
+  virtual bool isLegalToWidenLoads() const { return false; }
+
   virtual bool isLegalNTStore(Type *DataType, Align Alignment) const {
     // By default, assume nontemporal memory stores are available for stores
     // that are aligned and have a size that is a power of 2.
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index c47a1c1b23a37..b9be4ca569f73 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -477,6 +477,10 @@ bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType, Align Alignment,
   return TTIImpl->isLegalMaskedLoad(DataType, Alignment, AddressSpace);
 }
 
+bool TargetTransformInfo::isLegalToWidenLoads() const {
+  return TTIImpl->isLegalToWidenLoads();
+}
+
 bool TargetTransformInfo::isLegalNTStore(Type *DataType,
                                          Align Alignment) const {
   return TTIImpl->isLegalNTStore(DataType, Alignment);
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
index 78eb751cf3c2e..6cc891a2db591 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
@@ -72,6 +72,8 @@ class NVPTXTTIImpl final : public BasicTTIImplBase<NVPTXTTIImpl> {
     return isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment, AddrSpace);
   }
 
+  bool isLegalToWidenLoads() const override { return true; }
+
   // NVPTX has infinite registers of all kinds, but the actual machine doesn't.
   // We conservatively return 1 here which is just enough to enable the
   // vectorizers but disables heuristics based on the number of registers.
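
For reference, the kind of widening the new isLegalToWidenLoads() hook is meant
to permit looks roughly like this in IR (a sketch mirroring the load3to4 test in
extend-chain.ll below, assuming the extra bytes are dereferenceable and the
access stays well-aligned; names are illustrative):

  ; Three contiguous i32 loads from a 16-byte-aligned pointer ...
  %a = load i32, ptr %p, align 16
  %q = getelementptr i8, ptr %p, i64 4
  %b = load i32, ptr %q, align 4
  %r = getelementptr i8, ptr %p, i64 8
  %c = load i32, ptr %r, align 8

  ; ... may become a single <4 x i32> load whose last lane is simply unused.
  %v = load <4 x i32>, ptr %p, align 16
  %a = extractelement <4 x i32> %v, i32 0
  %b = extractelement <4 x i32> %v, i32 1
  %c = extractelement <4 x i32> %v, i32 2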
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 7b5137b0185ab..04f4e92826a52 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -119,6 +119,29 @@ using namespace llvm;
 
 #define DEBUG_TYPE "load-store-vectorizer"
 
+cl::opt<bool>
+    ExtendLoads("vect-extend-loads", cl::Hidden,
+                cl::desc("Load more elements if the target VF is higher "
+                         "than the chain length."),
+                cl::init(true));
+
+cl::opt<bool> ExtendStores(
+    "vect-extend-stores", cl::Hidden,
+    cl::desc("Store more elements if the target VF is higher "
+             "than the chain length and we have access to masked stores."),
+    cl::init(true));
+
+cl::opt<bool> FillLoadGaps(
+    "vect-fill-load-gaps", cl::Hidden,
+    cl::desc("Should Loads be introduced in gaps to enable vectorization."),
+    cl::init(true));
+
+cl::opt<bool>
+    FillStoreGaps("vect-fill-store-gaps", cl::Hidden,
+                  cl::desc("Should Stores be introduced in gaps to enable "
+                           "vectorization into masked stores."),
+                  cl::init(true));
+
 STATISTIC(NumVectorInstructions, "Number of vector accesses generated");
 STATISTIC(NumScalarsVectorized, "Number of scalar accesses vectorized");
 
@@ -246,12 +269,16 @@ class Vectorizer {
   const DataLayout &DL;
   IRBuilder<> Builder;
 
-  // We could erase instrs right after vectorizing them, but that can mess up
-  // our BB iterators, and also can make the equivalence class keys point to
-  // freed memory.  This is fixable, but it's simpler just to wait until we're
-  // done with the BB and erase all at once.
+  /// We could erase instrs right after vectorizing them, but that can mess up
+  /// our BB iterators, and also can make the equivalence class keys point to
+  /// freed memory.  This is fixable, but it's simpler just to wait until we're
+  /// done with the BB and erase all at once.
   SmallVector<Instruction *, 128> ToErase;
 
+  /// We insert load/store instructions and GEPs to fill gaps and extend chains
+  /// to enable vectorization. Keep track of them and delete them later.
+  DenseSet<Instruction *> ExtraElements;
+
 public:
   Vectorizer(Function &F, AliasAnalysis &AA, AssumptionCache &AC,
              DominatorTree &DT, ScalarEvolution &SE, TargetTransformInfo &TTI)
@@ -344,6 +371,28 @@ class Vectorizer {
   /// Postcondition: For all i, ret[i][0].second == 0, because the first instr
   /// in the chain is the leader, and an instr touches distance 0 from itself.
   std::vector<Chain> gatherChains(ArrayRef<Instruction *> Instrs);
+
+  /// Returns true if a load/store with this alignment is allowed by TTI and
+  /// at least as fast as an unvectorized load/store.
+  bool accessIsAllowedAndFast(unsigned SizeBytes, unsigned AS, Align Alignment,
+                              unsigned VecElemBits) const;
+
+  /// Before attempting to fill gaps, check whether the chain is a candidate
+  /// for a masked store, to save compile time when masked stores are not
+  /// possible for the chain's address space and element type.
+  bool shouldAttemptMaskedStore(const ArrayRef<ChainElem> C) const;
+
+  /// Create a new GEP and a new load/store instruction such that the GEP
+  /// points to PrevElem + Offset. In the case of stores, the new instruction
+  /// stores poison. Extra elements will either be combined into a
+  /// vector/masked store or deleted before the end of the pass.
+  ChainElem createExtraElementAfter(const ChainElem &PrevElem, APInt Offset,
+                                    StringRef Prefix,
+                                    Align Alignment = Align(1));
+
+  /// Delete dead GEPs and extra Load/Store instructions created by
+  /// createExtraElementAfter.
+  void deleteExtraElements();
 };
 
 class LoadStoreVectorizerLegacyPass : public FunctionPass {
@@ -457,12 +506,21 @@ bool Vectorizer::run() {
       Changed |= runOnPseudoBB(*It, *std::next(It));
 
     for (Instruction *I : ToErase) {
+      // Skip extra elements here; they are deleted in deleteExtraElements.
+      // ExtraElements contains both the extra elements that *were* vectorized
+      // and the extra elements that *were not*, whereas ToErase only contains
+      // extra elements that *were* vectorized. Skipping them here and handling
+      // them in deleteExtraElements avoids deleting the same instruction
+      // twice.
+      if (ExtraElements.contains(I))
+        continue;
       auto *PtrOperand = getLoadStorePointerOperand(I);
       if (I->use_empty())
         I->eraseFromParent();
       RecursivelyDeleteTriviallyDeadInstructions(PtrOperand);
     }
     ToErase.clear();
+    deleteExtraElements();
   }
 
   return Changed;
@@ -623,6 +681,29 @@ std::vector<Chain> Vectorizer::splitChainByContiguity(Chain &C) {
     dumpChain(C);
   });
 
+  // If the chain is not contiguous, we try to fill the gaps with "extra"
+  // elements to artificially make it contiguous and enable vectorization.
+  // - Filling gaps in loads is always OK if the target supports widening
+  //   loads.
+  // - For stores, we only fill gaps if there is a potentially legal masked
+  //   store for the target. If we don't end up with a chain that could be
+  //   vectorized into a legal masked store, the chains with extra elements
+  //   will be filtered out in splitChainByAlignment.
+  bool TryFillGaps = isa<LoadInst>(C[0].Inst)
+                         ? (FillLoadGaps && TTI.isLegalToWidenLoads())
+                         : (FillStoreGaps && shouldAttemptMaskedStore(C));
+
+  unsigned ASPtrBits =
+      DL.getIndexSizeInBits(getLoadStoreAddressSpace(C[0].Inst));
+
+  // Compute the alignment of the leader of the chain (which every stored
+  // offset is based on) using the current first element of the chain. This is
+  // conservative; we may be able to derive a better alignment by iterating
+  // over the chain and finding the leader.
+  Align LeaderOfChainAlign =
+      commonAlignment(getLoadStoreAlignment(C[0].Inst),
+                      C[0].OffsetFromLeader.abs().getLimitedValue());
+
   std::vector<Chain> Ret;
   Ret.push_back({C.front()});
 
@@ -633,7 +714,8 @@ std::vector<Chain> Vectorizer::splitChainByContiguity(Chain &C) {
     unsigned SzBits = DL.getTypeSizeInBits(getLoadStoreType(&*Prev.Inst));
     assert(SzBits % 8 == 0 && "Non-byte sizes should have been filtered out by "
                               "collectEquivalenceClass");
-    APInt PrevReadEnd = Prev.OffsetFromLeader + SzBits / 8;
+    APInt PrevSzBytes = APInt(ASPtrBits, SzBits / 8);
+    APInt PrevReadEnd = Prev.OffsetFromLeader + PrevSzBytes;
 
     // Add this instruction to the end of the current chain, or start a new one.
     bool AreContiguous = It->OffsetFromLeader == PrevReadEnd;
@@ -642,10 +724,54 @@ std::vector<Chain> Vectorizer::splitChainByContiguity(Chain &C) {
                       << *Prev.Inst << " (ends at offset " << PrevReadEnd
                       << ") -> " << *It->Inst << " (starts at offset "
                       << It->OffsetFromLeader << ")\n");
-    if (AreContiguous)
+
+    if (AreContiguous) {
       CurChain.push_back(*It);
-    else
-      Ret.push_back({*It});
+      continue;
+    }
+
+    // For now, we don't fill gaps between loads/stores of different sizes.
+    // Additionally, as a conservative heuristic, we only fill gaps of 1-2
+    // elements. Generating loads/stores with too many unused bytes has the
+    // side effect of increasing register pressure (on NVIDIA targets at
+    // least), which could cancel out the benefit of issuing fewer loads/stores.
+    if (TryFillGaps &&
+        SzBits == DL.getTypeSizeInBits(getLoadStoreType(It->Inst))) {
+      APInt OffsetOfGapStart = Prev.OffsetFromLeader + PrevSzBytes;
+      APInt GapSzBytes = It->OffsetFromLeader - OffsetOfGapStart;
+      if (GapSzBytes == PrevSzBytes) {
+        // Single-element gap between Prev and Curr; create one extra element.
+        ChainElem NewElem = createExtraElementAfter(
+            Prev, PrevSzBytes, "GapFill",
+            commonAlignment(LeaderOfChainAlign,
+                            OffsetOfGapStart.abs().getLimitedValue()));
+        CurChain.push_back(NewElem);
+        CurChain.push_back(*It);
+        continue;
+      }
+      // There is a two-element gap between Prev and Curr; only create two
+      // extra elements if Prev is the first element in a sequence of four.
+      // This has the highest chance of resulting in a beneficial vectorization.
+      if ((GapSzBytes == 2 * PrevSzBytes) && (CurChain.size() % 4 == 1)) {
+        ChainElem NewElem1 = createExtraElementAfter(
+            Prev, PrevSzBytes, "GapFill",
+            commonAlignment(LeaderOfChainAlign,
+                            OffsetOfGapStart.abs().getLimitedValue()));
+        ChainElem NewElem2 = createExtraElementAfter(
+            NewElem1, PrevSzBytes, "GapFill",
+            commonAlignment(
+                LeaderOfChainAlign,
+                (OffsetOfGapStart + PrevSzBytes).abs().getLimitedValue()));
+        CurChain.push_back(NewElem1);
+        CurChain.push_back(NewElem2);
+        CurChain.push_back(*It);
+        continue;
+      }
+    }
+
+    // The chain is not contiguous and cannot be made contiguous with gap
+    // filling, so we need to start a new chain.
+    Ret.push_back({*It});
   }
 
   // Filter out length-1 chains, these are uninteresting.
@@ -721,6 +847,14 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
   unsigned AS = getLoadStoreAddressSpace(C[0].Inst);
   unsigned VecRegBytes = TTI.getLoadStoreVecRegBitWidth(AS) / 8;
 
+  // To save compile time, we cache whether or not the superset of all
+  // candidate chains contains any extra stores from earlier gap filling,
+  // so the per-candidate legality check below can be skipped when it doesn't.
+  bool CandidateChainsMayContainExtraStores =
+      !IsLoadChain && any_of(C, [this](const ChainElem &E) {
+        return ExtraElements.contains(E.Inst);
+      });
+
   std::vector<Chain> Ret;
   for (unsigned CBegin = 0; CBegin < C.size(); ++CBegin) {
     // Find candidate chains of size not greater than the largest vector reg.
@@ -769,41 +903,6 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
         continue;
       }
 
-      // Is a load/store with this alignment allowed by TTI and at least as fast
-      // as an unvectorized load/store?
-      //
-      // TTI and F are passed as explicit captures to WAR an MSVC misparse (??).
-      auto IsAllowedAndFast = [&, SizeBytes = SizeBytes, &TTI = TTI,
-                               &F = F](Align Alignment) {
-        if (Alignment.value() % SizeBytes == 0)
-          return true;
-        unsigned VectorizedSpeed = 0;
-        bool AllowsMisaligned = TTI.allowsMisalignedMemoryAccesses(
-            F.getContext(), SizeBytes * 8, AS, Alignment, &VectorizedSpeed);
-        if (!AllowsMisaligned) {
-          LLVM_DEBUG(dbgs()
-                     << "LSV: Access of " << SizeBytes << "B in addrspace "
-                     << AS << " with alignment " << Alignment.value()
-                     << " is misaligned, and therefore can't be vectorized.\n");
-          return false;
-        }
-
-        unsigned ElementwiseSpeed = 0;
-        (TTI).allowsMisalignedMemoryAccesses((F).getContext(), VecElemBits, AS,
-                                             Alignment, &ElementwiseSpeed);
-        if (VectorizedSpeed < ElementwiseSpeed) {
-          LLVM_DEBUG(dbgs()
-                     << "LSV: Access of " << SizeBytes << "B in addrspace "
-                     << AS << " with alignment " << Alignment.value()
-                     << " has relative speed " << VectorizedSpeed
-                     << ", which is lower than the elementwise speed of "
-                     << ElementwiseSpeed
-                     << ".  Therefore this access won't be vectorized.\n");
-          return false;
-        }
-        return true;
-      };
-
       // If we're loading/storing from an alloca, align it if possible.
       //
       // FIXME: We eagerly upgrade the alignment, regardless of whether TTI
@@ -818,8 +917,7 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
                             isa<AllocaInst>(PtrOperand->stripPointerCasts());
       Align Alignment = getLoadStoreAlignment(C[CBegin].Inst);
       Align PrefAlign = Align(StackAdjustedAlignment);
-      if (IsAllocaAccess && Alignment.value() % SizeBytes != 0 &&
-          IsAllowedAndFast(PrefAlign)) {
+      if (IsAllocaAccess && Alignment.value() % SizeBytes != 0) {
         Align NewAlign = getOrEnforceKnownAlignment(
             PtrOperand, PrefAlign, DL, C[CBegin].Inst, nullptr, &DT);
         if (NewAlign >= Alignment) {
@@ -831,7 +929,59 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
         }
       }
 
-      if (!IsAllowedAndFast(Alignment)) {
+      Chain ExtendingLoadsStores;
+      bool ExtendChain = IsLoadChain
+                             ? ExtendLoads
+                             : ExtendStores;
+      if (ExtendChain && NumVecElems < TargetVF && NumVecElems % 2 != 0 &&
+          VecElemBits >= 8) {
+        // TargetVF may be a lot higher than NumVecElems,
+        // so only extend to the next power of 2.
+        assert(VecElemBits % 8 == 0);
+        unsigned VecElemBytes = VecElemBits / 8;
+        unsigned NewNumVecElems = PowerOf2Ceil(NumVecElems);
+        unsigned NewSizeBytes = VecElemBytes * NewNumVecElems;
+
+        assert(NewNumVecElems <= TargetVF);
+
+        LLVM_DEBUG(dbgs() << "LSV: attempting to extend chain of "
+                          << NumVecElems << " "
+                          << (IsLoadChain ? "loads" : "stores") << " to "
+                          << NewNumVecElems << " elements\n");
+        // Do not artificially extend the chain if doing so makes it
+        // misaligned; otherwise we may unnecessarily split the chain when the
+        // target actually supports a non-power-of-2 VF.
+        if (accessIsAllowedAndFast(NewSizeBytes, AS, Alignment, VecElemBits) &&
+            ((IsLoadChain ? TTI.isLegalToWidenLoads()
+                          : TTI.isLegalMaskedStore(
+                                FixedVectorType::get(VecElemTy, NewNumVecElems),
+                                Alignment, AS, /*IsMaskConstant=*/true)))) {
+          LLVM_DEBUG(dbgs()
+                     << "LSV: extending " << (IsLoadChain ? "load" : "store")
+                     << " chain of " << NumVecElems << " "
+                     << (IsLoadChain ? "loads" : "stores")
+                     << " with total byte size of " << SizeBytes << " to "
+                     << NewNumVecElems << " "
+                     << (IsLoadChain ? "loads" : "stores")
+                     << " with total byte size of " << NewSizeBytes
+                     << ", TargetVF=" << TargetVF << " \n");
+
+          unsigned ASPtrBits = DL.getIndexSizeInBits(AS);
+          ChainElem Prev = C[CEnd];
+          for (unsigned i = 0; i < (NewNumVecElems - NumVecElems); i++) {
+            ChainElem NewElem = createExtraElementAfter(
+                Prev, APInt(ASPtrBits, VecElemBytes), "Extend");
+            ExtendingLoadsStores.push_back(NewElem);
+            Prev = ExtendingLoadsStores.back();
+          }
+
+          // Update the size and number of elements for upcoming checks.
+          SizeBytes = NewSizeBytes;
+          NumVecElems = NewNumVecElems;
+        }
+      }
+
+      if (!accessIsAllowedAndFast(SizeBytes, AS, Alignment, VecElemBits)) {
         LLVM_DEBUG(
             dbgs() << "LSV: splitChainByAlignment discarding candidate chain "
                       "because its alignment is not AllowedAndFast: "
@@ -849,10 +999,41 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
         continue;
       }
 
+      if (CandidateChainsMayContainExtraStores) {
+        // The legality of adding extra stores to ExtendingLoadsStores has
+        // already been checked, but if the candidate chain contains extra
+        // stores from an earlier optimization, confirm legality now.
+        // This filter is essential because, when filling gaps in
+        // splitChainByContiguity, we only queried TTI to check that (for a
+        // given element type and address space) there *may* be a legal masked
+        // store we can try to create. Now, we need to check whether the actual
+        // chain we ended up with is legal to turn into a masked store.
+        // This is relevant for NVPTX targets, for example, where a masked
+        // store is only legal if we have ended up with a 256-bit vector.
+        bool CandidateChainContainsExtraStores = llvm::any_of(
+            ArrayRef<ChainElem>(C).slice(CBegin, CEnd - CBegin + 1),
+            [this](const ChainElem &E) {
+              return ExtraElements.contains(E.Inst);
+            });
+
+        if (CandidateChainContainsExtraStores &&
+            !TTI.isLegalMaskedStore(
+                FixedVectorType::get(VecElemTy, NumVecElems), Alignment, AS,
+                /*IsMaskConstant=*/true)) {
+          LLVM_DEBUG(dbgs()
+                     << "LSV: splitChainByAlignment discarding candidate chain "
+                        "because it contains extra stores that we cannot "
+                        "legally vectorize into a masked store \n");
+          continue;
+        }
+      }
+
       // Hooray, we can vectorize this chain!
       Chain &NewChain = Ret.emplace_back();
       for (unsigned I = CBegin; I <= CEnd; ++I)
         NewChain.emplace_back(C[I]);
+      for (ChainElem E : ExtendingLoadsStores)
+        NewChain.emplace_back(E);
       CBegin = CEnd; // Skip over the instructions we've added to the chain.
       break;
     }
@@ -864,6 +1045,12 @@ bool Vectorizer::vectorizeChain(Chain &C) {
   if (C.size() < 2)
     return false;
 
+  // If we are left with a two-element chain and one of the elements is an
+  // extra element, don't vectorize it.
+  if (C.size() == 2 && (ExtraElements.contains(C[0].Inst) ||
+                        ExtraElements.contains(C[1].Inst)))
+    return false;
+
   sortChainInOffsetOrder(C);
 
   LLVM_DEBUG({
@@ -983,12 +1170,41 @@ bool Vectorizer::vectorizeChain(Chain &C) {
       }
     }
 
-    // Chain is in offset order, so C[0] is the instr with the lowest offset,
-    // i.e. the root of the vector.
-    VecInst = Builder.CreateAlignedStore(
-        Vec,
-        getLoadStorePointerOperand(C[0].Inst),
-        Alignment);
+    // If the chain contains extra stores, we need to vectorize it into a
+    // masked store.
+    bool ChainContainsExtraStores = llvm::any_of(C, [this](const ChainElem &E) {
+      return ExtraElements.contains(E.Inst);
+    });
+    if (ChainContainsExtraStores) {
+      assert(TTI.isLegalMaskedStore(Vec->getType(), Alignment, AS,
+                                    /*IsMaskConstant=*/true));
+      unsigned MaskIdx = 0;
+      // Loop through the chain and create the mask for the masked store.
+      Value *Mask = PoisonValue::get(FixedVectorType::get(
+          Builder.getInt1Ty(), cast<FixedVectorType>(VecTy)->getNumElements()));
+      for (const ChainElem &E : C) {
+        bool IsExtraStore = ExtraElements.contains(E.Inst);
+        if (FixedVectorType *VT =
+                dyn_cast<FixedVectorType>(getLoadStoreType(E.Inst))) {
+          for (int J = 0, JE = VT->getNumElements(); J < JE; ++J) {
+            Mask = Builder.CreateInsertElement(Mask,
+                                               Builder.getInt1(!IsExtraStore),
+                                               Builder.getInt32(MaskIdx++));
+          }
+        } else {
+          Mask =
+              Builder.CreateInsertElement(Mask, Builder.getInt1(!IsExtraStore),
+                                          Builder.getInt32(MaskIdx++));
+        }
+      }
+      VecInst = Builder.CreateMaskedStore(
+          Vec, getLoadStorePointerOperand(C[0].Inst), Alignment, Mask);
+    } else {
+      // Chain is in offset order, so C[0] is the instr with the lowest offset,
+      // i.e. the root of the vector.
+      VecInst = Builder.CreateAlignedStore(
+          Vec, getLoadStorePointerOperand(C[0].Inst), Alignment);
+    }
   }
 
   propagateMetadata(VecInst, C);
@@ -1641,3 +1857,118 @@ std::optional<APInt> Vectorizer::getConstantOffset(Value *PtrA, Value *PtrB,
         .sextOrTrunc(OrigBitWidth);
   return std::nullopt;
 }
+
+bool Vectorizer::accessIsAllowedAndFast(unsigned SizeBytes, unsigned AS,
+                                        Align Alignment,
+                                        unsigned VecElemBits) const {
+  if (Alignment.value() % SizeBytes == 0)
+    return true;
+  unsigned VectorizedSpeed = 0;
+  bool AllowsMisaligned = TTI.allowsMisalignedMemoryAccesses(
+      F.getContext(), SizeBytes * 8, AS, Alignment, &VectorizedSpeed);
+  if (!AllowsMisaligned) {
+    LLVM_DEBUG(
+        dbgs() << "LSV: Access of " << SizeBytes << "B in addrspace " << AS
+               << " with alignment " << Alignment.value()
+               << " is misaligned, and therefore can't be vectorized.\n");
+    return false;
+  }
+
+  unsigned ElementwiseSpeed = 0;
+  (TTI).allowsMisalignedMemoryAccesses((F).getContext(), VecElemBits, AS,
+                                       Alignment, &ElementwiseSpeed);
+  if (VectorizedSpeed < ElementwiseSpeed) {
+    LLVM_DEBUG(dbgs() << "LSV: Access of " << SizeBytes << "B in addrspace "
+                      << AS << " with alignment " << Alignment.value()
+                      << " has relative speed " << VectorizedSpeed
+                      << ", which is lower than the elementwise speed of "
+                      << ElementwiseSpeed
+                      << ".  Therefore this access won't be vectorized.\n");
+    return false;
+  }
+  return true;
+}
+
+bool Vectorizer::shouldAttemptMaskedStore(const ArrayRef<ChainElem> C) const {
+  assert(isa<StoreInst>(C[0].Inst));
+
+  unsigned AS = getLoadStoreAddressSpace(C[0].Inst);
+  Type *ElementType = getLoadStoreType(C[0].Inst)->getScalarType();
+  unsigned VecRegBits = TTI.getLoadStoreVecRegBitWidth(AS);
+  // Assume max alignment; splitChainByAlignment will legalize it later if the
+  // necessary alignment is not reached.
+  Align OptimisticAlign = Align(VecRegBits / 8);
+  unsigned int MaxVectorNumElems =
+      VecRegBits / DL.getTypeSizeInBits(ElementType);
+
+  // Attempt to find the smallest power-of-two number of elements that, if
+  // well aligned, could be represented as a legal masked store.
+  // If one exists for a given element type and address space, it is worth
+  // attempting to fill gaps as we may be able to create a legal masked store.
+  // If we do not end up with a legal masked store, chains with extra elements
+  // will be discarded.
+  const unsigned MinMaskedStoreNumElems = 4;
+  for (unsigned NumElems = MinMaskedStoreNumElems;
+       NumElems <= MaxVectorNumElems; NumElems *= 2) {
+    FixedVectorType *VectorType = FixedVectorType::get(ElementType, NumElems);
+    if (TTI.isLegalMaskedStore(VectorType, OptimisticAlign, AS,
+                               /*IsMaskConstant=*/true))
+      return true;
+  }
+  return false;
+}
+
+ChainElem Vectorizer::createExtraElementAfter(const ChainElem &Prev,
+                                              APInt Offset, StringRef Prefix,
+                                              Align Alignment) {
+  Instruction *NewElement = nullptr;
+  Builder.SetInsertPoint(Prev.Inst->getNextNode());
+  if (LoadInst *PrevLoad = dyn_cast<LoadInst>(Prev.Inst)) {
+    Value *NewGep = Builder.CreatePtrAdd(
+        PrevLoad->getPointerOperand(), Builder.getInt(Offset), Prefix + "GEP");
+    LLVM_DEBUG(dbgs() << "LSV: Extra GEP Created: \n" << *NewGep << "\n");
+    NewElement = Builder.CreateAlignedLoad(PrevLoad->getType(), NewGep,
+                                           Alignment, Prefix);
+  } else {
+    StoreInst *PrevStore = cast<StoreInst>(Prev.Inst);
+
+    Value *NewGep = Builder.CreatePtrAdd(
+        PrevStore->getPointerOperand(), Builder.getInt(Offset), Prefix + "GEP");
+    LLVM_DEBUG(dbgs() << "LSV: Extra GEP Created: \n" << *NewGep << "\n");
+    NewElement = Builder.CreateAlignedStore(
+        PoisonValue::get(PrevStore->getValueOperand()->getType()), NewGep,
+        Alignment);
+  }
+
+  // Copy all metadata from the previous element to the new element;
+  // propagateMetadata will fold it into the final vector when applicable.
+  NewElement->copyMetadata(*Prev.Inst);
+
+  // Record the created element so it can be tracked and cleaned up later.
+  ExtraElements.insert(NewElement);
+
+  APInt NewOffsetFromLeader = Prev.OffsetFromLeader + Offset;
+  LLVM_DEBUG(dbgs() << "LSV: Extra Element Created: \n"
+                    << *NewElement
+                    << " OffsetFromLeader: " << NewOffsetFromLeader << "\n");
+  return ChainElem{NewElement, NewOffsetFromLeader};
+}
+
+void Vectorizer::deleteExtraElements() {
+  for (auto *ExtraElement : ExtraElements) {
+    if (isa<LoadInst>(ExtraElement)) {
+      [[maybe_unused]] bool Deleted =
+          RecursivelyDeleteTriviallyDeadInstructions(ExtraElement);
+      assert(Deleted && "Extra Load should always be trivially dead");
+    } else {
+      // Unlike extra loads, extra stores won't be "dead", but they should all
+      // be deleted regardless: they have either already been combined into a
+      // masked store or were left behind and now need to be cleaned up.
+      auto *PtrOperand = getLoadStorePointerOperand(ExtraElement);
+      ExtraElement->eraseFromParent();
+      RecursivelyDeleteTriviallyDeadInstructions(PtrOperand);
+    }
+  }
+
+  ExtraElements.clear();
+}
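
On the store side, a gapped chain is completed with poison stores and then
emitted as a single masked store whose mask is false for the filled lanes. A
hand-written sketch of the resulting IR (not copied from the new masked-store.ll
test; the lane layout and names are illustrative):

  ; Stores covering lanes 0-7 of a 256-bit vector, with lane 2 filled by the pass.
  call void @llvm.masked.store.v8i32.p0(<8 x i32> %vals, ptr %out, i32 32, <8 x i1> <i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true>)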
diff --git a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
index dd9a472984c25..19ec2574e32b4 100644
--- a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
+++ b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
@@ -45,29 +45,31 @@ define half @fh(ptr %p) {
 ; ENABLED-LABEL: fh(
 ; ENABLED:       {
 ; ENABLED-NEXT:    .reg .b16 %rs<10>;
-; ENABLED-NEXT:    .reg .b32 %r<13>;
+; ENABLED-NEXT:    .reg .b32 %r<17>;
 ; ENABLED-NEXT:    .reg .b64 %rd<2>;
 ; ENABLED-EMPTY:
 ; ENABLED-NEXT:  // %bb.0:
 ; ENABLED-NEXT:    ld.param.b64 %rd1, [fh_param_0];
-; ENABLED-NEXT:    ld.v4.b16 {%rs1, %rs2, %rs3, %rs4}, [%rd1];
-; ENABLED-NEXT:    ld.b16 %rs5, [%rd1+8];
-; ENABLED-NEXT:    cvt.f32.f16 %r1, %rs2;
-; ENABLED-NEXT:    cvt.f32.f16 %r2, %rs1;
-; ENABLED-NEXT:    add.rn.f32 %r3, %r2, %r1;
-; ENABLED-NEXT:    cvt.rn.f16.f32 %rs6, %r3;
-; ENABLED-NEXT:    cvt.f32.f16 %r4, %rs4;
-; ENABLED-NEXT:    cvt.f32.f16 %r5, %rs3;
-; ENABLED-NEXT:    add.rn.f32 %r6, %r5, %r4;
-; ENABLED-NEXT:    cvt.rn.f16.f32 %rs7, %r6;
-; ENABLED-NEXT:    cvt.f32.f16 %r7, %rs7;
-; ENABLED-NEXT:    cvt.f32.f16 %r8, %rs6;
-; ENABLED-NEXT:    add.rn.f32 %r9, %r8, %r7;
-; ENABLED-NEXT:    cvt.rn.f16.f32 %rs8, %r9;
-; ENABLED-NEXT:    cvt.f32.f16 %r10, %rs8;
-; ENABLED-NEXT:    cvt.f32.f16 %r11, %rs5;
-; ENABLED-NEXT:    add.rn.f32 %r12, %r10, %r11;
-; ENABLED-NEXT:    cvt.rn.f16.f32 %rs9, %r12;
+; ENABLED-NEXT:    ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
+; ENABLED-NEXT:    { .reg .b16 tmp; mov.b32 {%rs1, tmp}, %r3; }
+; ENABLED-NEXT:    mov.b32 {%rs2, %rs3}, %r2;
+; ENABLED-NEXT:    mov.b32 {%rs4, %rs5}, %r1;
+; ENABLED-NEXT:    cvt.f32.f16 %r5, %rs5;
+; ENABLED-NEXT:    cvt.f32.f16 %r6, %rs4;
+; ENABLED-NEXT:    add.rn.f32 %r7, %r6, %r5;
+; ENABLED-NEXT:    cvt.rn.f16.f32 %rs6, %r7;
+; ENABLED-NEXT:    cvt.f32.f16 %r8, %rs3;
+; ENABLED-NEXT:    cvt.f32.f16 %r9, %rs2;
+; ENABLED-NEXT:    add.rn.f32 %r10, %r9, %r8;
+; ENABLED-NEXT:    cvt.rn.f16.f32 %rs7, %r10;
+; ENABLED-NEXT:    cvt.f32.f16 %r11, %rs7;
+; ENABLED-NEXT:    cvt.f32.f16 %r12, %rs6;
+; ENABLED-NEXT:    add.rn.f32 %r13, %r12, %r11;
+; ENABLED-NEXT:    cvt.rn.f16.f32 %rs8, %r13;
+; ENABLED-NEXT:    cvt.f32.f16 %r14, %rs8;
+; ENABLED-NEXT:    cvt.f32.f16 %r15, %rs1;
+; ENABLED-NEXT:    add.rn.f32 %r16, %r14, %r15;
+; ENABLED-NEXT:    cvt.rn.f16.f32 %rs9, %r16;
 ; ENABLED-NEXT:    st.param.b16 [func_retval0], %rs9;
 ; ENABLED-NEXT:    ret;
 ;
diff --git a/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll b/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll
index 51f6b00601069..4870050dd2d43 100644
--- a/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll
+++ b/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll
@@ -171,8 +171,7 @@ define internal fastcc [3 x i32] @callee_St4x3(ptr nocapture noundef readonly by
   ; CHECK:       .func  (.param .align 16 .b8 func_retval0[12])
   ; CHECK-LABEL: callee_St4x3(
   ; CHECK-NEXT:  .param .align 16 .b8 callee_St4x3_param_0[12]
-  ; CHECK:       ld.param.v2.b32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]]}, [callee_St4x3_param_0];
-  ; CHECK:       ld.param.b32    [[R3:%r[0-9]+]],  [callee_St4x3_param_0+8];
+  ; CHECK:       ld.param.v4.b32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]], [[R3:%r[0-9]+]], %{{.*}}}, [callee_St4x3_param_0];
   ; CHECK-DAG:   st.param.v2.b32 [func_retval0], {[[R1]], [[R2]]};
   ; CHECK-DAG:   st.param.b32    [func_retval0+8], [[R3]];
   ; CHECK-NEXT:  ret;
@@ -394,8 +393,7 @@ define internal fastcc [7 x i32] @callee_St4x7(ptr nocapture noundef readonly by
   ; CHECK-LABEL: callee_St4x7(
   ; CHECK-NEXT:  .param .align 16 .b8 callee_St4x7_param_0[28]
   ; CHECK:       ld.param.v4.b32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]], [[R3:%r[0-9]+]], [[R4:%r[0-9]+]]}, [callee_St4x7_param_0];
-  ; CHECK:       ld.param.v2.b32 {[[R5:%r[0-9]+]],  [[R6:%r[0-9]+]]}, [callee_St4x7_param_0+16];
-  ; CHECK:       ld.param.b32    [[R7:%r[0-9]+]],   [callee_St4x7_param_0+24];
+  ; CHECK:       ld.param.v4.b32 {[[R5:%r[0-9]+]], [[R6:%r[0-9]+]], [[R7:%r[0-9]+]], %{{.*}}}, [callee_St4x7_param_0+16];
   ; CHECK-DAG:   st.param.v4.b32 [func_retval0],  {[[R1]], [[R2]], [[R3]], [[R4]]};
   ; CHECK-DAG:   st.param.v2.b32 [func_retval0+16], {[[R5]], [[R6]]};
   ; CHECK-DAG:   st.param.b32    [func_retval0+24], [[R7]];
diff --git a/llvm/test/CodeGen/NVPTX/variadics-backend.ll b/llvm/test/CodeGen/NVPTX/variadics-backend.ll
index 61ff80632c789..5499dbce61bae 100644
--- a/llvm/test/CodeGen/NVPTX/variadics-backend.ll
+++ b/llvm/test/CodeGen/NVPTX/variadics-backend.ll
@@ -110,7 +110,7 @@ define dso_local i32 @foo() {
 ; CHECK-PTX-NEXT:  // %bb.0: // %entry
 ; CHECK-PTX-NEXT:    mov.b64 %SPL, __local_depot1;
 ; CHECK-PTX-NEXT:    cvta.local.u64 %SP, %SPL;
-; CHECK-PTX-NEXT:    st.b64 [%SP], 4294967297;
+; CHECK-PTX-NEXT:    st.v2.b32 [%SP], {1, 1};
 ; CHECK-PTX-NEXT:    st.b32 [%SP+8], 1;
 ; CHECK-PTX-NEXT:    st.b64 [%SP+16], 1;
 ; CHECK-PTX-NEXT:    st.b64 [%SP+24], 4607182418800017408;
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll
new file mode 100644
index 0000000000000..24d0dea086ba8
--- /dev/null
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll
@@ -0,0 +1,81 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=nvptx64-nvidia-cuda -passes=load-store-vectorizer -S -o - %s | FileCheck %s
+
+;; Check that the vectorizer extends a Chain to the next power of two,
+;; essentially loading more vector elements than the original
+;; code. Alignment and other requirements for vectorization should
+;; still be met.
+
+define void @load3to4(ptr %p) #0 {
+; CHECK-LABEL: define void @load3to4(
+; CHECK-SAME: ptr [[P:%.*]]) {
+; CHECK-NEXT:    [[P_0:%.*]] = getelementptr i32, ptr [[P]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[P_0]], align 16
+; CHECK-NEXT:    [[V01:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[V12:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[V23:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
+; CHECK-NEXT:    [[EXTEND4:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
+; CHECK-NEXT:    ret void
+;
+  %p.0 = getelementptr i32, ptr %p, i32 0
+  %p.1 = getelementptr i32, ptr %p, i32 1
+  %p.2 = getelementptr i32, ptr %p, i32 2
+
+  %v0 = load i32, ptr %p.0, align 16
+  %v1 = load i32, ptr %p.1, align 4
+  %v2 = load i32, ptr %p.2, align 8
+
+  ret void
+}
+
+define void @load5to8(ptr %p) #0 {
+; CHECK-LABEL: define void @load5to8(
+; CHECK-SAME: ptr [[P:%.*]]) {
+; CHECK-NEXT:    [[P_0:%.*]] = getelementptr i16, ptr [[P]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[P_0]], align 16
+; CHECK-NEXT:    [[V05:%.*]] = extractelement <8 x i16> [[TMP1]], i32 0
+; CHECK-NEXT:    [[V16:%.*]] = extractelement <8 x i16> [[TMP1]], i32 1
+; CHECK-NEXT:    [[V27:%.*]] = extractelement <8 x i16> [[TMP1]], i32 2
+; CHECK-NEXT:    [[V38:%.*]] = extractelement <8 x i16> [[TMP1]], i32 3
+; CHECK-NEXT:    [[V49:%.*]] = extractelement <8 x i16> [[TMP1]], i32 4
+; CHECK-NEXT:    [[EXTEND10:%.*]] = extractelement <8 x i16> [[TMP1]], i32 5
+; CHECK-NEXT:    [[EXTEND211:%.*]] = extractelement <8 x i16> [[TMP1]], i32 6
+; CHECK-NEXT:    [[EXTEND412:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7
+; CHECK-NEXT:    ret void
+;
+  %p.0 = getelementptr i16, ptr %p, i32 0
+  %p.1 = getelementptr i16, ptr %p, i32 1
+  %p.2 = getelementptr i16, ptr %p, i32 2
+  %p.3 = getelementptr i16, ptr %p, i32 3
+  %p.4 = getelementptr i16, ptr %p, i32 4
+
+  %v0 = load i16, ptr %p.0, align 16
+  %v1 = load i16, ptr %p.1, align 2
+  %v2 = load i16, ptr %p.2, align 4
+  %v3 = load i16, ptr %p.3, align 8
+  %v4 = load i16, ptr %p.4, align 2
+
+  ret void
+}
+
+define void @load3to4_unaligned(ptr %p) #0 {
+; CHECK-LABEL: define void @load3to4_unaligned(
+; CHECK-SAME: ptr [[P:%.*]]) {
+; CHECK-NEXT:    [[P_0:%.*]] = getelementptr i32, ptr [[P]], i32 0
+; CHECK-NEXT:    [[P_2:%.*]] = getelementptr i32, ptr [[P]], i32 2
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[P_0]], align 8
+; CHECK-NEXT:    [[V01:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[V12:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[V2:%.*]] = load i32, ptr [[P_2]], align 8
+; CHECK-NEXT:    ret void
+;
+  %p.0 = getelementptr i32, ptr %p, i32 0
+  %p.1 = getelementptr i32, ptr %p, i32 1
+  %p.2 = getelementptr i32, ptr %p, i32 2
+
+  %v0 = load i32, ptr %p.0, align 8
+  %v1 = load i32, ptr %p.1, align 4
+  %v2 = load i32, ptr %p.2, align 8
+
+  ret void
+}
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-cleanup.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-cleanup.ll
new file mode 100644
index 0000000000000..e812f8750fa76
--- /dev/null
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-cleanup.ll
@@ -0,0 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=nvptx64-nvidia-cuda -passes=load-store-vectorizer -S < %s | FileCheck %s
+
+; Test that gap-fill instructions get deleted if they are not used.
+%struct.S10 = type { i32, i32, i32, i32 }
+
+; First, confirm that gap-fill instructions get generated and would be vectorized if the alignment is correct.
+define void @fillTwoGapsCanVectorize(ptr %in) {
+; CHECK-LABEL: define void @fillTwoGapsCanVectorize(
+; CHECK-SAME: ptr [[IN:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[IN]], align 16
+; CHECK-NEXT:    [[LOAD03:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[GAPFILL4:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[GAPFILL25:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
+; CHECK-NEXT:    [[LOAD36:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
+; CHECK-NEXT:    ret void
+;
+  %load0 = load i32, ptr %in, align 16
+  %getElem = getelementptr i8, ptr %in, i64 12
+  %load3 = load i32, ptr %getElem, align 4
+  ret void
+}
+
+; Then, confirm that gap-fill instructions get deleted if the alignment prevents vectorization.
+define void @fillTwoGapsCantVectorize(ptr %in) {
+; CHECK-LABEL: define void @fillTwoGapsCantVectorize(
+; CHECK-SAME: ptr [[IN:%.*]]) {
+; CHECK-NEXT:    [[LOAD0:%.*]] = load i32, ptr [[IN]], align 4
+; CHECK-NEXT:    [[GETELEM:%.*]] = getelementptr i8, ptr [[IN]], i64 12
+; CHECK-NEXT:    [[LOAD3:%.*]] = load i32, ptr [[GETELEM]], align 4
+; CHECK-NEXT:    ret void
+;
+  %load0 = load i32, ptr %in, align 4
+  %getElem = getelementptr i8, ptr %in, i64 12
+  %load3 = load i32, ptr %getElem, align 4
+  ret void
+}
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-invariant.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-invariant.ll
new file mode 100644
index 0000000000000..6d0dfc677780d
--- /dev/null
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-invariant.ll
@@ -0,0 +1,83 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=nvptx64-nvidia-cuda -passes=load-store-vectorizer -S < %s | FileCheck %s
+
+; Test that gap filling does not drop invariant metadata.
+%struct.S10 = type { i32, i32, i32, i32 }
+
+; With no gaps, if every load is invariant, the vectorized load will be too.
+define i32 @noGaps(ptr %in) {
+; CHECK-LABEL: define i32 @noGaps(
+; CHECK-SAME: ptr [[IN:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[IN]], align 16, !invariant.load [[META0:![0-9]+]]
+; CHECK-NEXT:    [[TMP01:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP23:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
+; CHECK-NEXT:    [[TMP34:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
+; CHECK-NEXT:    [[SUM01:%.*]] = add i32 [[TMP01]], [[TMP12]]
+; CHECK-NEXT:    [[SUM012:%.*]] = add i32 [[SUM01]], [[TMP23]]
+; CHECK-NEXT:    [[SUM0123:%.*]] = add i32 [[SUM012]], [[TMP34]]
+; CHECK-NEXT:    ret i32 [[SUM0123]]
+;
+  %load0 = load i32, ptr %in, align 16, !invariant.load !0
+  %getElem1 = getelementptr inbounds %struct.S10, ptr %in, i64 0, i32 1
+  %load1 = load i32, ptr %getElem1, align 4, !invariant.load !0
+  %getElem2 = getelementptr inbounds %struct.S10, ptr %in, i64 0, i32 2
+  %load2 = load i32, ptr %getElem2, align 4, !invariant.load !0
+  %getElem3 = getelementptr inbounds %struct.S10, ptr %in, i64 0, i32 3
+  %load3 = load i32, ptr %getElem3, align 4, !invariant.load !0
+  %sum01 = add i32 %load0, %load1
+  %sum012 = add i32 %sum01, %load2
+  %sum0123 = add i32 %sum012, %load3
+  ret i32 %sum0123
+}
+
+; If one of the loads is not invariant, the vectorized load will not be invariant.
+define i32 @noGapsMissingInvariant(ptr %in) {
+; CHECK-LABEL: define i32 @noGapsMissingInvariant(
+; CHECK-SAME: ptr [[IN:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[IN]], align 16
+; CHECK-NEXT:    [[TMP01:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP23:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
+; CHECK-NEXT:    [[TMP34:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
+; CHECK-NEXT:    [[SUM01:%.*]] = add i32 [[TMP01]], [[TMP12]]
+; CHECK-NEXT:    [[SUM012:%.*]] = add i32 [[SUM01]], [[TMP23]]
+; CHECK-NEXT:    [[SUM0123:%.*]] = add i32 [[SUM012]], [[TMP34]]
+; CHECK-NEXT:    ret i32 [[SUM0123]]
+;
+  %load0 = load i32, ptr %in, align 16, !invariant.load !0
+  %getElem1 = getelementptr inbounds %struct.S10, ptr %in, i64 0, i32 1
+  %load1 = load i32, ptr %getElem1, align 4, !invariant.load !0
+  %getElem2 = getelementptr inbounds %struct.S10, ptr %in, i64 0, i32 2
+  %load2 = load i32, ptr %getElem2, align 4, !invariant.load !0
+  %getElem3 = getelementptr inbounds %struct.S10, ptr %in, i64 0, i32 3
+  %load3 = load i32, ptr %getElem3, align 4
+  %sum01 = add i32 %load0, %load1
+  %sum012 = add i32 %sum01, %load2
+  %sum0123 = add i32 %sum012, %load3
+  ret i32 %sum0123
+}
+
+; With two gaps, if every real load is invariant, the vectorized load will be too.
+define i32 @twoGaps(ptr %in) {
+; CHECK-LABEL: define i32 @twoGaps(
+; CHECK-SAME: ptr [[IN:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[IN]], align 16, !invariant.load [[META0]]
+; CHECK-NEXT:    [[LOAD03:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[GAPFILL4:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[GAPFILL25:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
+; CHECK-NEXT:    [[LOAD36:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
+; CHECK-NEXT:    [[SUM:%.*]] = add i32 [[LOAD03]], [[LOAD36]]
+; CHECK-NEXT:    ret i32 [[SUM]]
+;
+  %load0 = load i32, ptr %in, align 16, !invariant.load !0
+  %getElem3 = getelementptr inbounds %struct.S10, ptr %in, i64 0, i32 3
+  %load3 = load i32, ptr %getElem3, align 4, !invariant.load !0
+  %sum = add i32 %load0, %load3
+  ret i32 %sum
+}
+
+!0 = !{}
+;.
+; CHECK: [[META0]] = !{}
+;.
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll
new file mode 100644
index 0000000000000..fe7123898d450
--- /dev/null
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll
@@ -0,0 +1,186 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=nvptx64-nvidia-cuda -passes=load-store-vectorizer -mcpu=sm_100 -mattr=+ptx88 -S < %s | FileCheck %s
+
+; The LSV can handle vector inputs, and gap filling can too, with one exception:
+; currently, we do not fill gaps when the loads enclosing the gap have different
+; sizes. Otherwise, vectors are treated the same as scalar types.
+
+define void @i1x8_gap_gap_i1x8(ptr %ptr) {
+; CHECK-LABEL: define void @i1x8_gap_gap_i1x8(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i1>, ptr [[PTR0]], align 4
+; CHECK-NEXT:    [[L03:%.*]] = shufflevector <32 x i1> [[TMP1]], <32 x i1> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT:    [[GAPFILL4:%.*]] = shufflevector <32 x i1> [[TMP1]], <32 x i1> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT:    [[GAPFILL25:%.*]] = shufflevector <32 x i1> [[TMP1]], <32 x i1> poison, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; CHECK-NEXT:    [[L36:%.*]] = shufflevector <32 x i1> [[TMP1]], <32 x i1> poison, <8 x i32> <i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT:    ret void
+;
+  %ptr0 = getelementptr i8, ptr %ptr, i64 0
+  %ptr3 = getelementptr i8, ptr %ptr, i64 3
+
+  %l0 = load <8 x i1>,  ptr %ptr0, align 4
+  %l3 = load <8 x i1>,  ptr %ptr3, align 1
+
+  ret void
+}
+
+; The chain elements are different sizes, so gap filling won't kick in
+define void @i1x8_gap_gap_i1x16(ptr %ptr) {
+; CHECK-LABEL: define void @i1x8_gap_gap_i1x16(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
+; CHECK-NEXT:    [[PTR3:%.*]] = getelementptr i8, ptr [[PTR]], i64 3
+; CHECK-NEXT:    [[L0:%.*]] = load <8 x i1>, ptr [[PTR0]], align 4
+; CHECK-NEXT:    [[L3:%.*]] = load <16 x i1>, ptr [[PTR3]], align 2
+; CHECK-NEXT:    ret void
+;
+  %ptr0 = getelementptr i8, ptr %ptr, i64 0
+  %ptr3 = getelementptr i8, ptr %ptr, i64 3
+
+  %l0 = load <8 x i1>,  ptr %ptr0, align 4
+  %l3 = load <16 x i1>,  ptr %ptr3, align 2
+
+  ret void
+}
+
+; A gap of two <2 x i8> elements gets filled
+define void @i8x2_gap_gap_i8x2(ptr %ptr) {
+; CHECK-LABEL: define void @i8x2_gap_gap_i8x2(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[PTR0]], align 8
+; CHECK-NEXT:    [[L03:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[GAPFILL4:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT:    [[GAPFILL25:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> poison, <2 x i32> <i32 4, i32 5>
+; CHECK-NEXT:    [[L36:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> poison, <2 x i32> <i32 6, i32 7>
+; CHECK-NEXT:    ret void
+;
+  %ptr0 = getelementptr i8, ptr %ptr, i64 0
+  %ptr3 = getelementptr i8, ptr %ptr, i64 6
+
+  %l0 = load <2 x i8>,  ptr %ptr0, align 8
+  %l3 = load <2 x i8>,  ptr %ptr3, align 2
+
+  ret void
+}
+
+; The chain elements are different sizes, so gap filling won't kick in
+define void @i8x2_gap_gap_i8(ptr %ptr) {
+; CHECK-LABEL: define void @i8x2_gap_gap_i8(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
+; CHECK-NEXT:    [[PTR3:%.*]] = getelementptr i8, ptr [[PTR]], i64 6
+; CHECK-NEXT:    [[L0:%.*]] = load <2 x i8>, ptr [[PTR0]], align 8
+; CHECK-NEXT:    [[L3:%.*]] = load i8, ptr [[PTR3]], align 1
+; CHECK-NEXT:    ret void
+;
+  %ptr0 = getelementptr i8, ptr %ptr, i64 0
+  %ptr3 = getelementptr i8, ptr %ptr, i64 6
+
+  %l0 = load <2 x i8>,  ptr %ptr0, align 8
+  %l3 = load i8,  ptr %ptr3, align 1
+
+  ret void
+}
+
+
+define void @i16x2_gap_i16x2_i16x2(ptr %ptr) {
+; CHECK-LABEL: define void @i16x2_gap_i16x2_i16x2(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[PTR0]], align 16
+; CHECK-NEXT:    [[L01:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[GAPFILL2:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT:    [[L23:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 4, i32 5>
+; CHECK-NEXT:    [[L34:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 6, i32 7>
+; CHECK-NEXT:    ret void
+;
+  %ptr0 = getelementptr i8, ptr %ptr, i64 0
+  %ptr2 = getelementptr i8, ptr %ptr, i64 8
+  %ptr3 = getelementptr i8, ptr %ptr, i64 12
+
+  %l0 = load <2 x i16>,  ptr %ptr0, align 16
+  %l2 = load <2 x i16>,  ptr %ptr2, align 2
+  %l3 = load <2 x i16>,  ptr %ptr3, align 2
+
+  ret void
+}
+
+define void @i16x2_gap_gap_i16x2(ptr %ptr) {
+; CHECK-LABEL: define void @i16x2_gap_gap_i16x2(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[PTR0]], align 16
+; CHECK-NEXT:    [[L03:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[GAPFILL4:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT:    [[GAPFILL25:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 4, i32 5>
+; CHECK-NEXT:    [[L36:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 6, i32 7>
+; CHECK-NEXT:    ret void
+;
+  %ptr0 = getelementptr i8, ptr %ptr, i64 0
+  %ptr3 = getelementptr i8, ptr %ptr, i64 12
+
+  %l0 = load <2 x i16>,  ptr %ptr0, align 16
+  %l3 = load <2 x i16>,  ptr %ptr3, align 4
+
+  ret void
+}
+
+define void @i32x2_i32x2_gap_i32x2(ptr addrspace(1) %in) {
+; CHECK-LABEL: define void @i32x2_i32x2_gap_i32x2(
+; CHECK-SAME: ptr addrspace(1) [[IN:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr addrspace(1) [[IN]], align 32
+; CHECK-NEXT:    [[VEC01:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[VEC12:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT:    [[GAPFILL3:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 4, i32 5>
+; CHECK-NEXT:    [[VEC34:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 6, i32 7>
+; CHECK-NEXT:    ret void
+;
+  %vec0 = load <2 x i32>, ptr addrspace(1) %in, align 32
+  %getElem1 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 8
+  %vec1 = load <2 x i32>, ptr addrspace(1) %getElem1, align 8
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 24
+  %vec3 = load <2 x i32>, ptr addrspace(1) %getElem3, align 8
+  ret void
+}
+
+; This gap is filled but then eventually discarded because the total size
+; of the vector is larger than the target supports.
+define void @i64x2_gap_i64x2_i64x2(ptr addrspace(1) %in) {
+; CHECK-LABEL: define void @i64x2_gap_i64x2_i64x2(
+; CHECK-SAME: ptr addrspace(1) [[IN:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[VEC0:%.*]] = load <2 x i64>, ptr addrspace(1) [[IN]], align 32
+; CHECK-NEXT:    [[GETELEM3:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[IN]], i32 32
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr addrspace(1) [[GETELEM3]], align 32
+; CHECK-NEXT:    [[VEC31:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[VEC12:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT:    ret void
+;
+  %vec0 = load <2 x i64>, ptr addrspace(1) %in, align 32
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 32
+  %vec3 = load <2 x i64>, ptr addrspace(1) %getElem3, align 32
+  %getElem1 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 48
+  %vec1 = load <2 x i64>, ptr addrspace(1) %getElem1, align 16
+  ret void
+}
+
+; This gap is filled but then eventually discarded because the total size
+; of the vector is larger than the target supports.
+define void @i64x2_i64x2_gap_i64x2(ptr addrspace(1) %in) {
+; CHECK-LABEL: define void @i64x2_i64x2_gap_i64x2(
+; CHECK-SAME: ptr addrspace(1) [[IN:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i64>, ptr addrspace(1) [[IN]], align 32
+; CHECK-NEXT:    [[VEC01:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[VEC32:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT:    [[GETELEM1:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[IN]], i32 48
+; CHECK-NEXT:    [[VEC1:%.*]] = load <2 x i64>, ptr addrspace(1) [[GETELEM1]], align 8
+; CHECK-NEXT:    ret void
+;
+  %vec0 = load <2 x i64>, ptr addrspace(1) %in, align 32
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 16
+  %vec3 = load <2 x i64>, ptr addrspace(1) %getElem3, align 16
+  %getElem1 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 48
+  %vec1 = load <2 x i64>, ptr addrspace(1) %getElem1, align 8
+  ret void
+}
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill.ll
new file mode 100644
index 0000000000000..82ebffed7f765
--- /dev/null
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=nvptx64-nvidia-cuda -passes=load-store-vectorizer -S < %s | FileCheck %s
+
+; Load elements 0, 1, and 3, filling the gap with a generated load of element 2
+define void @test(ptr %ptr) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[PTR]], align 16
+; CHECK-NEXT:    [[LD01:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[LD12:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[GAPFILL3:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
+; CHECK-NEXT:    [[LD34:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
+; CHECK-NEXT:    ret void
+;
+  %ld0 = load i32, ptr %ptr, align 16
+  %gep1 = getelementptr inbounds i8, ptr %ptr, i32 4
+  %ld1 = load i32, ptr %gep1, align 4
+  %gep3 = getelementptr inbounds i8, ptr %ptr, i32 12
+  %ld3 = load i32, ptr %gep3, align 4
+  ret void
+}
+
+; Load elements 0, 2, and 3, filling the gap with a generated load of element 1
+define void @test2(ptr %ptr) {
+; CHECK-LABEL: define void @test2(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[PTR]], align 16
+; CHECK-NEXT:    [[LD01:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[GAPFILL2:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[LD23:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
+; CHECK-NEXT:    [[LD34:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
+; CHECK-NEXT:    ret void
+;
+  %ld0 = load i32, ptr %ptr, align 16
+  %gep2 = getelementptr inbounds i8, ptr %ptr, i32 8
+  %ld2 = load i32, ptr %gep2, align 4
+  %gep3 = getelementptr inbounds i8, ptr %ptr, i32 12
+  %ld3 = load i32, ptr %gep3, align 4
+  ret void
+}
+
+; This gap can be filled, but the types are too large to do a v4 load,
+; so we should end up with a v2 load and a single scalar load.
+define void @test3(ptr %ptr) {
+; CHECK-LABEL: define void @test3(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[PTR]], align 16
+; CHECK-NEXT:    [[LD01:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
+; CHECK-NEXT:    [[LD12:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
+; CHECK-NEXT:    [[GEP3:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 24
+; CHECK-NEXT:    [[LD3:%.*]] = load i64, ptr [[GEP3]], align 4
+; CHECK-NEXT:    ret void
+;
+  %ld0 = load i64, ptr %ptr, align 16
+  %gep1 = getelementptr inbounds i8, ptr %ptr, i32 8
+  %ld1 = load i64, ptr %gep1, align 4
+  %gep3 = getelementptr inbounds i8, ptr %ptr, i32 24
+  %ld3 = load i64, ptr %gep3, align 4
+  ret void
+}
+
+; This gap can be filled, but the types are too large to do a v4 load,
+; so we should end up with a v2 load and a single scalar load.
+define void @test4(ptr %ptr) {
+; CHECK-LABEL: define void @test4(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[LD0:%.*]] = load i64, ptr [[PTR]], align 16
+; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 16
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[GEP2]], align 16
+; CHECK-NEXT:    [[LD21:%.*]] = extractelement <2 x i64> [[TMP1]], i32 0
+; CHECK-NEXT:    [[LD32:%.*]] = extractelement <2 x i64> [[TMP1]], i32 1
+; CHECK-NEXT:    ret void
+;
+  %ld0 = load i64, ptr %ptr, align 16
+  %gep2 = getelementptr inbounds i8, ptr %ptr, i32 16
+  %ld2 = load i64, ptr %gep2, align 16
+  %gep3 = getelementptr inbounds i8, ptr %ptr, i32 24
+  %ld3 = load i64, ptr %gep3, align 4
+  ret void
+}
+
+; Load elements 0 and 3, filling the gap with generated loads of elements 1 and 2
+define void @test5(ptr %ptr) {
+; CHECK-LABEL: define void @test5(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[PTR]], align 16
+; CHECK-NEXT:    [[LD03:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[GAPFILL4:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[GAPFILL25:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
+; CHECK-NEXT:    [[LD36:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
+; CHECK-NEXT:    ret void
+;
+  %ld0 = load i32, ptr %ptr, align 16
+  %gep3 = getelementptr inbounds i8, ptr %ptr, i32 12
+  %ld3 = load i32, ptr %gep3, align 4
+  ret void
+}
+
+; Load elements 0, 1, 3, 4, 6, and 7, filling gaps at elements 2 and 5.
+define void @test6(ptr %ptr) {
+; CHECK-LABEL: define void @test6(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[PTR]], align 16
+; CHECK-NEXT:    [[LD03:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[LD14:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[GAPFILL5:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
+; CHECK-NEXT:    [[LD36:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
+; CHECK-NEXT:    [[GEP4:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 16
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr [[GEP4]], align 16
+; CHECK-NEXT:    [[LD47:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
+; CHECK-NEXT:    [[GAPFILL28:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1
+; CHECK-NEXT:    [[LD69:%.*]] = extractelement <4 x i32> [[TMP2]], i32 2
+; CHECK-NEXT:    [[LD710:%.*]] = extractelement <4 x i32> [[TMP2]], i32 3
+; CHECK-NEXT:    ret void
+;
+  %ld0 = load i32, ptr %ptr, align 16
+  %gep1 = getelementptr inbounds i8, ptr %ptr, i32 4
+  %ld1 = load i32, ptr %gep1, align 4
+  %gep3 = getelementptr inbounds i8, ptr %ptr, i32 12
+  %ld3 = load i32, ptr %gep3, align 4
+
+  %gep4 = getelementptr inbounds i8, ptr %ptr, i32 16
+  %ld4 = load i32, ptr %gep4, align 16
+  %gep6 = getelementptr inbounds i8, ptr %ptr, i32 24
+  %ld6 = load i32, ptr %gep6, align 4
+  %gep7 = getelementptr inbounds i8, ptr %ptr, i32 28
+  %ld7 = load i32, ptr %gep7, align 4
+  ret void
+}
+
+; Load elements 0, 1, 3, 4, and 7; elements 2, 5, and 6 will be filled
+define void @test7(ptr %ptr) {
+; CHECK-LABEL: define void @test7(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[PTR]], align 16
+; CHECK-NEXT:    [[LD05:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[LD16:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[GAPFILL7:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
+; CHECK-NEXT:    [[LD38:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
+; CHECK-NEXT:    [[GEP4:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 16
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr [[GEP4]], align 16
+; CHECK-NEXT:    [[LD49:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
+; CHECK-NEXT:    [[GAPFILL210:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1
+; CHECK-NEXT:    [[GAPFILL411:%.*]] = extractelement <4 x i32> [[TMP2]], i32 2
+; CHECK-NEXT:    [[LD712:%.*]] = extractelement <4 x i32> [[TMP2]], i32 3
+; CHECK-NEXT:    ret void
+;
+  %ld0 = load i32, ptr %ptr, align 16
+  %gep1 = getelementptr inbounds i8, ptr %ptr, i32 4
+  %ld1 = load i32, ptr %gep1, align 4
+  %gep3 = getelementptr inbounds i8, ptr %ptr, i32 12
+  %ld3 = load i32, ptr %gep3, align 4
+
+  %gep4 = getelementptr inbounds i8, ptr %ptr, i32 16
+  %ld4 = load i32, ptr %gep4, align 16
+  %gep7 = getelementptr inbounds i8, ptr %ptr, i32 28
+  %ld7 = load i32, ptr %gep7, align 4
+  ret void
+}
+
+; Load elements 0, 1, 3, 5, 6, and 7. Elements 2 and 4 will be filled.
+; The load generated for element 4 is known to be well-aligned because of its
+; distance from the first (align 16) load.
+define void @test8(ptr %ptr) {
+; CHECK-LABEL: define void @test8(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[PTR]], align 16
+; CHECK-NEXT:    [[LD03:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[LD14:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[GAPFILL5:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
+; CHECK-NEXT:    [[LD36:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
+; CHECK-NEXT:    [[GEP3:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 12
+; CHECK-NEXT:    [[GAPFILLGEP1:%.*]] = getelementptr i8, ptr [[GEP3]], i64 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr [[GAPFILLGEP1]], align 16
+; CHECK-NEXT:    [[GAPFILL27:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
+; CHECK-NEXT:    [[LD58:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1
+; CHECK-NEXT:    [[LD69:%.*]] = extractelement <4 x i32> [[TMP2]], i32 2
+; CHECK-NEXT:    [[LD710:%.*]] = extractelement <4 x i32> [[TMP2]], i32 3
+; CHECK-NEXT:    ret void
+;
+  %ld0 = load i32, ptr %ptr, align 16
+  %gep1 = getelementptr inbounds i8, ptr %ptr, i32 4
+  %ld1 = load i32, ptr %gep1, align 4
+  %gep3 = getelementptr inbounds i8, ptr %ptr, i32 12
+  %ld3 = load i32, ptr %gep3, align 4
+
+  %gep5 = getelementptr inbounds i8, ptr %ptr, i32 20
+  %ld5 = load i32, ptr %gep5, align 16
+  %gep6 = getelementptr inbounds i8, ptr %ptr, i32 24
+  %ld6 = load i32, ptr %gep6, align 4
+  %gep7 = getelementptr inbounds i8, ptr %ptr, i32 28
+  %ld7 = load i32, ptr %gep7, align 4
+  ret void
+}
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/masked-store.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/masked-store.ll
new file mode 100644
index 0000000000000..1346bd0a3fc26
--- /dev/null
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/masked-store.ll
@@ -0,0 +1,541 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=nvptx64-nvidia-cuda -passes=infer-alignment,load-store-vectorizer -mcpu=sm_100 -mattr=+ptx88 -S -o - %s | FileCheck %s
+
+; POSITIVE TESTS
+
+; store elements 0, 1, and 3, filling the gap with a generated store of element 2
+define void @singleGap(ptr addrspace(1) %out) {
+; CHECK-LABEL: define void @singleGap(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p1(<4 x i64> <i64 1, i64 2, i64 poison, i64 4>, ptr addrspace(1) [[OUT]], i32 32, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
+; CHECK-NEXT:    ret void
+;
+  store i64 1, ptr addrspace(1) %out, align 32
+  %getElem1 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 8
+  store i64 2, ptr addrspace(1) %getElem1, align 8
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 24
+  store i64 4, ptr addrspace(1) %getElem3, align 8
+  ret void
+}
+
+; store elements 0, 1, and 3, filling the gap with a generated store of element 2
+define void @singleGapDouble(ptr addrspace(1) %out) {
+; CHECK-LABEL: define void @singleGapDouble(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    call void @llvm.masked.store.v4f64.p1(<4 x double> <double 1.000000e+00, double 2.000000e+00, double poison, double 4.000000e+00>, ptr addrspace(1) [[OUT]], i32 32, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
+; CHECK-NEXT:    ret void
+;
+  store double 1.0, ptr addrspace(1) %out, align 32
+  %getElem1 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 8
+  store double 2.0, ptr addrspace(1) %getElem1, align 8
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 24
+  store double 4.0, ptr addrspace(1) %getElem3, align 8
+  ret void
+}
+
+; store elements 0, 3, filling the gaps with generated stores of elements 1 and 2
+define void @multipleGaps(ptr addrspace(1) %out) {
+; CHECK-LABEL: define void @multipleGaps(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p1(<4 x i64> <i64 1, i64 poison, i64 poison, i64 4>, ptr addrspace(1) [[OUT]], i32 32, <4 x i1> <i1 true, i1 false, i1 false, i1 true>)
+; CHECK-NEXT:    ret void
+;
+  store i64 1, ptr addrspace(1) %out, align 32
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 24
+  store i64 4, ptr addrspace(1) %getElem3, align 8
+  ret void
+}
+
+; store elements 0, 3, 4, 7, filling the gaps with generated stores of elements 1, 2, 5, 6
+define void @multipleGaps8xi32(ptr addrspace(1) %out) {
+; CHECK-LABEL: define void @multipleGaps8xi32(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p1(<8 x i32> <i32 1, i32 poison, i32 poison, i32 2, i32 4, i32 poison, i32 poison, i32 8>, ptr addrspace(1) [[OUT]], i32 32, <8 x i1> <i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true>)
+; CHECK-NEXT:    ret void
+;
+  store i32 1, ptr addrspace(1) %out, align 32
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 12
+  store i32 2, ptr addrspace(1) %getElem3, align 4
+  %getElem4 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 16
+  store i32 4, ptr addrspace(1) %getElem4, align 4
+  %getElem7 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 28
+  store i32 8, ptr addrspace(1) %getElem7, align 4
+  ret void
+}
+
+; store elements 0, 1, 2, 3, 5, 6, 7, filling the gap with a generated store of element 4,
+; resulting in two 4xi64 stores, the second of which is led by a gap-filled element.
+define void @singleGapLongerChain(ptr addrspace(1) %out) {
+; CHECK-LABEL: define void @singleGapLongerChain(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[GETELEM3:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 24
+; CHECK-NEXT:    store <4 x i64> <i64 1, i64 2, i64 3, i64 4>, ptr addrspace(1) [[OUT]], align 32
+; CHECK-NEXT:    [[GAPFILLGEP:%.*]] = getelementptr i8, ptr addrspace(1) [[GETELEM3]], i64 8
+; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p1(<4 x i64> <i64 poison, i64 6, i64 7, i64 8>, ptr addrspace(1) [[GAPFILLGEP]], i32 32, <4 x i1> <i1 false, i1 true, i1 true, i1 true>)
+; CHECK-NEXT:    ret void
+;
+  store i64 1, ptr addrspace(1) %out, align 32
+  %getElem1 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 8
+  store i64 2, ptr addrspace(1) %getElem1, align 8
+  %getElem2 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 16
+  store i64 3, ptr addrspace(1) %getElem2, align 8
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 24
+  store i64 4, ptr addrspace(1) %getElem3, align 8
+  %getElem5 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 40
+  store i64 6, ptr addrspace(1) %getElem5, align 8
+  %getElem6 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 48
+  store i64 7, ptr addrspace(1) %getElem6, align 8
+  %getElem7 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 56
+  store i64 8, ptr addrspace(1) %getElem7, align 8
+  ret void
+}
+
+; store elements 0, 1, and 3, filling the gap with a generated store of element 2
+define void @vectorElements(ptr addrspace(1) %out) {
+; CHECK-LABEL: define void @vectorElements(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p1(<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 poison, i32 poison, i32 7, i32 8>, ptr addrspace(1) [[OUT]], i32 32, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true>)
+; CHECK-NEXT:    ret void
+;
+  store <2 x i32> <i32 1, i32 2>, ptr addrspace(1) %out, align 32
+  %getElem1 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 8
+  store <2 x i32> <i32 3, i32 4>, ptr addrspace(1) %getElem1, align 8
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 24
+  store <2 x i32> <i32 7, i32 8>, ptr addrspace(1) %getElem3, align 8
+  ret void
+}
+
+; store elements 0, 1, and 3. Element 2 should not end up filled because 8xi64 is not legal.
+define void @vectorElements64(ptr addrspace(1) %in) {
+; CHECK-LABEL: define void @vectorElements64(
+; CHECK-SAME: ptr addrspace(1) [[IN:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    store <4 x i64> <i64 1, i64 2, i64 3, i64 4>, ptr addrspace(1) [[IN]], align 32
+; CHECK-NEXT:    [[GETELEM1:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[IN]], i32 48
+; CHECK-NEXT:    store <2 x i64> <i64 7, i64 8>, ptr addrspace(1) [[GETELEM1]], align 16
+; CHECK-NEXT:    ret void
+;
+  store <2 x i64> <i64 1, i64 2>, ptr addrspace(1) %in, align 32
+  %getElem1 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 16
+  store <2 x i64> <i64 3, i64 4>, ptr addrspace(1) %getElem1, align 16
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 48
+  store <2 x i64> <i64 7, i64 8>, ptr addrspace(1) %getElem3, align 16
+  ret void
+}
+
+; store elements 0, 1, and 2, extending the chain with element 3
+define void @extendStores(ptr addrspace(1) %out) {
+; CHECK-LABEL: define void @extendStores(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p1(<4 x i64> <i64 1, i64 2, i64 3, i64 poison>, ptr addrspace(1) [[OUT]], i32 32, <4 x i1> <i1 true, i1 true, i1 true, i1 false>)
+; CHECK-NEXT:    ret void
+;
+  store i64 1, ptr addrspace(1) %out, align 32
+  %getElem1 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 8
+  store i64 2, ptr addrspace(1) %getElem1, align 8
+  %getElem2 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 16
+  store i64 3, ptr addrspace(1) %getElem2, align 8
+  ret void
+}
+
+; store elements 0, 1, 2, 3, and 4, extending the chain with elements 5, 6, and 7
+define void @extendStores8xi32(ptr addrspace(1) %out) {
+; CHECK-LABEL: define void @extendStores8xi32(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p1(<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 poison, i32 poison, i32 poison>, ptr addrspace(1) [[OUT]], i32 32, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false>)
+; CHECK-NEXT:    ret void
+;
+  store i32 1, ptr addrspace(1) %out, align 32
+  %getElem1 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 4
+  store i32 2, ptr addrspace(1) %getElem1, align 4
+  %getElem2 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 8
+  store i32 3, ptr addrspace(1) %getElem2, align 4
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 12
+  store i32 4, ptr addrspace(1) %getElem3, align 4
+  %getElem4 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 16
+  store i32 5, ptr addrspace(1) %getElem4, align 4
+  ret void
+}
+
+; load and store elements 0, 1, 2, 3, and 4, extending both chains with elements 5, 6, and 7
+define void @extendStoresFromLoads8xi32(ptr addrspace(1) %in, ptr addrspace(1) %out) {
+; CHECK-LABEL: define void @extendStoresFromLoads8xi32(
+; CHECK-SAME: ptr addrspace(1) [[IN:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr addrspace(1) [[IN]], align 32
+; CHECK-NEXT:    [[LOAD05:%.*]] = extractelement <8 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[LOAD16:%.*]] = extractelement <8 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[LOAD27:%.*]] = extractelement <8 x i32> [[TMP1]], i32 2
+; CHECK-NEXT:    [[LOAD38:%.*]] = extractelement <8 x i32> [[TMP1]], i32 3
+; CHECK-NEXT:    [[LOAD49:%.*]] = extractelement <8 x i32> [[TMP1]], i32 4
+; CHECK-NEXT:    [[EXTENDLOAD10:%.*]] = extractelement <8 x i32> [[TMP1]], i32 5
+; CHECK-NEXT:    [[EXTENDLOAD211:%.*]] = extractelement <8 x i32> [[TMP1]], i32 6
+; CHECK-NEXT:    [[EXTENDLOAD412:%.*]] = extractelement <8 x i32> [[TMP1]], i32 7
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x i32> poison, i32 [[LOAD05]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <8 x i32> [[TMP2]], i32 [[LOAD16]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <8 x i32> [[TMP3]], i32 [[LOAD27]], i32 2
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <8 x i32> [[TMP4]], i32 [[LOAD38]], i32 3
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <8 x i32> [[TMP5]], i32 [[LOAD49]], i32 4
+; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <8 x i32> [[TMP6]], i32 poison, i32 5
+; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <8 x i32> [[TMP7]], i32 poison, i32 6
+; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <8 x i32> [[TMP8]], i32 poison, i32 7
+; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p1(<8 x i32> [[TMP9]], ptr addrspace(1) [[OUT]], i32 32, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false>)
+; CHECK-NEXT:    ret void
+;
+  %load0 = load i32, ptr addrspace(1) %in, align 32
+  %loadGetElem1 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 4
+  %load1 = load i32, ptr addrspace(1) %loadGetElem1, align 4
+  %loadGetElem2 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 8
+  %load2 = load i32, ptr addrspace(1) %loadGetElem2, align 4
+  %loadGetElem3 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 12
+  %load3 = load i32, ptr addrspace(1) %loadGetElem3, align 4
+  %loadGetElem4 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 16
+  %load4 = load i32, ptr addrspace(1) %loadGetElem4, align 4
+
+  store i32 %load0, ptr addrspace(1) %out, align 32
+  %getElem1 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 4
+  store i32 %load1, ptr addrspace(1) %getElem1, align 4
+  %getElem2 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 8
+  store i32 %load2, ptr addrspace(1) %getElem2, align 4
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 12
+  store i32 %load3, ptr addrspace(1) %getElem3, align 4
+  %getElem4 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 16
+  store i32 %load4, ptr addrspace(1) %getElem4, align 4
+  ret void
+}
+
+; store elements 0, 1, 3, and 4; gap-fill element 2 and extend the chain with elements 5, 6, and 7
+define void @extendAndGapFillStoresFromLoads8xi32(ptr addrspace(1) %in, ptr addrspace(1) %out) {
+; CHECK-LABEL: define void @extendAndGapFillStoresFromLoads8xi32(
+; CHECK-SAME: ptr addrspace(1) [[IN:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr addrspace(1) [[IN]], align 32
+; CHECK-NEXT:    [[LOAD05:%.*]] = extractelement <8 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[LOAD16:%.*]] = extractelement <8 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[LOAD27:%.*]] = extractelement <8 x i32> [[TMP1]], i32 2
+; CHECK-NEXT:    [[LOAD38:%.*]] = extractelement <8 x i32> [[TMP1]], i32 3
+; CHECK-NEXT:    [[LOAD49:%.*]] = extractelement <8 x i32> [[TMP1]], i32 4
+; CHECK-NEXT:    [[EXTENDLOAD10:%.*]] = extractelement <8 x i32> [[TMP1]], i32 5
+; CHECK-NEXT:    [[EXTENDLOAD211:%.*]] = extractelement <8 x i32> [[TMP1]], i32 6
+; CHECK-NEXT:    [[EXTENDLOAD412:%.*]] = extractelement <8 x i32> [[TMP1]], i32 7
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x i32> poison, i32 [[LOAD05]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <8 x i32> [[TMP2]], i32 [[LOAD16]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <8 x i32> [[TMP3]], i32 poison, i32 2
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <8 x i32> [[TMP4]], i32 [[LOAD38]], i32 3
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <8 x i32> [[TMP5]], i32 [[LOAD49]], i32 4
+; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <8 x i32> [[TMP6]], i32 poison, i32 5
+; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <8 x i32> [[TMP7]], i32 poison, i32 6
+; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <8 x i32> [[TMP8]], i32 poison, i32 7
+; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p1(<8 x i32> [[TMP9]], ptr addrspace(1) [[OUT]], i32 32, <8 x i1> <i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 false, i1 false>)
+; CHECK-NEXT:    ret void
+;
+  %load0 = load i32, ptr addrspace(1) %in, align 32
+  %loadGetElem1 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 4
+  %load1 = load i32, ptr addrspace(1) %loadGetElem1, align 4
+  %loadGetElem3 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 12
+  %load3 = load i32, ptr addrspace(1) %loadGetElem3, align 4
+  %loadGetElem4 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 16
+  %load4 = load i32, ptr addrspace(1) %loadGetElem4, align 4
+
+  store i32 %load0, ptr addrspace(1) %out, align 32
+  %getElem1 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 4
+  store i32 %load1, ptr addrspace(1) %getElem1, align 4
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 12
+  store i32 %load3, ptr addrspace(1) %getElem3, align 4
+  %getElem4 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 16
+  store i32 %load4, ptr addrspace(1) %getElem4, align 4
+  ret void
+}
+
+
+; NEGATIVE TESTS
+
+; Wrong address space, no gap filling
+define void @singleGapWrongAddrSpace(ptr addrspace(3) %out) {
+; CHECK-LABEL: define void @singleGapWrongAddrSpace(
+; CHECK-SAME: ptr addrspace(3) [[OUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    store <2 x i64> <i64 1, i64 2>, ptr addrspace(3) [[OUT]], align 32
+; CHECK-NEXT:    [[GETELEM3:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[OUT]], i32 24
+; CHECK-NEXT:    store i64 4, ptr addrspace(3) [[GETELEM3]], align 8
+; CHECK-NEXT:    ret void
+;
+  store i64 1, ptr addrspace(3) %out, align 32
+  %getElem1 = getelementptr inbounds i8, ptr addrspace(3) %out, i32 8
+  store i64 2, ptr addrspace(3) %getElem1, align 8
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(3) %out, i32 24
+  store i64 4, ptr addrspace(3) %getElem3, align 8
+  ret void
+}
+
+; Not enough alignment for a masked store, but we still vectorize the two contiguous stores into a smaller vector
+define void @singleGapMisaligned(ptr addrspace(1) %out) {
+; CHECK-LABEL: define void @singleGapMisaligned(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    store <2 x i64> <i64 1, i64 2>, ptr addrspace(1) [[OUT]], align 16
+; CHECK-NEXT:    [[GETELEM3:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 24
+; CHECK-NEXT:    store i64 4, ptr addrspace(1) [[GETELEM3]], align 8
+; CHECK-NEXT:    ret void
+;
+  store i64 1, ptr addrspace(1) %out, align 16
+  %getElem1 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 8
+  store i64 2, ptr addrspace(1) %getElem1, align 8
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 24
+  store i64 4, ptr addrspace(1) %getElem3, align 8
+  ret void
+}
+
+; Not enough bytes to meet the minimum masked store size for the target
+define void @singleGap4xi32(ptr addrspace(1) %out) {
+; CHECK-LABEL: define void @singleGap4xi32(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    store i32 1, ptr addrspace(1) [[OUT]], align 32
+; CHECK-NEXT:    [[GETELEM2:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 8
+; CHECK-NEXT:    store <2 x i32> <i32 3, i32 4>, ptr addrspace(1) [[GETELEM2]], align 8
+; CHECK-NEXT:    ret void
+;
+  store i32 1, ptr addrspace(1) %out, align 32
+  %getElem2 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 8
+  store i32 3, ptr addrspace(1) %getElem2, align 4
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 12
+  store i32 4, ptr addrspace(1) %getElem3, align 4
+  ret void
+}
+
+; store elements 0, 1, 2, 5, 6, and 7. Elements 3 and 4 don't get filled because the heuristic
+; only fills 2-element gaps that sit in the middle of an aligned group of 4 elements.
+define void @gapInWrongLocation(ptr addrspace(1) %out) {
+; CHECK-LABEL: define void @gapInWrongLocation(
+; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    store <2 x i32> <i32 1, i32 2>, ptr addrspace(1) [[OUT]], align 32
+; CHECK-NEXT:    [[GETELEM2:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 8
+; CHECK-NEXT:    store i32 3, ptr addrspace(1) [[GETELEM2]], align 8
+; CHECK-NEXT:    [[GETELEM5:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 20
+; CHECK-NEXT:    store i32 5, ptr addrspace(1) [[GETELEM5]], align 4
+; CHECK-NEXT:    [[GETELEM6:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 24
+; CHECK-NEXT:    store <2 x i32> <i32 6, i32 7>, ptr addrspace(1) [[GETELEM6]], align 8
+; CHECK-NEXT:    ret void
+;
+  store i32 1, ptr addrspace(1) %out, align 32
+  %getElem1 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 4
+  store i32 2, ptr addrspace(1) %getElem1, align 4
+  %getElem2 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 8
+  store i32 3, ptr addrspace(1) %getElem2, align 4
+  %getElem5 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 20
+  store i32 5, ptr addrspace(1) %getElem5, align 4
+  %getElem6 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 24
+  store i32 6, ptr addrspace(1) %getElem6, align 4
+  %getElem7 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 28
+  store i32 7, ptr addrspace(1) %getElem7, align 4
+  ret void
+}
+
+; This test has 32 bytes of i8s with a 2-element gap in the middle of each 4-byte chunk.
+; i8s are not supported by masked stores on the target, so the stores will not be vectorized.
+; The loads, on the other hand, get gap-filled.
+define void @cantMaski8(ptr addrspace(1) %in, ptr addrspace(1) %out) {
+; CHECK-LABEL: define void @cantMaski8(
+; CHECK-SAME: ptr addrspace(1) [[IN:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr addrspace(1) [[IN]], align 32
+; CHECK-NEXT:    [[LOAD031:%.*]] = extractelement <32 x i8> [[TMP1]], i32 0
+; CHECK-NEXT:    [[GAPFILL32:%.*]] = extractelement <32 x i8> [[TMP1]], i32 1
+; CHECK-NEXT:    [[GAPFILL233:%.*]] = extractelement <32 x i8> [[TMP1]], i32 2
+; CHECK-NEXT:    [[LOAD334:%.*]] = extractelement <32 x i8> [[TMP1]], i32 3
+; CHECK-NEXT:    [[LOAD435:%.*]] = extractelement <32 x i8> [[TMP1]], i32 4
+; CHECK-NEXT:    [[GAPFILL436:%.*]] = extractelement <32 x i8> [[TMP1]], i32 5
+; CHECK-NEXT:    [[GAPFILL637:%.*]] = extractelement <32 x i8> [[TMP1]], i32 6
+; CHECK-NEXT:    [[LOAD738:%.*]] = extractelement <32 x i8> [[TMP1]], i32 7
+; CHECK-NEXT:    [[LOAD839:%.*]] = extractelement <32 x i8> [[TMP1]], i32 8
+; CHECK-NEXT:    [[GAPFILL840:%.*]] = extractelement <32 x i8> [[TMP1]], i32 9
+; CHECK-NEXT:    [[GAPFILL1041:%.*]] = extractelement <32 x i8> [[TMP1]], i32 10
+; CHECK-NEXT:    [[LOAD1142:%.*]] = extractelement <32 x i8> [[TMP1]], i32 11
+; CHECK-NEXT:    [[LOAD1243:%.*]] = extractelement <32 x i8> [[TMP1]], i32 12
+; CHECK-NEXT:    [[GAPFILL1244:%.*]] = extractelement <32 x i8> [[TMP1]], i32 13
+; CHECK-NEXT:    [[GAPFILL1445:%.*]] = extractelement <32 x i8> [[TMP1]], i32 14
+; CHECK-NEXT:    [[LOAD1546:%.*]] = extractelement <32 x i8> [[TMP1]], i32 15
+; CHECK-NEXT:    [[LOAD1647:%.*]] = extractelement <32 x i8> [[TMP1]], i32 16
+; CHECK-NEXT:    [[GAPFILL1648:%.*]] = extractelement <32 x i8> [[TMP1]], i32 17
+; CHECK-NEXT:    [[GAPFILL1849:%.*]] = extractelement <32 x i8> [[TMP1]], i32 18
+; CHECK-NEXT:    [[LOAD1950:%.*]] = extractelement <32 x i8> [[TMP1]], i32 19
+; CHECK-NEXT:    [[LOAD2051:%.*]] = extractelement <32 x i8> [[TMP1]], i32 20
+; CHECK-NEXT:    [[GAPFILL2052:%.*]] = extractelement <32 x i8> [[TMP1]], i32 21
+; CHECK-NEXT:    [[GAPFILL2253:%.*]] = extractelement <32 x i8> [[TMP1]], i32 22
+; CHECK-NEXT:    [[LOAD2354:%.*]] = extractelement <32 x i8> [[TMP1]], i32 23
+; CHECK-NEXT:    [[LOAD2455:%.*]] = extractelement <32 x i8> [[TMP1]], i32 24
+; CHECK-NEXT:    [[GAPFILL2456:%.*]] = extractelement <32 x i8> [[TMP1]], i32 25
+; CHECK-NEXT:    [[GAPFILL2657:%.*]] = extractelement <32 x i8> [[TMP1]], i32 26
+; CHECK-NEXT:    [[LOAD2758:%.*]] = extractelement <32 x i8> [[TMP1]], i32 27
+; CHECK-NEXT:    [[LOAD2859:%.*]] = extractelement <32 x i8> [[TMP1]], i32 28
+; CHECK-NEXT:    [[GAPFILL2860:%.*]] = extractelement <32 x i8> [[TMP1]], i32 29
+; CHECK-NEXT:    [[GAPFILL3061:%.*]] = extractelement <32 x i8> [[TMP1]], i32 30
+; CHECK-NEXT:    [[LOAD3162:%.*]] = extractelement <32 x i8> [[TMP1]], i32 31
+; CHECK-NEXT:    store i8 [[LOAD031]], ptr addrspace(1) [[OUT]], align 32
+; CHECK-NEXT:    [[OUTELEM3:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 3
+; CHECK-NEXT:    store i8 [[LOAD334]], ptr addrspace(1) [[OUTELEM3]], align 1
+; CHECK-NEXT:    [[OUTELEM4:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 4
+; CHECK-NEXT:    store i8 [[LOAD435]], ptr addrspace(1) [[OUTELEM4]], align 4
+; CHECK-NEXT:    [[OUTELEM7:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 7
+; CHECK-NEXT:    store i8 [[LOAD738]], ptr addrspace(1) [[OUTELEM7]], align 1
+; CHECK-NEXT:    [[OUTELEM8:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 8
+; CHECK-NEXT:    store i8 [[LOAD839]], ptr addrspace(1) [[OUTELEM8]], align 8
+; CHECK-NEXT:    [[OUTELEM11:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 11
+; CHECK-NEXT:    store i8 [[LOAD1142]], ptr addrspace(1) [[OUTELEM11]], align 1
+; CHECK-NEXT:    [[OUTELEM12:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 12
+; CHECK-NEXT:    store i8 [[LOAD1243]], ptr addrspace(1) [[OUTELEM12]], align 4
+; CHECK-NEXT:    [[OUTELEM15:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 15
+; CHECK-NEXT:    store i8 [[LOAD1546]], ptr addrspace(1) [[OUTELEM15]], align 1
+; CHECK-NEXT:    [[OUTELEM16:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 16
+; CHECK-NEXT:    store i8 [[LOAD1647]], ptr addrspace(1) [[OUTELEM16]], align 16
+; CHECK-NEXT:    [[OUTELEM19:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 19
+; CHECK-NEXT:    store i8 [[LOAD1950]], ptr addrspace(1) [[OUTELEM19]], align 1
+; CHECK-NEXT:    [[OUTELEM20:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 20
+; CHECK-NEXT:    store i8 [[LOAD2051]], ptr addrspace(1) [[OUTELEM20]], align 4
+; CHECK-NEXT:    [[OUTELEM23:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 23
+; CHECK-NEXT:    store i8 [[LOAD2354]], ptr addrspace(1) [[OUTELEM23]], align 1
+; CHECK-NEXT:    [[OUTELEM24:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 24
+; CHECK-NEXT:    store i8 [[LOAD2455]], ptr addrspace(1) [[OUTELEM24]], align 8
+; CHECK-NEXT:    [[OUTELEM27:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 27
+; CHECK-NEXT:    store i8 [[LOAD2758]], ptr addrspace(1) [[OUTELEM27]], align 1
+; CHECK-NEXT:    [[OUTELEM28:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 28
+; CHECK-NEXT:    store i8 [[LOAD2859]], ptr addrspace(1) [[OUTELEM28]], align 4
+; CHECK-NEXT:    [[OUTELEM31:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 31
+; CHECK-NEXT:    store i8 [[LOAD3162]], ptr addrspace(1) [[OUTELEM31]], align 1
+; CHECK-NEXT:    ret void
+;
+  %load0 = load i8, ptr addrspace(1) %in, align 32
+  %getElem3 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 3
+  %load3 = load i8, ptr addrspace(1) %getElem3, align 1
+  %getElem4 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 4
+  %load4 = load i8, ptr addrspace(1) %getElem4, align 4
+  %getElem7 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 7
+  %load7 = load i8, ptr addrspace(1) %getElem7, align 1
+  %getElem8 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 8
+  %load8 = load i8, ptr addrspace(1) %getElem8, align 8
+  %getElem11 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 11
+  %load11 = load i8, ptr addrspace(1) %getElem11, align 1
+  %getElem12 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 12
+  %load12 = load i8, ptr addrspace(1) %getElem12, align 4
+  %getElem15 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 15
+  %load15 = load i8, ptr addrspace(1) %getElem15, align 1
+  %getElem16 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 16
+  %load16 = load i8, ptr addrspace(1) %getElem16, align 16
+  %getElem19 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 19
+  %load19 = load i8, ptr addrspace(1) %getElem19, align 1
+  %getElem20 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 20
+  %load20 = load i8, ptr addrspace(1) %getElem20, align 4
+  %getElem23 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 23
+  %load23 = load i8, ptr addrspace(1) %getElem23, align 1
+  %getElem24 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 24
+  %load24 = load i8, ptr addrspace(1) %getElem24, align 8
+  %getElem27 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 27
+  %load27 = load i8, ptr addrspace(1) %getElem27, align 1
+  %getElem28 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 28
+  %load28 = load i8, ptr addrspace(1) %getElem28, align 4
+  %getElem31 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 31
+  %load31 = load i8, ptr addrspace(1) %getElem31, align 1
+
+  store i8 %load0, ptr addrspace(1) %out, align 32
+  %outElem3 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 3
+  store i8 %load3, ptr addrspace(1) %outElem3, align 1
+  %outElem4 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 4
+  store i8 %load4, ptr addrspace(1) %outElem4, align 4
+  %outElem7 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 7
+  store i8 %load7, ptr addrspace(1) %outElem7, align 1
+  %outElem8 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 8
+  store i8 %load8, ptr addrspace(1) %outElem8, align 8
+  %outElem11 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 11
+  store i8 %load11, ptr addrspace(1) %outElem11, align 1
+  %outElem12 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 12
+  store i8 %load12, ptr addrspace(1) %outElem12, align 4
+  %outElem15 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 15
+  store i8 %load15, ptr addrspace(1) %outElem15, align 1
+  %outElem16 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 16
+  store i8 %load16, ptr addrspace(1) %outElem16, align 16
+  %outElem19 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 19
+  store i8 %load19, ptr addrspace(1) %outElem19, align 1
+  %outElem20 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 20
+  store i8 %load20, ptr addrspace(1) %outElem20, align 4
+  %outElem23 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 23
+  store i8 %load23, ptr addrspace(1) %outElem23, align 1
+  %outElem24 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 24
+  store i8 %load24, ptr addrspace(1) %outElem24, align 8
+  %outElem27 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 27
+  store i8 %load27, ptr addrspace(1) %outElem27, align 1
+  %outElem28 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 28
+  store i8 %load28, ptr addrspace(1) %outElem28, align 4
+  %outElem31 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 31
+  store i8 %load31, ptr addrspace(1) %outElem31, align 1
+
+  ret void
+}
+
+; This test has 32 bytes of i16s with a 2-element gap in the middle of each 4-element chunk.
+; i16s are not supported by masked stores on the target, so the stores will not be vectorized.
+; The loads, on the other hand, get gap-filled.
+define void @cantMaski16(ptr addrspace(1) %in, ptr addrspace(1) %out) {
+; CHECK-LABEL: define void @cantMaski16(
+; CHECK-SAME: ptr addrspace(1) [[IN:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr addrspace(1) [[IN]], align 32
+; CHECK-NEXT:    [[LOAD015:%.*]] = extractelement <16 x i16> [[TMP1]], i32 0
+; CHECK-NEXT:    [[GAPFILL16:%.*]] = extractelement <16 x i16> [[TMP1]], i32 1
+; CHECK-NEXT:    [[GAPFILL217:%.*]] = extractelement <16 x i16> [[TMP1]], i32 2
+; CHECK-NEXT:    [[LOAD318:%.*]] = extractelement <16 x i16> [[TMP1]], i32 3
+; CHECK-NEXT:    [[LOAD419:%.*]] = extractelement <16 x i16> [[TMP1]], i32 4
+; CHECK-NEXT:    [[GAPFILL420:%.*]] = extractelement <16 x i16> [[TMP1]], i32 5
+; CHECK-NEXT:    [[GAPFILL621:%.*]] = extractelement <16 x i16> [[TMP1]], i32 6
+; CHECK-NEXT:    [[LOAD722:%.*]] = extractelement <16 x i16> [[TMP1]], i32 7
+; CHECK-NEXT:    [[LOAD823:%.*]] = extractelement <16 x i16> [[TMP1]], i32 8
+; CHECK-NEXT:    [[GAPFILL824:%.*]] = extractelement <16 x i16> [[TMP1]], i32 9
+; CHECK-NEXT:    [[GAPFILL1025:%.*]] = extractelement <16 x i16> [[TMP1]], i32 10
+; CHECK-NEXT:    [[LOAD1126:%.*]] = extractelement <16 x i16> [[TMP1]], i32 11
+; CHECK-NEXT:    [[LOAD1227:%.*]] = extractelement <16 x i16> [[TMP1]], i32 12
+; CHECK-NEXT:    [[GAPFILL1228:%.*]] = extractelement <16 x i16> [[TMP1]], i32 13
+; CHECK-NEXT:    [[GAPFILL1429:%.*]] = extractelement <16 x i16> [[TMP1]], i32 14
+; CHECK-NEXT:    [[LOAD1530:%.*]] = extractelement <16 x i16> [[TMP1]], i32 15
+; CHECK-NEXT:    store i16 [[LOAD015]], ptr addrspace(1) [[OUT]], align 32
+; CHECK-NEXT:    [[OUTELEM6:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 6
+; CHECK-NEXT:    store i16 [[LOAD318]], ptr addrspace(1) [[OUTELEM6]], align 2
+; CHECK-NEXT:    [[OUTELEM8:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 8
+; CHECK-NEXT:    store i16 [[LOAD419]], ptr addrspace(1) [[OUTELEM8]], align 8
+; CHECK-NEXT:    [[OUTELEM14:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 14
+; CHECK-NEXT:    store i16 [[LOAD722]], ptr addrspace(1) [[OUTELEM14]], align 2
+; CHECK-NEXT:    [[OUTELEM16:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 16
+; CHECK-NEXT:    store i16 [[LOAD823]], ptr addrspace(1) [[OUTELEM16]], align 16
+; CHECK-NEXT:    [[OUTELEM22:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 22
+; CHECK-NEXT:    store i16 [[LOAD1126]], ptr addrspace(1) [[OUTELEM22]], align 2
+; CHECK-NEXT:    [[OUTELEM24:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 24
+; CHECK-NEXT:    store i16 [[LOAD1227]], ptr addrspace(1) [[OUTELEM24]], align 8
+; CHECK-NEXT:    [[OUTELEM30:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 30
+; CHECK-NEXT:    store i16 [[LOAD1530]], ptr addrspace(1) [[OUTELEM30]], align 2
+; CHECK-NEXT:    ret void
+;
+  %load0 = load i16, ptr addrspace(1) %in, align 32
+  %getElem6 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 6
+  %load3 = load i16, ptr addrspace(1) %getElem6, align 2
+  %getElem8 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 8
+  %load4 = load i16, ptr addrspace(1) %getElem8, align 8
+  %getElem14 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 14
+  %load7 = load i16, ptr addrspace(1) %getElem14, align 2
+  %getElem16 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 16
+  %load8 = load i16, ptr addrspace(1) %getElem16, align 16
+  %getElem22 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 22
+  %load11 = load i16, ptr addrspace(1) %getElem22, align 2
+  %getElem24 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 24
+  %load12 = load i16, ptr addrspace(1) %getElem24, align 8
+  %getElem30 = getelementptr inbounds i8, ptr addrspace(1) %in, i32 30
+  %load15 = load i16, ptr addrspace(1) %getElem30, align 2
+
+  store i16 %load0, ptr addrspace(1) %out, align 32
+  %outElem6 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 6
+  store i16 %load3, ptr addrspace(1) %outElem6, align 2
+  %outElem8 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 8
+  store i16 %load4, ptr addrspace(1) %outElem8, align 8
+  %outElem14 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 14
+  store i16 %load7, ptr addrspace(1) %outElem14, align 2
+  %outElem16 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 16
+  store i16 %load8, ptr addrspace(1) %outElem16, align 16
+  %outElem22 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 22
+  store i16 %load11, ptr addrspace(1) %outElem22, align 2
+  %outElem24 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 24
+  store i16 %load12, ptr addrspace(1) %outElem24, align 8
+  %outElem30 = getelementptr inbounds i8, ptr addrspace(1) %out, i32 30
+  store i16 %load15, ptr addrspace(1) %outElem30, align 2
+
+  ret void
+}
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i8.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i8.ll
index 2d3c289c2a12b..e031daab6d786 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i8.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i8.ll
@@ -40,8 +40,7 @@ define void @int8x3a4(ptr nocapture align 4 %ptr) {
   ret void
 
 ; CHECK-LABEL: @int8x3a4
-; CHECK: load <2 x i8>
-; CHECK: load i8
+; CHECK: load <4 x i8>
 ; CHECK: store <2 x i8>
 ; CHECK: store i8
 }
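
As a quick reference for the new tests above, here is a minimal sketch of the load gap-fill transformation they exercise (the pointer and value names are illustrative; the "after" form matches the plain-load case checked in gap-fill.ll). Three i32 loads at offsets 0, 4, and 12 leave a hole at offset 8; the vectorizer fills the hole and emits a single <4 x i32> load, with the extra lane simply left unused:

  ; before gap filling
  %ld0 = load i32, ptr %p, align 16
  %gep1 = getelementptr inbounds i8, ptr %p, i32 4
  %ld1 = load i32, ptr %gep1, align 4
  %gep3 = getelementptr inbounds i8, ptr %p, i32 12
  %ld3 = load i32, ptr %gep3, align 4

  ; after gap filling (lane 2 is loaded but never used)
  %vec = load <4 x i32>, ptr %p, align 16
  %ld0.vec = extractelement <4 x i32> %vec, i32 0
  %ld1.vec = extractelement <4 x i32> %vec, i32 1
  %ld3.vec = extractelement <4 x i32> %vec, i32 3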

>From b147e233b5666f1895edc01018dd29951eb84b3c Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Wed, 17 Sep 2025 15:48:02 +0000
Subject: [PATCH 02/24] Clang format

---
 llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 04f4e92826a52..d452e1609957a 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -930,9 +930,7 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
       }
 
       Chain ExtendingLoadsStores;
-      bool ExtendChain = IsLoadChain
-                             ? ExtendLoads
-                             : ExtendStores;
+      bool ExtendChain = IsLoadChain ? ExtendLoads : ExtendStores;
       if (ExtendChain && NumVecElems < TargetVF && NumVecElems % 2 != 0 &&
           VecElemBits >= 8) {
         // TargetVF may be a lot higher than NumVecElems,
@@ -1047,8 +1045,8 @@ bool Vectorizer::vectorizeChain(Chain &C) {
 
   // If we are left with a two-element chain, and one of the elements is an
   // extra element, we don't want to vectorize
-  if (C.size() == 2 && (ExtraElements.contains(C[0].Inst) ||
-                        ExtraElements.contains(C[1].Inst)))
+  if (C.size() == 2 &&
+      (ExtraElements.contains(C[0].Inst) || ExtraElements.contains(C[1].Inst)))
     return false;
 
   sortChainInOffsetOrder(C);

>From c6d98ba3c172e6a6fb7ebdc6e6da70f39cf5f4e1 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Thu, 18 Sep 2025 15:37:56 +0000
Subject: [PATCH 03/24] Remove cl opts

---
 .../Vectorize/LoadStoreVectorizer.cpp         | 32 ++-----------------
 1 file changed, 3 insertions(+), 29 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index d452e1609957a..b0f7f12b157f3 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -119,29 +119,6 @@ using namespace llvm;
 
 #define DEBUG_TYPE "load-store-vectorizer"
 
-cl::opt<bool>
-    ExtendLoads("vect-extend-loads", cl::Hidden,
-                cl::desc("Load more elements if the target VF is higher "
-                         "than the chain length."),
-                cl::init(true));
-
-cl::opt<bool> ExtendStores(
-    "vect-extend-stores", cl::Hidden,
-    cl::desc("Store more elements if the target VF is higher "
-             "than the chain length and we have access to masked stores."),
-    cl::init(true));
-
-cl::opt<bool> FillLoadGaps(
-    "vect-fill-load-gaps", cl::Hidden,
-    cl::desc("Should Loads be introduced in gaps to enable vectorization."),
-    cl::init(true));
-
-cl::opt<bool>
-    FillStoreGaps("vect-fill-store-gaps", cl::Hidden,
-                  cl::desc("Should Stores be introduced in gaps to enable "
-                           "vectorization into masked stores."),
-                  cl::init(true));
-
 STATISTIC(NumVectorInstructions, "Number of vector accesses generated");
 STATISTIC(NumScalarsVectorized, "Number of scalar accesses vectorized");
 
@@ -689,9 +666,8 @@ std::vector<Chain> Vectorizer::splitChainByContiguity(Chain &C) {
   //   store for the target. If later on, we don't end up with a chain that
   //   could be vectorized into a legal masked store, the chains with extra
   //   elements will be filtered out in splitChainByAlignment.
-  bool TryFillGaps = isa<LoadInst>(C[0].Inst)
-                         ? (FillLoadGaps && TTI.isLegalToWidenLoads())
-                         : (FillStoreGaps && shouldAttemptMaskedStore(C));
+  bool TryFillGaps = isa<LoadInst>(C[0].Inst) ? TTI.isLegalToWidenLoads()
+                                              : shouldAttemptMaskedStore(C);
 
   unsigned ASPtrBits =
       DL.getIndexSizeInBits(getLoadStoreAddressSpace(C[0].Inst));
@@ -930,9 +906,7 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
       }
 
       Chain ExtendingLoadsStores;
-      bool ExtendChain = IsLoadChain ? ExtendLoads : ExtendStores;
-      if (ExtendChain && NumVecElems < TargetVF && NumVecElems % 2 != 0 &&
-          VecElemBits >= 8) {
+      if (NumVecElems < TargetVF && NumVecElems % 2 != 0 && VecElemBits >= 8) {
         // TargetVF may be a lot higher than NumVecElems,
         // so only extend to the next power of 2.
         assert(VecElemBits % 8 == 0);

>From adeacace7010165267ae689445e3b4ea3367f175 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Thu, 18 Sep 2025 15:58:56 +0000
Subject: [PATCH 04/24] Add context argument to TTI API

---
 llvm/include/llvm/Analysis/TargetTransformInfo.h      | 2 +-
 llvm/include/llvm/Analysis/TargetTransformInfoImpl.h  | 2 +-
 llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h      | 4 +++-
 llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp | 7 ++++---
 4 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 45355d1732c83..0b861d2e61dc1 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -827,7 +827,7 @@ class TargetTransformInfo {
   /// assuming the result is still well-aligned. For example, converting a load
   /// i32 to a load i64, or vectorizing three continuous load i32s into a load
   /// <4 x i32>.
-  LLVM_ABI bool isLegalToWidenLoads() const;
+  LLVM_ABI bool isLegalToWidenLoads(LLVMContext &Context) const;
 
   /// Return true if the target supports nontemporal store.
   LLVM_ABI bool isLegalNTStore(Type *DataType, Align Alignment) const;
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 979d20c2ec299..b370def9ece3e 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -318,7 +318,7 @@ class TargetTransformInfoImplBase {
     return false;
   }
 
-  virtual bool isLegalToWidenLoads() const { return false; }
+  virtual bool isLegalToWidenLoads(LLVMContext &Context) const { return false; }
 
   virtual bool isLegalNTStore(Type *DataType, Align Alignment) const {
     // By default, assume nontemporal memory stores are available for stores
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
index 6cc891a2db591..58e5472f4a67c 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
@@ -72,7 +72,9 @@ class NVPTXTTIImpl final : public BasicTTIImplBase<NVPTXTTIImpl> {
     return isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment, AddrSpace);
   }
 
-  bool isLegalToWidenLoads() const override { return true; };
+  bool isLegalToWidenLoads(LLVMContext &Context) const override {
+    return true;
+  };
 
   // NVPTX has infinite registers of all kinds, but the actual machine doesn't.
   // We conservatively return 1 here which is just enough to enable the
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index b0f7f12b157f3..bfdf18b582e7f 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -666,8 +666,9 @@ std::vector<Chain> Vectorizer::splitChainByContiguity(Chain &C) {
   //   store for the target. If later on, we don't end up with a chain that
   //   could be vectorized into a legal masked store, the chains with extra
   //   elements will be filtered out in splitChainByAlignment.
-  bool TryFillGaps = isa<LoadInst>(C[0].Inst) ? TTI.isLegalToWidenLoads()
-                                              : shouldAttemptMaskedStore(C);
+  bool TryFillGaps = isa<LoadInst>(C[0].Inst)
+                         ? TTI.isLegalToWidenLoads(F.getContext())
+                         : shouldAttemptMaskedStore(C);
 
   unsigned ASPtrBits =
       DL.getIndexSizeInBits(getLoadStoreAddressSpace(C[0].Inst));
@@ -924,7 +925,7 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
         // otherwise we may unnecessary split the chain when the target actually
         // supports non-pow2 VF.
         if (accessIsAllowedAndFast(NewSizeBytes, AS, Alignment, VecElemBits) &&
-            ((IsLoadChain ? TTI.isLegalToWidenLoads()
+            ((IsLoadChain ? TTI.isLegalToWidenLoads(F.getContext())
                           : TTI.isLegalMaskedStore(
                                 FixedVectorType::get(VecElemTy, NewNumVecElems),
                                 Alignment, AS, /*IsMaskConstant=*/true)))) {

>From 47913a39097368d6f72c46cffdd32f95f5036809 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dakersnar at me.com>
Date: Thu, 25 Sep 2025 11:21:06 -0500
Subject: [PATCH 05/24] Update
 llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp

Co-authored-by: Matt Arsenault <arsenm2 at gmail.com>
---
 llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index bfdf18b582e7f..bf8dd2580ff80 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -365,7 +365,7 @@ class Vectorizer {
   /// deleted before the end of the pass.
   ChainElem createExtraElementAfter(const ChainElem &PrevElem, APInt Offset,
                                     StringRef Prefix,
-                                    Align Alignment = Align(1));
+                                    Align Alignment = Align());
 
   /// Delete dead GEPs and extra Load/Store instructions created by
   /// createExtraElementAfter

>From b6b87e70fc778d74a852e510a3bf202853245d40 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Thu, 16 Oct 2025 19:08:21 +0000
Subject: [PATCH 06/24] Update tests to test for masked load generation in the
 LSV

---
 llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll |  1 +
 .../CodeGen/NVPTX/param-vectorize-device.ll    |  2 ++
 .../LoadStoreVectorizer/NVPTX/extend-chain.ll  |  4 ++--
 .../NVPTX/gap-fill-cleanup.ll                  |  2 +-
 .../NVPTX/gap-fill-invariant.ll                |  2 +-
 .../NVPTX/gap-fill-vectors.ll                  | 10 +++++-----
 .../LoadStoreVectorizer/NVPTX/gap-fill.ll      | 18 +++++++++---------
 .../LoadStoreVectorizer/NVPTX/masked-store.ll  |  8 ++++----
 .../LoadStoreVectorizer/NVPTX/vectorize_i8.ll  |  2 +-
 9 files changed, 26 insertions(+), 23 deletions(-)

diff --git a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
index 19ec2574e32b4..21b18555371dc 100644
--- a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
+++ b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
@@ -50,6 +50,7 @@ define half @fh(ptr %p) {
 ; ENABLED-EMPTY:
 ; ENABLED-NEXT:  // %bb.0:
 ; ENABLED-NEXT:    ld.param.b64 %rd1, [fh_param_0];
+; ENABLED-NEXT:    .pragma "used_bytes_mask 1023";
 ; ENABLED-NEXT:    ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
 ; ENABLED-NEXT:    { .reg .b16 tmp; mov.b32 {%rs1, tmp}, %r3; }
 ; ENABLED-NEXT:    mov.b32 {%rs2, %rs3}, %r2;
diff --git a/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll b/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll
index 4870050dd2d43..67dd29b1b6ca6 100644
--- a/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll
+++ b/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll
@@ -171,6 +171,7 @@ define internal fastcc [3 x i32] @callee_St4x3(ptr nocapture noundef readonly by
   ; CHECK:       .func  (.param .align 16 .b8 func_retval0[12])
   ; CHECK-LABEL: callee_St4x3(
   ; CHECK-NEXT:  .param .align 16 .b8 callee_St4x3_param_0[12]
+  ; CHECK:       .pragma "used_bytes_mask 4095";
   ; CHECK:       ld.param.v4.b32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]], [[R3:%r[0-9]+]], %{{.*}}}, [callee_St4x3_param_0];
   ; CHECK-DAG:   st.param.v2.b32 [func_retval0], {[[R1]], [[R2]]};
   ; CHECK-DAG:   st.param.b32    [func_retval0+8], [[R3]];
@@ -393,6 +394,7 @@ define internal fastcc [7 x i32] @callee_St4x7(ptr nocapture noundef readonly by
   ; CHECK-LABEL: callee_St4x7(
   ; CHECK-NEXT:  .param .align 16 .b8 callee_St4x7_param_0[28]
   ; CHECK:       ld.param.v4.b32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]], [[R3:%r[0-9]+]], [[R4:%r[0-9]+]]}, [callee_St4x7_param_0];
+  ; CHECK:       .pragma "used_bytes_mask 4095";
   ; CHECK:       ld.param.v4.b32 {[[R5:%r[0-9]+]], [[R6:%r[0-9]+]], [[R7:%r[0-9]+]], %{{.*}}}, [callee_St4x7_param_0+16];
   ; CHECK-DAG:   st.param.v4.b32 [func_retval0],  {[[R1]], [[R2]], [[R3]], [[R4]]};
   ; CHECK-DAG:   st.param.v2.b32 [func_retval0+16], {[[R5]], [[R6]]};
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll
index 24d0dea086ba8..2207d5b471d20 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll
@@ -10,7 +10,7 @@ define void @load3to4(ptr %p) #0 {
 ; CHECK-LABEL: define void @load3to4(
 ; CHECK-SAME: ptr [[P:%.*]]) {
 ; CHECK-NEXT:    [[P_0:%.*]] = getelementptr i32, ptr [[P]], i32 0
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[P_0]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[P_0]], i32 16, <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x i32> poison)
 ; CHECK-NEXT:    [[V01:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[V12:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[V23:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
@@ -32,7 +32,7 @@ define void @load5to8(ptr %p) #0 {
 ; CHECK-LABEL: define void @load5to8(
 ; CHECK-SAME: ptr [[P:%.*]]) {
 ; CHECK-NEXT:    [[P_0:%.*]] = getelementptr i16, ptr [[P]], i32 0
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[P_0]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[P_0]], i32 16, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false>, <8 x i16> poison)
 ; CHECK-NEXT:    [[V05:%.*]] = extractelement <8 x i16> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[V16:%.*]] = extractelement <8 x i16> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[V27:%.*]] = extractelement <8 x i16> [[TMP1]], i32 2
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-cleanup.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-cleanup.ll
index e812f8750fa76..e92ffe8eadbc3 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-cleanup.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-cleanup.ll
@@ -8,7 +8,7 @@
 define void @fillTwoGapsCanVectorize(ptr %in) {
 ; CHECK-LABEL: define void @fillTwoGapsCanVectorize(
 ; CHECK-SAME: ptr [[IN:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[IN]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[IN]], i32 16, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LOAD03:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[GAPFILL4:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL25:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-invariant.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-invariant.ll
index 6d0dfc677780d..7a28faf8b4810 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-invariant.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-invariant.ll
@@ -62,7 +62,7 @@ define i32 @noGapsMissingInvariant(ptr %in) {
 define i32 @twoGaps(ptr %in) {
 ; CHECK-LABEL: define i32 @twoGaps(
 ; CHECK-SAME: ptr [[IN:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[IN]], align 16, !invariant.load [[META0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[IN]], i32 16, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> poison), !invariant.load [[META0]]
 ; CHECK-NEXT:    [[LOAD03:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[GAPFILL4:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL25:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll
index fe7123898d450..303ec7e564d49 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll
@@ -9,7 +9,7 @@ define void @i1x8_gap_gap_i1x8(ptr %ptr) {
 ; CHECK-LABEL: define void @i1x8_gap_gap_i1x8(
 ; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
-; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i1>, ptr [[PTR0]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = call <32 x i1> @llvm.masked.load.v32i1.p0(ptr [[PTR0]], i32 4, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i1> poison)
 ; CHECK-NEXT:    [[L03:%.*]] = shufflevector <32 x i1> [[TMP1]], <32 x i1> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
 ; CHECK-NEXT:    [[GAPFILL4:%.*]] = shufflevector <32 x i1> [[TMP1]], <32 x i1> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
 ; CHECK-NEXT:    [[GAPFILL25:%.*]] = shufflevector <32 x i1> [[TMP1]], <32 x i1> poison, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
@@ -49,7 +49,7 @@ define void @i8x2_gap_gap_i8x2(ptr %ptr) {
 ; CHECK-LABEL: define void @i8x2_gap_gap_i8x2(
 ; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[PTR0]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr [[PTR0]], i32 8, <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true>, <8 x i8> poison)
 ; CHECK-NEXT:    [[L03:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> poison, <2 x i32> <i32 0, i32 1>
 ; CHECK-NEXT:    [[GAPFILL4:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> poison, <2 x i32> <i32 2, i32 3>
 ; CHECK-NEXT:    [[GAPFILL25:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> poison, <2 x i32> <i32 4, i32 5>
@@ -89,7 +89,7 @@ define void @i16x2_gap_i16x2_i16x2(ptr %ptr) {
 ; CHECK-LABEL: define void @i16x2_gap_i16x2_i16x2(
 ; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[PTR0]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[PTR0]], i32 16, <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <8 x i16> poison)
 ; CHECK-NEXT:    [[L01:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 0, i32 1>
 ; CHECK-NEXT:    [[GAPFILL2:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 2, i32 3>
 ; CHECK-NEXT:    [[L23:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 4, i32 5>
@@ -111,7 +111,7 @@ define void @i16x2_gap_gap_i16x2(ptr %ptr) {
 ; CHECK-LABEL: define void @i16x2_gap_gap_i16x2(
 ; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr [[PTR0]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[PTR0]], i32 16, <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true>, <8 x i16> poison)
 ; CHECK-NEXT:    [[L03:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 0, i32 1>
 ; CHECK-NEXT:    [[GAPFILL4:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 2, i32 3>
 ; CHECK-NEXT:    [[GAPFILL25:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 4, i32 5>
@@ -130,7 +130,7 @@ define void @i16x2_gap_gap_i16x2(ptr %ptr) {
 define void @i32x2_i32x2_gap_i32x2(ptr addrspace(1) %in) {
 ; CHECK-LABEL: define void @i32x2_i32x2_gap_i32x2(
 ; CHECK-SAME: ptr addrspace(1) [[IN:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr addrspace(1) [[IN]], align 32
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) [[IN]], i32 32, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true>, <8 x i32> poison)
 ; CHECK-NEXT:    [[VEC01:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 0, i32 1>
 ; CHECK-NEXT:    [[VEC12:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 2, i32 3>
 ; CHECK-NEXT:    [[GAPFILL3:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 4, i32 5>
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill.ll
index 82ebffed7f765..aae1a5f7266c4 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill.ll
@@ -5,7 +5,7 @@
 define void @test(ptr %ptr) {
 ; CHECK-LABEL: define void @test(
 ; CHECK-SAME: ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[PTR]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR]], i32 16, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD01:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[LD12:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL3:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
@@ -24,7 +24,7 @@ define void @test(ptr %ptr) {
 define void @test2(ptr %ptr) {
 ; CHECK-LABEL: define void @test2(
 ; CHECK-SAME: ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[PTR]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR]], i32 16, <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD01:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[GAPFILL2:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[LD23:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
@@ -83,7 +83,7 @@ define void @test4(ptr %ptr) {
 define void @test5(ptr %ptr) {
 ; CHECK-LABEL: define void @test5(
 ; CHECK-SAME: ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[PTR]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR]], i32 16, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD03:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[GAPFILL4:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL25:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
@@ -100,13 +100,13 @@ define void @test5(ptr %ptr) {
 define void @test6(ptr %ptr) {
 ; CHECK-LABEL: define void @test6(
 ; CHECK-SAME: ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[PTR]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR]], i32 16, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD03:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[LD14:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL5:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
 ; CHECK-NEXT:    [[LD36:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
 ; CHECK-NEXT:    [[GEP4:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 16
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr [[GEP4]], align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[GEP4]], i32 16, <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD47:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
 ; CHECK-NEXT:    [[GAPFILL28:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1
 ; CHECK-NEXT:    [[LD69:%.*]] = extractelement <4 x i32> [[TMP2]], i32 2
@@ -132,13 +132,13 @@ define void @test6(ptr %ptr) {
 define void @test7(ptr %ptr) {
 ; CHECK-LABEL: define void @test7(
 ; CHECK-SAME: ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[PTR]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR]], i32 16, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD05:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[LD16:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL7:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
 ; CHECK-NEXT:    [[LD38:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
 ; CHECK-NEXT:    [[GEP4:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 16
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr [[GEP4]], align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[GEP4]], i32 16, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD49:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
 ; CHECK-NEXT:    [[GAPFILL210:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1
 ; CHECK-NEXT:    [[GAPFILL411:%.*]] = extractelement <4 x i32> [[TMP2]], i32 2
@@ -164,14 +164,14 @@ define void @test7(ptr %ptr) {
 define void @test8(ptr %ptr) {
 ; CHECK-LABEL: define void @test8(
 ; CHECK-SAME: ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[PTR]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR]], i32 16, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD03:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[LD14:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL5:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
 ; CHECK-NEXT:    [[LD36:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
 ; CHECK-NEXT:    [[GEP3:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 12
 ; CHECK-NEXT:    [[GAPFILLGEP1:%.*]] = getelementptr i8, ptr [[GEP3]], i64 4
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr [[GAPFILLGEP1]], align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[GAPFILLGEP1]], i32 16, <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[GAPFILL27:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
 ; CHECK-NEXT:    [[LD58:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1
 ; CHECK-NEXT:    [[LD69:%.*]] = extractelement <4 x i32> [[TMP2]], i32 2
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/masked-store.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/masked-store.ll
index 1346bd0a3fc26..75d9c4b6e3125 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/masked-store.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/masked-store.ll
@@ -160,7 +160,7 @@ define void @extendStores8xi32(ptr addrspace(1) %out) {
 define void @extendStoresFromLoads8xi32(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 ; CHECK-LABEL: define void @extendStoresFromLoads8xi32(
 ; CHECK-SAME: ptr addrspace(1) [[IN:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr addrspace(1) [[IN]], align 32
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) [[IN]], i32 32, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false>, <8 x i32> poison)
 ; CHECK-NEXT:    [[LOAD05:%.*]] = extractelement <8 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[LOAD16:%.*]] = extractelement <8 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[LOAD27:%.*]] = extractelement <8 x i32> [[TMP1]], i32 2
@@ -206,7 +206,7 @@ define void @extendStoresFromLoads8xi32(ptr addrspace(1) %in, ptr addrspace(1) %
 define void @extendAndGapFillStoresFromLoads8xi32(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 ; CHECK-LABEL: define void @extendAndGapFillStoresFromLoads8xi32(
 ; CHECK-SAME: ptr addrspace(1) [[IN:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr addrspace(1) [[IN]], align 32
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) [[IN]], i32 32, <8 x i1> <i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 false, i1 false>, <8 x i32> poison)
 ; CHECK-NEXT:    [[LOAD05:%.*]] = extractelement <8 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[LOAD16:%.*]] = extractelement <8 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[LOAD27:%.*]] = extractelement <8 x i32> [[TMP1]], i32 2
@@ -332,7 +332,7 @@ define void @gapInWrongLocation(ptr addrspace(1) %out) {
 define void @cantMaski8(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 ; CHECK-LABEL: define void @cantMaski8(
 ; CHECK-SAME: ptr addrspace(1) [[IN:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP1:%.*]] = load <32 x i8>, ptr addrspace(1) [[IN]], align 32
+; CHECK-NEXT:    [[TMP1:%.*]] = call <32 x i8> @llvm.masked.load.v32i8.p1(ptr addrspace(1) [[IN]], i32 32, <32 x i1> <i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true>, <32 x i8> poison)
 ; CHECK-NEXT:    [[LOAD031:%.*]] = extractelement <32 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[GAPFILL32:%.*]] = extractelement <32 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL233:%.*]] = extractelement <32 x i8> [[TMP1]], i32 2
@@ -471,7 +471,7 @@ define void @cantMaski8(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 define void @cantMaski16(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 ; CHECK-LABEL: define void @cantMaski16(
 ; CHECK-SAME: ptr addrspace(1) [[IN:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i16>, ptr addrspace(1) [[IN]], align 32
+; CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i16> @llvm.masked.load.v16i16.p1(ptr addrspace(1) [[IN]], i32 32, <16 x i1> <i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true>, <16 x i16> poison)
 ; CHECK-NEXT:    [[LOAD015:%.*]] = extractelement <16 x i16> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[GAPFILL16:%.*]] = extractelement <16 x i16> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL217:%.*]] = extractelement <16 x i16> [[TMP1]], i32 2
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i8.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i8.ll
index e031daab6d786..03c7f31b40d85 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i8.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/vectorize_i8.ll
@@ -40,7 +40,7 @@ define void @int8x3a4(ptr nocapture align 4 %ptr) {
   ret void
 
 ; CHECK-LABEL: @int8x3a4
-; CHECK: load <4 x i8>
+; CHECK: call <4 x i8> @llvm.masked.load.v4i8.p0
 ; CHECK: store <2 x i8>
 ; CHECK: store i8
 }
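
As a reading aid for the updated checks above: the gap-fill path now emits a constant-mask llvm.masked.load instead of a plain vector load. A minimal sketch of what the @test case in gap-fill.ll exercises (value names and GEPs are illustrative, and this uses the pre-attribute masked-load syntax the tests still have at this point in the series):

  ; input: i32 loads at offsets 0, 4 and 12; nothing loads offset 8
  %ld0 = load i32, ptr %ptr, align 16
  %gep1 = getelementptr inbounds i8, ptr %ptr, i32 4
  %ld1 = load i32, ptr %gep1, align 4
  %gep3 = getelementptr inbounds i8, ptr %ptr, i32 12
  %ld3 = load i32, ptr %gep3, align 4

  ; after gap filling + vectorization: one masked load with lane 2 disabled
  %vec = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 16, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> poison)
  %ld0.v = extractelement <4 x i32> %vec, i32 0
  %ld1.v = extractelement <4 x i32> %vec, i32 1
  %ld3.v = extractelement <4 x i32> %vec, i32 3

On NVPTX the codegen checks above suggest this still lowers to a full-width vector load, annotated with .pragma "used_bytes_mask ..." to record which bytes are actually consumed.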

>From 8854d5af219289c945819715eaff92e5d5bd315f Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Thu, 16 Oct 2025 19:48:39 +0000
Subject: [PATCH 07/24] Remove isLegalToWidenLoads API

---
 llvm/include/llvm/Analysis/TargetTransformInfo.h     | 6 ------
 llvm/include/llvm/Analysis/TargetTransformInfoImpl.h | 2 --
 llvm/lib/Analysis/TargetTransformInfo.cpp            | 4 ----
 llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h     | 4 ----
 4 files changed, 16 deletions(-)

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 0b861d2e61dc1..7b7dc1b46dd80 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -823,12 +823,6 @@ class TargetTransformInfo {
   LLVM_ABI bool isLegalMaskedLoad(Type *DataType, Align Alignment,
                                   unsigned AddressSpace) const;
 
-  /// Return true if it is legal to widen loads beyond their current width,
-  /// assuming the result is still well-aligned. For example, converting a load
-  /// i32 to a load i64, or vectorizing three continuous load i32s into a load
-  /// <4 x i32>.
-  LLVM_ABI bool isLegalToWidenLoads(LLVMContext &Context) const;
-
   /// Return true if the target supports nontemporal store.
   LLVM_ABI bool isLegalNTStore(Type *DataType, Align Alignment) const;
   /// Return true if the target supports nontemporal load.
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index b370def9ece3e..4cd607c0d0c8d 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -318,8 +318,6 @@ class TargetTransformInfoImplBase {
     return false;
   }
 
-  virtual bool isLegalToWidenLoads(LLVMContext &Context) const { return false; }
-
   virtual bool isLegalNTStore(Type *DataType, Align Alignment) const {
     // By default, assume nontemporal memory stores are available for stores
     // that are aligned and have a size that is a power of 2.
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index b9be4ca569f73..c47a1c1b23a37 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -477,10 +477,6 @@ bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType, Align Alignment,
   return TTIImpl->isLegalMaskedLoad(DataType, Alignment, AddressSpace);
 }
 
-bool TargetTransformInfo::isLegalToWidenLoads() const {
-  return TTIImpl->isLegalToWidenLoads();
-}
-
 bool TargetTransformInfo::isLegalNTStore(Type *DataType,
                                          Align Alignment) const {
   return TTIImpl->isLegalNTStore(DataType, Alignment);
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
index 58e5472f4a67c..78eb751cf3c2e 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
@@ -72,10 +72,6 @@ class NVPTXTTIImpl final : public BasicTTIImplBase<NVPTXTTIImpl> {
     return isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment, AddrSpace);
   }
 
-  bool isLegalToWidenLoads(LLVMContext &Context) const override {
-    return true;
-  };
-
   // NVPTX has infinite registers of all kinds, but the actual machine doesn't.
   // We conservatively return 1 here which is just enough to enable the
   // vectorizers but disables heuristics based on the number of registers.

>From a1d28278d5bdf93226062027dcaba6ad264c2f03 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Thu, 16 Oct 2025 20:12:01 +0000
Subject: [PATCH 08/24] Change LSV to create masked loads

---
 .../Vectorize/LoadStoreVectorizer.cpp         | 173 ++++++++++--------
 1 file changed, 98 insertions(+), 75 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index bf8dd2580ff80..155d4119ea1fe 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -355,18 +355,23 @@ class Vectorizer {
                               unsigned VecElemBits) const;
 
   /// Before attempting to fill gaps, check if the chain is a candidate for
-  /// a masked store, to save compile time if it is not possible for the address
-  /// space and element type.
-  bool shouldAttemptMaskedStore(const ArrayRef<ChainElem> C) const;
+  /// a masked load/store, to save compile time if it is not possible for the
+  /// address space and element type.
+  bool shouldAttemptMaskedLoadStore(const ArrayRef<ChainElem> C) const;
 
   /// Create a new GEP and a new Load/Store instruction such that the GEP
   /// is pointing at PrevElem + Offset. In the case of stores, store poison.
-  /// Extra elements will either be combined into a vector/masked store or
+  /// Extra elements will either be combined into a masked load/store or
   /// deleted before the end of the pass.
   ChainElem createExtraElementAfter(const ChainElem &PrevElem, APInt Offset,
                                     StringRef Prefix,
                                     Align Alignment = Align());
 
+  /// Create a mask that masks off the extra elements in the chain, to be used
+  /// for the creation of a masked load/store vector.
+  Value *createMaskForExtraElements(const ArrayRef<ChainElem> C, Type *VecTy,
+                                    Align Alignment, unsigned AS);
+
   /// Delete dead GEPs and extra Load/Store instructions created by
   /// createExtraElementAfter
   void deleteExtraElements();
@@ -660,15 +665,11 @@ std::vector<Chain> Vectorizer::splitChainByContiguity(Chain &C) {
 
   // If the chain is not contiguous, we try to fill the gap with "extra"
   // elements to artificially make it contiguous, to try to enable
-  // vectorization.
-  // - Filling gaps in loads is always ok if the target supports widening loads.
-  // - For stores, we only fill gaps if there is a potentially legal masked
-  //   store for the target. If later on, we don't end up with a chain that
-  //   could be vectorized into a legal masked store, the chains with extra
-  //   elements will be filtered out in splitChainByAlignment.
-  bool TryFillGaps = isa<LoadInst>(C[0].Inst)
-                         ? TTI.isLegalToWidenLoads(F.getContext())
-                         : shouldAttemptMaskedStore(C);
+  // vectorization. We only fill gaps if there is a potentially legal masked
+  // load/store for the target. If later on, we don't end up with a chain that
+  // could be vectorized into a legal masked load/store, the chains with extra
+  // elements will be filtered out in splitChainByAlignment.
+  bool TryFillGaps = shouldAttemptMaskedLoadStore(C);
 
   unsigned ASPtrBits =
       DL.getIndexSizeInBits(getLoadStoreAddressSpace(C[0].Inst));
@@ -826,11 +827,9 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
 
   // For compile time reasons, we cache whether or not the superset
-  // of all candidate chains contains any extra stores from earlier gap
-  // filling.
-  bool CandidateChainsMayContainExtraStores =
-      !IsLoadChain && any_of(C, [this](const ChainElem &E) {
-        return ExtraElements.contains(E.Inst);
-      });
+  // of all candidate chains contains any extra loads/stores from earlier gap
+  // filling.
+  bool CandidateChainsMayContainExtraLoadsStores = any_of(
+      C, [this](const ChainElem &E) { return ExtraElements.contains(E.Inst); });
 
   std::vector<Chain> Ret;
   for (unsigned CBegin = 0; CBegin < C.size(); ++CBegin) {
@@ -925,10 +924,14 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
         // otherwise we may unnecessary split the chain when the target actually
         // supports non-pow2 VF.
         if (accessIsAllowedAndFast(NewSizeBytes, AS, Alignment, VecElemBits) &&
-            ((IsLoadChain ? TTI.isLegalToWidenLoads(F.getContext())
-                          : TTI.isLegalMaskedStore(
-                                FixedVectorType::get(VecElemTy, NewNumVecElems),
-                                Alignment, AS, /*IsMaskConstant=*/true)))) {
+            ((IsLoadChain &&
+              TTI.isLegalMaskedLoad(
+                  FixedVectorType::get(VecElemTy, NewNumVecElems), Alignment,
+                  AS, true)) ||
+             (!IsLoadChain &&
+              TTI.isLegalMaskedStore(
+                  FixedVectorType::get(VecElemTy, NewNumVecElems), Alignment,
+                  AS, true)))) {
           LLVM_DEBUG(dbgs()
                      << "LSV: extending " << (IsLoadChain ? "load" : "store")
                      << " chain of " << NumVecElems << " "
@@ -972,31 +975,34 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
         continue;
       }
 
-      if (CandidateChainsMayContainExtraStores) {
-        // The legality of adding extra stores to ExtendingLoadsStores has
+      if (CandidateChainsMayContainExtraLoadsStores) {
+        // The legality of adding extra loads/stores to ExtendingLoadsStores has
         // already been checked, but if the candidate chain contains extra
-        // stores from an earlier optimization, confirm legality now.
+        // loads/stores from an earlier optimization, confirm legality now.
         // This filter is essential because, when filling gaps in
         // splitChainByContinuity, we queried the API to check that (for a given
-        // element type and address space) there *may* be a legal masked store
-        // we can try to create. Now, we need to check if the actual chain we
-        // ended up with is legal to turn into a masked store.
-        // This is relevant for NVPTX targets, for example, where a masked store
-        // is only legal if we have ended up with a 256-bit vector.
-        bool CandidateChainContainsExtraStores = llvm::any_of(
+        // element type and address space) there *may* be a legal masked
+        // load/store we can aspire to create. Now, we need to check if the
+        // actual chain we ended up with is legal to turn into a masked
+        // load/store. This is relevant for NVPTX, for example, where a masked
+        // store is only legal if we have ended up with a 256-bit vector.
+        bool CandidateChainContainsExtraLoadsStores = llvm::any_of(
             ArrayRef<ChainElem>(C).slice(CBegin, CEnd - CBegin + 1),
             [this](const ChainElem &E) {
               return ExtraElements.contains(E.Inst);
             });
 
-        if (CandidateChainContainsExtraStores &&
-            !TTI.isLegalMaskedStore(
-                FixedVectorType::get(VecElemTy, NumVecElems), Alignment, AS,
-                /*IsMaskConstant=*/true)) {
+        if (CandidateChainContainsExtraLoadsStores &&
+            ((IsLoadChain && !TTI.isLegalMaskedLoad(
+                                 FixedVectorType::get(VecElemTy, NumVecElems),
+                                 Alignment, AS, true)) ||
+             (!IsLoadChain && !TTI.isLegalMaskedStore(
+                                  FixedVectorType::get(VecElemTy, NumVecElems),
+                                  Alignment, AS, true)))) {
           LLVM_DEBUG(dbgs()
                      << "LSV: splitChainByAlignment discarding candidate chain "
-                        "because it contains extra stores that we cannot "
-                        "legally vectorize into a masked store \n");
+                        "because it contains extra loads/stores that we cannot "
+                        "legally vectorize into a masked load/store \n");
           continue;
         }
       }
@@ -1024,6 +1030,9 @@ bool Vectorizer::vectorizeChain(Chain &C) {
       (ExtraElements.contains(C[0].Inst) || ExtraElements.contains(C[1].Inst)))
     return false;
 
+  bool ChainContainsExtraLoadsStores = llvm::any_of(
+      C, [this](const ChainElem &E) { return ExtraElements.contains(E.Inst); });
+
   sortChainInOffsetOrder(C);
 
   LLVM_DEBUG({
@@ -1070,11 +1079,19 @@ bool Vectorizer::vectorizeChain(Chain &C) {
           return A.Inst->comesBefore(B.Inst);
         })->Inst);
 
-    // Chain is in offset order, so C[0] is the instr with the lowest offset,
-    // i.e. the root of the vector.
-    VecInst = Builder.CreateAlignedLoad(VecTy,
-                                        getLoadStorePointerOperand(C[0].Inst),
-                                        Alignment);
+    // If the chain contains extra loads, we need to vectorize into a
+    // masked load.
+    if (ChainContainsExtraLoadsStores) {
+      assert(TTI.isLegalMaskedLoad(VecTy, Alignment, AS, true));
+      Value *Mask = createMaskForExtraElements(C, VecTy, Alignment, AS);
+      VecInst = Builder.CreateMaskedLoad(
+          VecTy, getLoadStorePointerOperand(C[0].Inst), Alignment, Mask);
+    } else {
+      // Chain is in offset order, so C[0] is the instr with the lowest offset,
+      // i.e. the root of the vector.
+      VecInst = Builder.CreateAlignedLoad(
+          VecTy, getLoadStorePointerOperand(C[0].Inst), Alignment);
+    }
 
     unsigned VecIdx = 0;
     for (const ChainElem &E : C) {
@@ -1145,31 +1162,10 @@ bool Vectorizer::vectorizeChain(Chain &C) {
 
     // If the chain originates from extra stores, we need to vectorize into a
     // masked store.
-    bool ChainContainsExtraStores = llvm::any_of(C, [this](const ChainElem &E) {
-      return ExtraElements.contains(E.Inst);
-    });
-    if (ChainContainsExtraStores) {
-      assert(TTI.isLegalMaskedStore(Vec->getType(), Alignment, AS,
-                                    /*IsMaskConstant=*/true));
-      unsigned MaskIdx = 0;
-      // loop through the chain and create a mask for the masked store
-      Value *Mask = PoisonValue::get(FixedVectorType::get(
-          Builder.getInt1Ty(), cast<FixedVectorType>(VecTy)->getNumElements()));
-      for (const ChainElem &E : C) {
-        bool IsExtraStore = ExtraElements.contains(E.Inst);
-        if (FixedVectorType *VT =
-                dyn_cast<FixedVectorType>(getLoadStoreType(E.Inst))) {
-          for (int J = 0, JE = VT->getNumElements(); J < JE; ++J) {
-            Mask = Builder.CreateInsertElement(Mask,
-                                               Builder.getInt1(!IsExtraStore),
-                                               Builder.getInt32(MaskIdx++));
-          }
-        } else {
-          Mask =
-              Builder.CreateInsertElement(Mask, Builder.getInt1(!IsExtraStore),
-                                          Builder.getInt32(MaskIdx++));
-        }
-      }
+    if (ChainContainsExtraLoadsStores) {
+      assert(TTI.isLegalMaskedStore(Vec->getType(), Alignment, AS, true));
+      Value *Mask =
+          createMaskForExtraElements(C, Vec->getType(), Alignment, AS);
       VecInst = Builder.CreateMaskedStore(
           Vec, getLoadStorePointerOperand(C[0].Inst), Alignment, Mask);
     } else {
@@ -1862,8 +1858,9 @@ bool Vectorizer::accessIsAllowedAndFast(unsigned SizeBytes, unsigned AS,
   return true;
 }
 
-bool Vectorizer::shouldAttemptMaskedStore(const ArrayRef<ChainElem> C) const {
-  assert(isa<StoreInst>(C[0].Inst));
+bool Vectorizer::shouldAttemptMaskedLoadStore(
+    const ArrayRef<ChainElem> C) const {
+  bool IsLoadChain = isa<LoadInst>(C[0].Inst);
 
   unsigned AS = getLoadStoreAddressSpace(C[0].Inst);
   Type *ElementType = getLoadStoreType(C[0].Inst)->getScalarType();
@@ -1875,17 +1872,20 @@ bool Vectorizer::shouldAttemptMaskedStore(const ArrayRef<ChainElem> C) const {
       VecRegBits / DL.getTypeSizeInBits(ElementType);
 
   // Attempt to find the smallest power-of-two number of elements that, if
-  // well aligned, could be represented as a legal masked store.
+  // well aligned, could be represented as a legal masked load/store.
   // If one exists for a given element type and address space, it is worth
-  // attempting to fill gaps as we may be able to create a legal masked store.
-  // If we do not end up with a legal masked store, chains with extra elements
-  // will be discarded.
+  // attempting to fill gaps as we may be able to create a legal masked
+  // load/store. If we do not end up with a legal masked load/store, chains with
+  // extra elements will be discarded.
   const unsigned MinMaskedStoreNumElems = 4;
   for (unsigned NumElems = MinMaskedStoreNumElems;
        NumElems <= MaxVectorNumElems; NumElems *= 2) {
     FixedVectorType *VectorType = FixedVectorType::get(ElementType, NumElems);
-    if (TTI.isLegalMaskedStore(VectorType, OptimisticAlign, AS,
-                               /*IsMaskConstant=*/true))
+    bool IsLegalMaskedInstruction =
+        IsLoadChain
+            ? TTI.isLegalMaskedLoad(VectorType, OptimisticAlign, AS, true)
+            : TTI.isLegalMaskedStore(VectorType, OptimisticAlign, AS, true);
+    if (IsLegalMaskedInstruction)
       return true;
   }
   return false;
@@ -1927,6 +1927,29 @@ ChainElem Vectorizer::createExtraElementAfter(const ChainElem &Prev,
   return ChainElem{NewElement, NewOffsetFromLeader};
 }
 
+Value *Vectorizer::createMaskForExtraElements(const ArrayRef<ChainElem> C,
+                                              Type *VecTy, Align Alignment,
+                                              unsigned AS) {
+  unsigned MaskIdx = 0;
+  Value *Mask = PoisonValue::get(FixedVectorType::get(
+      Builder.getInt1Ty(), cast<FixedVectorType>(VecTy)->getNumElements()));
+  for (const ChainElem &E : C) {
+    bool IsExtraElement = ExtraElements.contains(E.Inst);
+    if (FixedVectorType *VT =
+            dyn_cast<FixedVectorType>(getLoadStoreType(E.Inst))) {
+      for (int J = 0, JE = VT->getNumElements(); J < JE; ++J) {
+        Mask =
+            Builder.CreateInsertElement(Mask, Builder.getInt1(!IsExtraElement),
+                                        Builder.getInt32(MaskIdx++));
+      }
+    } else {
+      Mask = Builder.CreateInsertElement(Mask, Builder.getInt1(!IsExtraElement),
+                                         Builder.getInt32(MaskIdx++));
+    }
+  }
+  return Mask;
+}
+
 void Vectorizer::deleteExtraElements() {
   for (auto *ExtraElement : ExtraElements) {
     if (isa<LoadInst>(ExtraElement)) {
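
To illustrate what createMaskForExtraElements produces when chain elements are themselves vectors (each element contributes one i1 lane per scalar it covers): a minimal sketch mirroring the i16x2_gap_gap_i16x2 case from gap-fill-vectors.ll, with illustrative names and the pre-attribute masked-load syntax still used at this point in the series:

  ; chain in offset order: <2 x i16> at +0, extra <2 x i16> at +4, extra <2 x i16> at +8, <2 x i16> at +12
  ; the two real elements contribute 'true' lanes, the two gap-fill elements contribute 'false' lanes
  %v = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %ptr0, i32 16, <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true>, <8 x i16> poison)
  %first = shufflevector <8 x i16> %v, <8 x i16> poison, <2 x i32> <i32 0, i32 1>
  %last = shufflevector <8 x i16> %v, <8 x i16> poison, <2 x i32> <i32 6, i32 7>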

>From bb25df1bf239e08d3b93765b7b0ed48992a58810 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Wed, 22 Oct 2025 21:22:17 +0000
Subject: [PATCH 09/24] Update calls to TTI to match changes in lowering PR

---
 .../Vectorize/LoadStoreVectorizer.cpp         | 26 +++++++++++--------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 155d4119ea1fe..1e8190777fac6 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -927,11 +927,11 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
             ((IsLoadChain &&
               TTI.isLegalMaskedLoad(
                   FixedVectorType::get(VecElemTy, NewNumVecElems), Alignment,
-                  AS, true)) ||
+                  AS, TTI::MaskKind::ConstantMask)) ||
              (!IsLoadChain &&
               TTI.isLegalMaskedStore(
                   FixedVectorType::get(VecElemTy, NewNumVecElems), Alignment,
-                  AS, true)))) {
+                  AS, TTI::MaskKind::ConstantMask)))) {
           LLVM_DEBUG(dbgs()
                      << "LSV: extending " << (IsLoadChain ? "load" : "store")
                      << " chain of " << NumVecElems << " "
@@ -995,10 +995,11 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
         if (CandidateChainContainsExtraLoadsStores &&
             ((IsLoadChain && !TTI.isLegalMaskedLoad(
                                  FixedVectorType::get(VecElemTy, NumVecElems),
-                                 Alignment, AS, true)) ||
-             (!IsLoadChain && !TTI.isLegalMaskedStore(
-                                  FixedVectorType::get(VecElemTy, NumVecElems),
-                                  Alignment, AS, true)))) {
+                                 Alignment, AS, TTI::MaskKind::ConstantMask)) ||
+             (!IsLoadChain &&
+              !TTI.isLegalMaskedStore(
+                  FixedVectorType::get(VecElemTy, NumVecElems), Alignment, AS,
+                  TTI::MaskKind::ConstantMask)))) {
           LLVM_DEBUG(dbgs()
                      << "LSV: splitChainByAlignment discarding candidate chain "
                         "because it contains extra loads/stores that we cannot "
@@ -1082,7 +1083,8 @@ bool Vectorizer::vectorizeChain(Chain &C) {
     // If the chain contains extra loads, we need to vectorize into a
     // masked load.
     if (ChainContainsExtraLoadsStores) {
-      assert(TTI.isLegalMaskedLoad(VecTy, Alignment, AS, true));
+      assert(TTI.isLegalMaskedLoad(VecTy, Alignment, AS,
+                                   TTI::MaskKind::ConstantMask));
       Value *Mask = createMaskForExtraElements(C, VecTy, Alignment, AS);
       VecInst = Builder.CreateMaskedLoad(
           VecTy, getLoadStorePointerOperand(C[0].Inst), Alignment, Mask);
@@ -1163,7 +1165,8 @@ bool Vectorizer::vectorizeChain(Chain &C) {
     // If the chain originates from extra stores, we need to vectorize into a
     // masked store.
     if (ChainContainsExtraLoadsStores) {
-      assert(TTI.isLegalMaskedStore(Vec->getType(), Alignment, AS, true));
+      assert(TTI.isLegalMaskedStore(Vec->getType(), Alignment, AS,
+                                    TTI::MaskKind::ConstantMask));
       Value *Mask =
           createMaskForExtraElements(C, Vec->getType(), Alignment, AS);
       VecInst = Builder.CreateMaskedStore(
@@ -1882,9 +1885,10 @@ bool Vectorizer::shouldAttemptMaskedLoadStore(
        NumElems <= MaxVectorNumElems; NumElems *= 2) {
     FixedVectorType *VectorType = FixedVectorType::get(ElementType, NumElems);
     bool IsLegalMaskedInstruction =
-        IsLoadChain
-            ? TTI.isLegalMaskedLoad(VectorType, OptimisticAlign, AS, true)
-            : TTI.isLegalMaskedStore(VectorType, OptimisticAlign, AS, true);
+        IsLoadChain ? TTI.isLegalMaskedLoad(VectorType, OptimisticAlign, AS,
+                                            TTI::MaskKind::ConstantMask)
+                    : TTI.isLegalMaskedStore(VectorType, OptimisticAlign, AS,
+                                             TTI::MaskKind::ConstantMask);
     if (IsLegalMaskedInstruction)
       return true;
   }

>From 030c0bba3f7dd2184a4965255da6c8ff1b3fb306 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Wed, 22 Oct 2025 21:23:10 +0000
Subject: [PATCH 10/24] Update tests to match the new masked load/store syntax,
 moving alignment to an attribute
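
For reference, the mechanical change in these tests is only where the alignment is expressed; the mask and passthrough operands are unchanged (sketch, %p, %data and the masks are placeholders):

  ; old: alignment as an explicit i32 operand
  %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 16, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> poison)
  ; new: alignment as a parameter attribute on the pointer
  %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 16 %p, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> poison)
  ; masked stores move the same way
  call void @llvm.masked.store.v4i64.p1(<4 x i64> %data, ptr addrspace(1) align 32 %out, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)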

---
 .../LoadStoreVectorizer/NVPTX/extend-chain.ll |  4 +--
 .../NVPTX/gap-fill-cleanup.ll                 |  2 +-
 .../NVPTX/gap-fill-invariant.ll               |  2 +-
 .../NVPTX/gap-fill-vectors.ll                 | 10 +++----
 .../LoadStoreVectorizer/NVPTX/gap-fill.ll     | 18 ++++++------
 .../LoadStoreVectorizer/NVPTX/masked-store.ll | 28 +++++++++----------
 6 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll
index 2207d5b471d20..892249e87e4bf 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll
@@ -10,7 +10,7 @@ define void @load3to4(ptr %p) #0 {
 ; CHECK-LABEL: define void @load3to4(
 ; CHECK-SAME: ptr [[P:%.*]]) {
 ; CHECK-NEXT:    [[P_0:%.*]] = getelementptr i32, ptr [[P]], i32 0
-; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[P_0]], i32 16, <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x i32> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 16 [[P_0]], <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x i32> poison)
 ; CHECK-NEXT:    [[V01:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[V12:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[V23:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
@@ -32,7 +32,7 @@ define void @load5to8(ptr %p) #0 {
 ; CHECK-LABEL: define void @load5to8(
 ; CHECK-SAME: ptr [[P:%.*]]) {
 ; CHECK-NEXT:    [[P_0:%.*]] = getelementptr i16, ptr [[P]], i32 0
-; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[P_0]], i32 16, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false>, <8 x i16> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 16 [[P_0]], <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false>, <8 x i16> poison)
 ; CHECK-NEXT:    [[V05:%.*]] = extractelement <8 x i16> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[V16:%.*]] = extractelement <8 x i16> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[V27:%.*]] = extractelement <8 x i16> [[TMP1]], i32 2
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-cleanup.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-cleanup.ll
index e92ffe8eadbc3..7b659b0feeb03 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-cleanup.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-cleanup.ll
@@ -8,7 +8,7 @@
 define void @fillTwoGapsCanVectorize(ptr %in) {
 ; CHECK-LABEL: define void @fillTwoGapsCanVectorize(
 ; CHECK-SAME: ptr [[IN:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[IN]], i32 16, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 16 [[IN]], <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LOAD03:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[GAPFILL4:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL25:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-invariant.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-invariant.ll
index 7a28faf8b4810..145512863f4d7 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-invariant.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-invariant.ll
@@ -62,7 +62,7 @@ define i32 @noGapsMissingInvariant(ptr %in) {
 define i32 @twoGaps(ptr %in) {
 ; CHECK-LABEL: define i32 @twoGaps(
 ; CHECK-SAME: ptr [[IN:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[IN]], i32 16, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> poison), !invariant.load [[META0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 16 [[IN]], <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> poison), !invariant.load [[META0]]
 ; CHECK-NEXT:    [[LOAD03:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[GAPFILL4:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL25:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll
index 303ec7e564d49..9162ea00a199c 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll
@@ -9,7 +9,7 @@ define void @i1x8_gap_gap_i1x8(ptr %ptr) {
 ; CHECK-LABEL: define void @i1x8_gap_gap_i1x8(
 ; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
-; CHECK-NEXT:    [[TMP1:%.*]] = call <32 x i1> @llvm.masked.load.v32i1.p0(ptr [[PTR0]], i32 4, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i1> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <32 x i1> @llvm.masked.load.v32i1.p0(ptr align 4 [[PTR0]], <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i1> poison)
 ; CHECK-NEXT:    [[L03:%.*]] = shufflevector <32 x i1> [[TMP1]], <32 x i1> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
 ; CHECK-NEXT:    [[GAPFILL4:%.*]] = shufflevector <32 x i1> [[TMP1]], <32 x i1> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
 ; CHECK-NEXT:    [[GAPFILL25:%.*]] = shufflevector <32 x i1> [[TMP1]], <32 x i1> poison, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
@@ -49,7 +49,7 @@ define void @i8x2_gap_gap_i8x2(ptr %ptr) {
 ; CHECK-LABEL: define void @i8x2_gap_gap_i8x2(
 ; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
-; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr [[PTR0]], i32 8, <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true>, <8 x i8> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr align 8 [[PTR0]], <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true>, <8 x i8> poison)
 ; CHECK-NEXT:    [[L03:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> poison, <2 x i32> <i32 0, i32 1>
 ; CHECK-NEXT:    [[GAPFILL4:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> poison, <2 x i32> <i32 2, i32 3>
 ; CHECK-NEXT:    [[GAPFILL25:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> poison, <2 x i32> <i32 4, i32 5>
@@ -89,7 +89,7 @@ define void @i16x2_gap_i16x2_i16x2(ptr %ptr) {
 ; CHECK-LABEL: define void @i16x2_gap_i16x2_i16x2(
 ; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
-; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[PTR0]], i32 16, <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <8 x i16> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 16 [[PTR0]], <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <8 x i16> poison)
 ; CHECK-NEXT:    [[L01:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 0, i32 1>
 ; CHECK-NEXT:    [[GAPFILL2:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 2, i32 3>
 ; CHECK-NEXT:    [[L23:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 4, i32 5>
@@ -111,7 +111,7 @@ define void @i16x2_gap_gap_i16x2(ptr %ptr) {
 ; CHECK-LABEL: define void @i16x2_gap_gap_i16x2(
 ; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
 ; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
-; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[PTR0]], i32 16, <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true>, <8 x i16> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 16 [[PTR0]], <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true>, <8 x i16> poison)
 ; CHECK-NEXT:    [[L03:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 0, i32 1>
 ; CHECK-NEXT:    [[GAPFILL4:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 2, i32 3>
 ; CHECK-NEXT:    [[GAPFILL25:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> poison, <2 x i32> <i32 4, i32 5>
@@ -130,7 +130,7 @@ define void @i16x2_gap_gap_i16x2(ptr %ptr) {
 define void @i32x2_i32x2_gap_i32x2(ptr addrspace(1) %in) {
 ; CHECK-LABEL: define void @i32x2_i32x2_gap_i32x2(
 ; CHECK-SAME: ptr addrspace(1) [[IN:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) [[IN]], i32 32, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true>, <8 x i32> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) align 32 [[IN]], <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true>, <8 x i32> poison)
 ; CHECK-NEXT:    [[VEC01:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 0, i32 1>
 ; CHECK-NEXT:    [[VEC12:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 2, i32 3>
 ; CHECK-NEXT:    [[GAPFILL3:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 4, i32 5>
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill.ll
index aae1a5f7266c4..83152ece5c4d1 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill.ll
@@ -5,7 +5,7 @@
 define void @test(ptr %ptr) {
 ; CHECK-LABEL: define void @test(
 ; CHECK-SAME: ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR]], i32 16, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 16 [[PTR]], <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD01:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[LD12:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL3:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
@@ -24,7 +24,7 @@ define void @test(ptr %ptr) {
 define void @test2(ptr %ptr) {
 ; CHECK-LABEL: define void @test2(
 ; CHECK-SAME: ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR]], i32 16, <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x i32> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 16 [[PTR]], <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD01:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[GAPFILL2:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[LD23:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
@@ -83,7 +83,7 @@ define void @test4(ptr %ptr) {
 define void @test5(ptr %ptr) {
 ; CHECK-LABEL: define void @test5(
 ; CHECK-SAME: ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR]], i32 16, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 16 [[PTR]], <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD03:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[GAPFILL4:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL25:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
@@ -100,13 +100,13 @@ define void @test5(ptr %ptr) {
 define void @test6(ptr %ptr) {
 ; CHECK-LABEL: define void @test6(
 ; CHECK-SAME: ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR]], i32 16, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 16 [[PTR]], <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD03:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[LD14:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL5:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
 ; CHECK-NEXT:    [[LD36:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
 ; CHECK-NEXT:    [[GEP4:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 16
-; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[GEP4]], i32 16, <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x i32> poison)
+; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 16 [[GEP4]], <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD47:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
 ; CHECK-NEXT:    [[GAPFILL28:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1
 ; CHECK-NEXT:    [[LD69:%.*]] = extractelement <4 x i32> [[TMP2]], i32 2
@@ -132,13 +132,13 @@ define void @test6(ptr %ptr) {
 define void @test7(ptr %ptr) {
 ; CHECK-LABEL: define void @test7(
 ; CHECK-SAME: ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR]], i32 16, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 16 [[PTR]], <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD05:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[LD16:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL7:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
 ; CHECK-NEXT:    [[LD38:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
 ; CHECK-NEXT:    [[GEP4:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 16
-; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[GEP4]], i32 16, <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> poison)
+; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 16 [[GEP4]], <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD49:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
 ; CHECK-NEXT:    [[GAPFILL210:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1
 ; CHECK-NEXT:    [[GAPFILL411:%.*]] = extractelement <4 x i32> [[TMP2]], i32 2
@@ -164,14 +164,14 @@ define void @test7(ptr %ptr) {
 define void @test8(ptr %ptr) {
 ; CHECK-LABEL: define void @test8(
 ; CHECK-SAME: ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR]], i32 16, <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 16 [[PTR]], <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[LD03:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[LD14:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL5:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
 ; CHECK-NEXT:    [[LD36:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
 ; CHECK-NEXT:    [[GEP3:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i32 12
 ; CHECK-NEXT:    [[GAPFILLGEP1:%.*]] = getelementptr i8, ptr [[GEP3]], i64 4
-; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[GAPFILLGEP1]], i32 16, <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> poison)
+; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr align 16 [[GAPFILLGEP1]], <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x i32> poison)
 ; CHECK-NEXT:    [[GAPFILL27:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
 ; CHECK-NEXT:    [[LD58:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1
 ; CHECK-NEXT:    [[LD69:%.*]] = extractelement <4 x i32> [[TMP2]], i32 2
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/masked-store.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/masked-store.ll
index 75d9c4b6e3125..a9e9cf674c72e 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/masked-store.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/masked-store.ll
@@ -7,7 +7,7 @@
 define void @singleGap(ptr addrspace(1) %out) {
 ; CHECK-LABEL: define void @singleGap(
 ; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p1(<4 x i64> <i64 1, i64 2, i64 poison, i64 4>, ptr addrspace(1) [[OUT]], i32 32, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
+; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p1(<4 x i64> <i64 1, i64 2, i64 poison, i64 4>, ptr addrspace(1) align 32 [[OUT]], <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
 ; CHECK-NEXT:    ret void
 ;
   store i64 1, ptr addrspace(1) %out, align 32
@@ -22,7 +22,7 @@ define void @singleGap(ptr addrspace(1) %out) {
 define void @singleGapDouble(ptr addrspace(1) %out) {
 ; CHECK-LABEL: define void @singleGapDouble(
 ; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    call void @llvm.masked.store.v4f64.p1(<4 x double> <double 1.000000e+00, double 2.000000e+00, double poison, double 4.000000e+00>, ptr addrspace(1) [[OUT]], i32 32, <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
+; CHECK-NEXT:    call void @llvm.masked.store.v4f64.p1(<4 x double> <double 1.000000e+00, double 2.000000e+00, double poison, double 4.000000e+00>, ptr addrspace(1) align 32 [[OUT]], <4 x i1> <i1 true, i1 true, i1 false, i1 true>)
 ; CHECK-NEXT:    ret void
 ;
   store double 1.0, ptr addrspace(1) %out, align 32
@@ -37,7 +37,7 @@ define void @singleGapDouble(ptr addrspace(1) %out) {
 define void @multipleGaps(ptr addrspace(1) %out) {
 ; CHECK-LABEL: define void @multipleGaps(
 ; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p1(<4 x i64> <i64 1, i64 poison, i64 poison, i64 4>, ptr addrspace(1) [[OUT]], i32 32, <4 x i1> <i1 true, i1 false, i1 false, i1 true>)
+; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p1(<4 x i64> <i64 1, i64 poison, i64 poison, i64 4>, ptr addrspace(1) align 32 [[OUT]], <4 x i1> <i1 true, i1 false, i1 false, i1 true>)
 ; CHECK-NEXT:    ret void
 ;
   store i64 1, ptr addrspace(1) %out, align 32
@@ -50,7 +50,7 @@ define void @multipleGaps(ptr addrspace(1) %out) {
 define void @multipleGaps8xi32(ptr addrspace(1) %out) {
 ; CHECK-LABEL: define void @multipleGaps8xi32(
 ; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p1(<8 x i32> <i32 1, i32 poison, i32 poison, i32 2, i32 4, i32 poison, i32 poison, i32 8>, ptr addrspace(1) [[OUT]], i32 32, <8 x i1> <i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true>)
+; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p1(<8 x i32> <i32 1, i32 poison, i32 poison, i32 2, i32 4, i32 poison, i32 poison, i32 8>, ptr addrspace(1) align 32 [[OUT]], <8 x i1> <i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true>)
 ; CHECK-NEXT:    ret void
 ;
   store i32 1, ptr addrspace(1) %out, align 32
@@ -71,7 +71,7 @@ define void @singleGapLongerChain(ptr addrspace(1) %out) {
 ; CHECK-NEXT:    [[GETELEM3:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[OUT]], i32 24
 ; CHECK-NEXT:    store <4 x i64> <i64 1, i64 2, i64 3, i64 4>, ptr addrspace(1) [[OUT]], align 32
 ; CHECK-NEXT:    [[GAPFILLGEP:%.*]] = getelementptr i8, ptr addrspace(1) [[GETELEM3]], i64 8
-; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p1(<4 x i64> <i64 poison, i64 6, i64 7, i64 8>, ptr addrspace(1) [[GAPFILLGEP]], i32 32, <4 x i1> <i1 false, i1 true, i1 true, i1 true>)
+; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p1(<4 x i64> <i64 poison, i64 6, i64 7, i64 8>, ptr addrspace(1) align 32 [[GAPFILLGEP]], <4 x i1> <i1 false, i1 true, i1 true, i1 true>)
 ; CHECK-NEXT:    ret void
 ;
   store i64 1, ptr addrspace(1) %out, align 32
@@ -94,7 +94,7 @@ define void @singleGapLongerChain(ptr addrspace(1) %out) {
 define void @vectorElements(ptr addrspace(1) %out) {
 ; CHECK-LABEL: define void @vectorElements(
 ; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p1(<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 poison, i32 poison, i32 7, i32 8>, ptr addrspace(1) [[OUT]], i32 32, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true>)
+; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p1(<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 poison, i32 poison, i32 7, i32 8>, ptr addrspace(1) align 32 [[OUT]], <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true>)
 ; CHECK-NEXT:    ret void
 ;
   store <2 x i32> <i32 1, i32 2>, ptr addrspace(1) %out, align 32
@@ -126,7 +126,7 @@ define void @vectorElements64(ptr addrspace(1) %in) {
 define void @extendStores(ptr addrspace(1) %out) {
 ; CHECK-LABEL: define void @extendStores(
 ; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p1(<4 x i64> <i64 1, i64 2, i64 3, i64 poison>, ptr addrspace(1) [[OUT]], i32 32, <4 x i1> <i1 true, i1 true, i1 true, i1 false>)
+; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p1(<4 x i64> <i64 1, i64 2, i64 3, i64 poison>, ptr addrspace(1) align 32 [[OUT]], <4 x i1> <i1 true, i1 true, i1 true, i1 false>)
 ; CHECK-NEXT:    ret void
 ;
   store i64 1, ptr addrspace(1) %out, align 32
@@ -141,7 +141,7 @@ define void @extendStores(ptr addrspace(1) %out) {
 define void @extendStores8xi32(ptr addrspace(1) %out) {
 ; CHECK-LABEL: define void @extendStores8xi32(
 ; CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p1(<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 poison, i32 poison, i32 poison>, ptr addrspace(1) [[OUT]], i32 32, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false>)
+; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p1(<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 poison, i32 poison, i32 poison>, ptr addrspace(1) align 32 [[OUT]], <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false>)
 ; CHECK-NEXT:    ret void
 ;
   store i32 1, ptr addrspace(1) %out, align 32
@@ -160,7 +160,7 @@ define void @extendStores8xi32(ptr addrspace(1) %out) {
 define void @extendStoresFromLoads8xi32(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 ; CHECK-LABEL: define void @extendStoresFromLoads8xi32(
 ; CHECK-SAME: ptr addrspace(1) [[IN:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) [[IN]], i32 32, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false>, <8 x i32> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) align 32 [[IN]], <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false>, <8 x i32> poison)
 ; CHECK-NEXT:    [[LOAD05:%.*]] = extractelement <8 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[LOAD16:%.*]] = extractelement <8 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[LOAD27:%.*]] = extractelement <8 x i32> [[TMP1]], i32 2
@@ -177,7 +177,7 @@ define void @extendStoresFromLoads8xi32(ptr addrspace(1) %in, ptr addrspace(1) %
 ; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <8 x i32> [[TMP6]], i32 poison, i32 5
 ; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <8 x i32> [[TMP7]], i32 poison, i32 6
 ; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <8 x i32> [[TMP8]], i32 poison, i32 7
-; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p1(<8 x i32> [[TMP9]], ptr addrspace(1) [[OUT]], i32 32, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false>)
+; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p1(<8 x i32> [[TMP9]], ptr addrspace(1) align 32 [[OUT]], <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false>)
 ; CHECK-NEXT:    ret void
 ;
   %load0 = load i32, ptr addrspace(1) %in, align 32
@@ -206,7 +206,7 @@ define void @extendStoresFromLoads8xi32(ptr addrspace(1) %in, ptr addrspace(1) %
 define void @extendAndGapFillStoresFromLoads8xi32(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 ; CHECK-LABEL: define void @extendAndGapFillStoresFromLoads8xi32(
 ; CHECK-SAME: ptr addrspace(1) [[IN:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) [[IN]], i32 32, <8 x i1> <i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 false, i1 false>, <8 x i32> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) align 32 [[IN]], <8 x i1> <i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 false, i1 false>, <8 x i32> poison)
 ; CHECK-NEXT:    [[LOAD05:%.*]] = extractelement <8 x i32> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[LOAD16:%.*]] = extractelement <8 x i32> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[LOAD27:%.*]] = extractelement <8 x i32> [[TMP1]], i32 2
@@ -223,7 +223,7 @@ define void @extendAndGapFillStoresFromLoads8xi32(ptr addrspace(1) %in, ptr addr
 ; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <8 x i32> [[TMP6]], i32 poison, i32 5
 ; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <8 x i32> [[TMP7]], i32 poison, i32 6
 ; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <8 x i32> [[TMP8]], i32 poison, i32 7
-; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p1(<8 x i32> [[TMP9]], ptr addrspace(1) [[OUT]], i32 32, <8 x i1> <i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 false, i1 false>)
+; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p1(<8 x i32> [[TMP9]], ptr addrspace(1) align 32 [[OUT]], <8 x i1> <i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 false, i1 false>)
 ; CHECK-NEXT:    ret void
 ;
   %load0 = load i32, ptr addrspace(1) %in, align 32
@@ -332,7 +332,7 @@ define void @gapInWrongLocation(ptr addrspace(1) %out) {
 define void @cantMaski8(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 ; CHECK-LABEL: define void @cantMaski8(
 ; CHECK-SAME: ptr addrspace(1) [[IN:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP1:%.*]] = call <32 x i8> @llvm.masked.load.v32i8.p1(ptr addrspace(1) [[IN]], i32 32, <32 x i1> <i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true>, <32 x i8> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <32 x i8> @llvm.masked.load.v32i8.p1(ptr addrspace(1) align 32 [[IN]], <32 x i1> <i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true>, <32 x i8> poison)
 ; CHECK-NEXT:    [[LOAD031:%.*]] = extractelement <32 x i8> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[GAPFILL32:%.*]] = extractelement <32 x i8> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL233:%.*]] = extractelement <32 x i8> [[TMP1]], i32 2
@@ -471,7 +471,7 @@ define void @cantMaski8(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 define void @cantMaski16(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 ; CHECK-LABEL: define void @cantMaski16(
 ; CHECK-SAME: ptr addrspace(1) [[IN:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i16> @llvm.masked.load.v16i16.p1(ptr addrspace(1) [[IN]], i32 32, <16 x i1> <i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true>, <16 x i16> poison)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i16> @llvm.masked.load.v16i16.p1(ptr addrspace(1) align 32 [[IN]], <16 x i1> <i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true>, <16 x i16> poison)
 ; CHECK-NEXT:    [[LOAD015:%.*]] = extractelement <16 x i16> [[TMP1]], i32 0
 ; CHECK-NEXT:    [[GAPFILL16:%.*]] = extractelement <16 x i16> [[TMP1]], i32 1
 ; CHECK-NEXT:    [[GAPFILL217:%.*]] = extractelement <16 x i16> [[TMP1]], i32 2

>From 0a0aa2e57a54bd2e69b0a53b4d9b823f9a8b5ed8 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Fri, 24 Oct 2025 15:20:43 +0000
Subject: [PATCH 11/24] Simplify pre-gap-filling TTI legality check

---
 .../Vectorize/LoadStoreVectorizer.cpp         | 66 ++++++-------------
 1 file changed, 20 insertions(+), 46 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 1e8190777fac6..bfa16b60ae2c5 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -354,11 +354,6 @@ class Vectorizer {
   bool accessIsAllowedAndFast(unsigned SizeBytes, unsigned AS, Align Alignment,
                               unsigned VecElemBits) const;
 
-  /// Before attempting to fill gaps, check if the chain is a candidate for
-  /// a masked load/store, to save compile time if it is not possible for the
-  /// address space and element type.
-  bool shouldAttemptMaskedLoadStore(const ArrayRef<ChainElem> C) const;
-
   /// Create a new GEP and a new Load/Store instruction such that the GEP
   /// is pointing at PrevElem + Offset. In the case of stores, store poison.
   /// Extra elements will either be combined into a masked load/store or
@@ -665,14 +660,27 @@ std::vector<Chain> Vectorizer::splitChainByContiguity(Chain &C) {
 
   // If the chain is not contiguous, we try to fill the gap with "extra"
   // elements to artificially make it contiguous, to try to enable
-  // vectorization. We only fill gaps if there is a potentially legal masked
-  // load/store for the target. If later on, we don't end up with a chain that
-  // could be vectorized into a legal masked load/store, the chains with extra
-  // elements will be filtered out in splitChainByAlignment.
-  bool TryFillGaps = shouldAttemptMaskedLoadStore(C);
+  // vectorization. We only fill gaps if there is potential to end up with a
+  // legal masked load/store given the target, address space, and element type.
+  // At this point, when querying the TTI, optimistically assume max alignment
+  // and max vector size, as splitChainByAlignment will ensure the final vector
+  // shape passes the legalization check.
+  unsigned AS = getLoadStoreAddressSpace(C[0].Inst);
+  Type *ElementType = getLoadStoreType(C[0].Inst)->getScalarType();
+  unsigned MaxVecRegBits = TTI.getLoadStoreVecRegBitWidth(AS);
+  Align OptimisticAlign = Align(MaxVecRegBits / 8);
+  unsigned int MaxVectorNumElems =
+      MaxVecRegBits / DL.getTypeSizeInBits(ElementType);
+  FixedVectorType *OptimisticVectorType =
+      FixedVectorType::get(ElementType, MaxVectorNumElems);
+  bool TryFillGaps =
+      isa<LoadInst>(C[0].Inst)
+          ? TTI.isLegalMaskedLoad(OptimisticVectorType, OptimisticAlign, AS,
+                                  TTI::MaskKind::ConstantMask)
+          : TTI.isLegalMaskedStore(OptimisticVectorType, OptimisticAlign, AS,
+                                   TTI::MaskKind::ConstantMask);
 
-  unsigned ASPtrBits =
-      DL.getIndexSizeInBits(getLoadStoreAddressSpace(C[0].Inst));
+  unsigned ASPtrBits = DL.getIndexSizeInBits(AS);
 
   // Compute the alignment of the leader of the chain (which every stored offset
   // is based on) using the current first element of the chain. This is
@@ -1861,40 +1869,6 @@ bool Vectorizer::accessIsAllowedAndFast(unsigned SizeBytes, unsigned AS,
   return true;
 }
 
-bool Vectorizer::shouldAttemptMaskedLoadStore(
-    const ArrayRef<ChainElem> C) const {
-  bool IsLoadChain = isa<LoadInst>(C[0].Inst);
-
-  unsigned AS = getLoadStoreAddressSpace(C[0].Inst);
-  Type *ElementType = getLoadStoreType(C[0].Inst)->getScalarType();
-  unsigned VecRegBits = TTI.getLoadStoreVecRegBitWidth(AS);
-  // Assume max alignment, splitChainByAlignment will legalize it later if the
-  // necessary alignment is not reached.
-  Align OptimisticAlign = Align(VecRegBits / 8);
-  unsigned int MaxVectorNumElems =
-      VecRegBits / DL.getTypeSizeInBits(ElementType);
-
-  // Attempt to find the smallest power-of-two number of elements that, if
-  // well aligned, could be represented as a legal masked load/store.
-  // If one exists for a given element type and address space, it is worth
-  // attempting to fill gaps as we may be able to create a legal masked
-  // load/store. If we do not end up with a legal masked load/store, chains with
-  // extra elements will be discarded.
-  const unsigned MinMaskedStoreNumElems = 4;
-  for (unsigned NumElems = MinMaskedStoreNumElems;
-       NumElems <= MaxVectorNumElems; NumElems *= 2) {
-    FixedVectorType *VectorType = FixedVectorType::get(ElementType, NumElems);
-    bool IsLegalMaskedInstruction =
-        IsLoadChain ? TTI.isLegalMaskedLoad(VectorType, OptimisticAlign, AS,
-                                            TTI::MaskKind::ConstantMask)
-                    : TTI.isLegalMaskedStore(VectorType, OptimisticAlign, AS,
-                                             TTI::MaskKind::ConstantMask);
-    if (IsLegalMaskedInstruction)
-      return true;
-  }
-  return false;
-}
-
 ChainElem Vectorizer::createExtraElementAfter(const ChainElem &Prev,
                                               APInt Offset, StringRef Prefix,
                                               Align Alignment) {
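
For readers following the series, here is a minimal standalone sketch of the optimistic pre-check this patch inlines into splitChainByContiguity. It assumes the MaskKind-taking isLegalMaskedLoad/isLegalMaskedStore overloads that this series builds on; the helper name is hypothetical and the code is illustrative only, not part of the patch.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"

// Could a constant-masked load/store of the widest vector register ever be
// legal for this element type and address space? If not, gap filling is
// skipped up front; if yes, splitChainByAlignment still validates the exact
// vector shape that is eventually formed.
static bool mayFormLegalMaskedAccess(const llvm::TargetTransformInfo &TTI,
                                     const llvm::DataLayout &DL,
                                     llvm::Type *ElemTy, unsigned AS,
                                     bool IsLoad) {
  unsigned MaxVecRegBits = TTI.getLoadStoreVecRegBitWidth(AS);
  unsigned NumElems = MaxVecRegBits / DL.getTypeSizeInBits(ElemTy);
  if (NumElems < 2)
    return false;
  auto *VecTy = llvm::FixedVectorType::get(ElemTy, NumElems);
  llvm::Align OptimisticAlign(MaxVecRegBits / 8);
  return IsLoad ? TTI.isLegalMaskedLoad(
                      VecTy, OptimisticAlign, AS,
                      llvm::TargetTransformInfo::MaskKind::ConstantMask)
                : TTI.isLegalMaskedStore(
                      VecTy, OptimisticAlign, AS,
                      llvm::TargetTransformInfo::MaskKind::ConstantMask);
}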

>From 20bf7ad358f8defbc663154a27554194f346c824 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Fri, 24 Oct 2025 16:05:26 +0000
Subject: [PATCH 12/24] Clean up comments and simplify some logic

---
 .../Vectorize/LoadStoreVectorizer.cpp         | 65 +++++++++----------
 1 file changed, 32 insertions(+), 33 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index bfa16b60ae2c5..5145f2368ac47 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -834,8 +834,8 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
   unsigned VecRegBytes = TTI.getLoadStoreVecRegBitWidth(AS) / 8;
 
   // For compile time reasons, we cache whether or not the superset
-  // of all candidate chains contains any extra stores from earlier gap
   // of all candidate chains contains any extra loads/stores from earlier gap
+  // filling.
   bool CandidateChainsMayContainExtraLoadsStores = any_of(
       C, [this](const ChainElem &E) { return ExtraElements.contains(E.Inst); });
 
@@ -913,6 +913,7 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
         }
       }
 
+      // Attempt to extend non-power-of-2 chains to the next power of 2.
       Chain ExtendingLoadsStores;
       if (NumVecElems < TargetVF && NumVecElems % 2 != 0 && VecElemBits >= 8) {
         // TargetVF may be a lot higher than NumVecElems,
@@ -928,18 +929,17 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
                           << NumVecElems << " "
                           << (IsLoadChain ? "loads" : "stores") << " to "
                           << NewNumVecElems << " elements\n");
-        // Do not artificially increase the chain if it becomes misaligned,
-        // otherwise we may unnecessary split the chain when the target actually
-        // supports non-pow2 VF.
+        // Do not artificially increase the chain if it becomes misaligned or if
+        // the associated masked load/store is not legal, otherwise we may
+        // unnecessarily split the chain when the target actually supports
+        // non-pow2 VF.
         if (accessIsAllowedAndFast(NewSizeBytes, AS, Alignment, VecElemBits) &&
-            ((IsLoadChain &&
-              TTI.isLegalMaskedLoad(
-                  FixedVectorType::get(VecElemTy, NewNumVecElems), Alignment,
-                  AS, TTI::MaskKind::ConstantMask)) ||
-             (!IsLoadChain &&
-              TTI.isLegalMaskedStore(
-                  FixedVectorType::get(VecElemTy, NewNumVecElems), Alignment,
-                  AS, TTI::MaskKind::ConstantMask)))) {
+            (IsLoadChain ? TTI.isLegalMaskedLoad(
+                               FixedVectorType::get(VecElemTy, NewNumVecElems),
+                               Alignment, AS, TTI::MaskKind::ConstantMask)
+                         : TTI.isLegalMaskedStore(
+                               FixedVectorType::get(VecElemTy, NewNumVecElems),
+                               Alignment, AS, TTI::MaskKind::ConstantMask))) {
           LLVM_DEBUG(dbgs()
                      << "LSV: extending " << (IsLoadChain ? "load" : "store")
                      << " chain of " << NumVecElems << " "
@@ -950,13 +950,14 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
                      << " with total byte size of " << NewSizeBytes
                      << ", TargetVF=" << TargetVF << " \n");
 
+          // Create (NewNumVecElems - NumVecElems) extra elements.
           unsigned ASPtrBits = DL.getIndexSizeInBits(AS);
           ChainElem Prev = C[CEnd];
-          for (unsigned i = 0; i < (NewNumVecElems - NumVecElems); i++) {
+          for (unsigned I = (NewNumVecElems - NumVecElems); I != 0; --I) {
             ChainElem NewElem = createExtraElementAfter(
                 Prev, APInt(ASPtrBits, VecElemBytes), "Extend");
             ExtendingLoadsStores.push_back(NewElem);
-            Prev = ExtendingLoadsStores.back();
+            Prev = NewElem;
           }
 
           // Update the size and number of elements for upcoming checks.
@@ -984,30 +985,28 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
       }
 
       if (CandidateChainsMayContainExtraLoadsStores) {
-        // The legality of adding extra loads/stores to ExtendingLoadsStores has
-        // already been checked, but if the candidate chain contains extra
-        // loads/stores from an earlier optimization, confirm legality now.
-        // This filter is essential because, when filling gaps in
-        // splitChainByContinuity, we queried the API to check that (for a given
-        // element type and address space) there *may* be a legal masked
-        // load/store we can aspire to create. Now, we need to check if the
-        // actual chain we ended up with is legal to turn into a masked
-        // load/store. This is relevant for NVPTX, for example, where a masked
-        // store is only legal if we have ended up with a 256-bit vector.
-        bool CandidateChainContainsExtraLoadsStores = llvm::any_of(
+        // If the candidate chain contains extra loads/stores from an earlier
+        // optimization, confirm legality now. This filter is essential because
+        // when filling gaps in splitChainByContiguity, we queried the API to
+        // check that (for a given element type and address space) there *may*
+        // have been a legal masked load/store we could possibly create. Now, we
+        // need to check if the actual chain we ended up with is legal to turn
+        // into a masked load/store. This is relevant for NVPTX, for example,
+        // where a masked store is only legal if we have ended up with a 256-bit
+        // vector.
+        bool CurrCandContainsExtraLoadsStores = llvm::any_of(
             ArrayRef<ChainElem>(C).slice(CBegin, CEnd - CBegin + 1),
             [this](const ChainElem &E) {
               return ExtraElements.contains(E.Inst);
             });
 
-        if (CandidateChainContainsExtraLoadsStores &&
-            ((IsLoadChain && !TTI.isLegalMaskedLoad(
-                                 FixedVectorType::get(VecElemTy, NumVecElems),
-                                 Alignment, AS, TTI::MaskKind::ConstantMask)) ||
-             (!IsLoadChain &&
-              !TTI.isLegalMaskedStore(
-                  FixedVectorType::get(VecElemTy, NumVecElems), Alignment, AS,
-                  TTI::MaskKind::ConstantMask)))) {
+        if (CurrCandContainsExtraLoadsStores &&
+            (IsLoadChain ? !TTI.isLegalMaskedLoad(
+                               FixedVectorType::get(VecElemTy, NumVecElems),
+                               Alignment, AS, TTI::MaskKind::ConstantMask)
+                         : !TTI.isLegalMaskedStore(
+                               FixedVectorType::get(VecElemTy, NumVecElems),
+                               Alignment, AS, TTI::MaskKind::ConstantMask))) {
           LLVM_DEBUG(dbgs()
                      << "LSV: splitChainByAlignment discarding candidate chain "
                         "because it contains extra loads/stores that we cannot "

>From 8d0d2e91f1a4f56e86ac07e7107b85c131568d6f Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Fri, 24 Oct 2025 17:04:25 +0000
Subject: [PATCH 13/24] More comment improvement

---
 .../Transforms/Vectorize/LoadStoreVectorizer.cpp    | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 5145f2368ac47..44c676a4a0532 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -680,16 +680,17 @@ std::vector<Chain> Vectorizer::splitChainByContiguity(Chain &C) {
           : TTI.isLegalMaskedStore(OptimisticVectorType, OptimisticAlign, AS,
                                    TTI::MaskKind::ConstantMask);
 
-  unsigned ASPtrBits = DL.getIndexSizeInBits(AS);
-
-  // Compute the alignment of the leader of the chain (which every stored offset
-  // is based on) using the current first element of the chain. This is
-  // conservative, we may be able to derive better alignment by iterating over
-  // the chain and finding the leader.
+  // Derive the alignment of the leader of the chain (which every
+  // OffsetFromLeader is based on) using the current first element of the chain.
+  // We could derive a better alignment by iterating over the entire chain but
+  // this should be sufficient. We use this value to derive the alignment of any
+  // extra elements we create while gap filling.
   Align LeaderOfChainAlign =
       commonAlignment(getLoadStoreAlignment(C[0].Inst),
                       C[0].OffsetFromLeader.abs().getLimitedValue());
 
+  unsigned ASPtrBits = DL.getIndexSizeInBits(AS);
+
   std::vector<Chain> Ret;
   Ret.push_back({C.front()});
 

>From cae6020d4bcda291cb204f947c87b343d92bcbde Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Mon, 27 Oct 2025 15:30:39 +0000
Subject: [PATCH 14/24] Add comment to clarify API usage

---
 llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 44c676a4a0532..6ec0fc0e3f7c1 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -671,6 +671,12 @@ std::vector<Chain> Vectorizer::splitChainByContiguity(Chain &C) {
   Align OptimisticAlign = Align(MaxVecRegBits / 8);
   unsigned int MaxVectorNumElems =
       MaxVecRegBits / DL.getTypeSizeInBits(ElementType);
+  // Note: This check decides whether to try to fill gaps based on the masked
+  // legality of the target's maximum vector size (getLoadStoreVecRegBitWidth).
+  // If a target *does not* support a masked load/store with this max vector
+  // size, but *does* support a masked load/store with a *smaller* vector size,
+  // that optimization will be missed. This does not occur in any of the targets
+  // that currently support this API.
   FixedVectorType *OptimisticVectorType =
       FixedVectorType::get(ElementType, MaxVectorNumElems);
   bool TryFillGaps =

>From 4113e637afa7873f36968dba318a99f712a107ca Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Wed, 5 Nov 2025 16:42:40 +0000
Subject: [PATCH 15/24] Address review feedback

---
 .../Vectorize/LoadStoreVectorizer.cpp         | 56 +++++++++++--------
 .../LoadStoreVectorizer/NVPTX/extend-chain.ll | 42 ++++++++++++--
 2 files changed, 70 insertions(+), 28 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 6ec0fc0e3f7c1..3ac8ed4a5f50f 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -283,13 +283,15 @@ class Vectorizer {
   bool runOnChain(Chain &C);
 
   /// Splits the chain into subchains of instructions which read/write a
-  /// contiguous block of memory.  Discards any length-1 subchains (because
-  /// there's nothing to vectorize in there).
+  /// contiguous block of memory. Discards any length-1 subchains (because
+  /// there's nothing to vectorize in there). Also attempts to fill gaps with
+  /// "extra" elements to artificially make chains contiguous in some cases.
   std::vector<Chain> splitChainByContiguity(Chain &C);
 
   /// Splits the chain into subchains where it's safe to hoist loads up to the
   /// beginning of the sub-chain and it's safe to sink loads up to the end of
-  /// the sub-chain.  Discards any length-1 subchains.
+  /// the sub-chain. Discards any length-1 subchains. Also attempts to extend
+  /// non-power-of-two chains by adding "extra" elements in some cases.
   std::vector<Chain> splitChainByMayAliasInstrs(Chain &C);
 
   /// Splits the chain into subchains that make legal, aligned accesses.
@@ -730,14 +732,15 @@ std::vector<Chain> Vectorizer::splitChainByContiguity(Chain &C) {
     // which could cancel out the benefits of reducing number of load/stores.
     if (TryFillGaps &&
         SzBits == DL.getTypeSizeInBits(getLoadStoreType(It->Inst))) {
-      APInt OffsetOfGapStart = Prev.OffsetFromLeader + PrevSzBytes;
-      APInt GapSzBytes = It->OffsetFromLeader - OffsetOfGapStart;
+      APInt OffsetFromLeaderOfGapStart = Prev.OffsetFromLeader + PrevSzBytes;
+      APInt GapSzBytes = It->OffsetFromLeader - OffsetFromLeaderOfGapStart;
       if (GapSzBytes == PrevSzBytes) {
         // There is a single gap between Prev and Curr, create one extra element
         ChainElem NewElem = createExtraElementAfter(
             Prev, PrevSzBytes, "GapFill",
-            commonAlignment(LeaderOfChainAlign,
-                            OffsetOfGapStart.abs().getLimitedValue()));
+            commonAlignment(
+                LeaderOfChainAlign,
+                OffsetFromLeaderOfGapStart.abs().getLimitedValue()));
         CurChain.push_back(NewElem);
         CurChain.push_back(*It);
         continue;
@@ -748,13 +751,15 @@ std::vector<Chain> Vectorizer::splitChainByContiguity(Chain &C) {
       if ((GapSzBytes == 2 * PrevSzBytes) && (CurChain.size() % 4 == 1)) {
         ChainElem NewElem1 = createExtraElementAfter(
             Prev, PrevSzBytes, "GapFill",
-            commonAlignment(LeaderOfChainAlign,
-                            OffsetOfGapStart.abs().getLimitedValue()));
-        ChainElem NewElem2 = createExtraElementAfter(
-            NewElem1, PrevSzBytes, "GapFill",
             commonAlignment(
                 LeaderOfChainAlign,
-                (OffsetOfGapStart + PrevSzBytes).abs().getLimitedValue()));
+                OffsetFromLeaderOfGapStart.abs().getLimitedValue()));
+        ChainElem NewElem2 = createExtraElementAfter(
+            NewElem1, PrevSzBytes, "GapFill",
+            commonAlignment(LeaderOfChainAlign,
+                            (OffsetFromLeaderOfGapStart + PrevSzBytes)
+                                .abs()
+                                .getLimitedValue()));
         CurChain.push_back(NewElem1);
         CurChain.push_back(NewElem2);
         CurChain.push_back(*It);
@@ -920,9 +925,14 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
         }
       }
 
-      // Attempt to extend non-power-of-2 chains to the next power of 2.
+      // The vectorizer does not support non-power-of-2 element count vectors.
+      // Extend the chain to the next power-of-2 if the current chain:
+      //  1. Does not have a power-of-2 element count
+      //  2. Would be legal to vectorize if the element count was extended to
+      //     the next power-of-2
       Chain ExtendingLoadsStores;
-      if (NumVecElems < TargetVF && NumVecElems % 2 != 0 && VecElemBits >= 8) {
+      if (NumVecElems < TargetVF && !isPowerOf2_32(NumVecElems) &&
+          VecElemBits >= 8 && isPowerOf2_32(TargetVF)) {
         // TargetVF may be a lot higher than NumVecElems,
         // so only extend to the next power of 2.
         assert(VecElemBits % 8 == 0);
@@ -936,10 +946,8 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
                           << NumVecElems << " "
                           << (IsLoadChain ? "loads" : "stores") << " to "
                           << NewNumVecElems << " elements\n");
-        // Do not artificially increase the chain if it becomes misaligned or if
-        // the associated masked load/store is not legal, otherwise we may
-        // unnecessarily split the chain when the target actually supports
-        // non-pow2 VF.
+        // Only artificially increase the chain if it would be AllowedAndFast
+        // and if the resulting masked load/store will be legal for the target.
         if (accessIsAllowedAndFast(NewSizeBytes, AS, Alignment, VecElemBits) &&
             (IsLoadChain ? TTI.isLegalMaskedLoad(
                                FixedVectorType::get(VecElemTy, NewNumVecElems),
@@ -1039,15 +1047,14 @@ bool Vectorizer::vectorizeChain(Chain &C) {
   if (C.size() < 2)
     return false;
 
+  bool ChainContainsExtraLoadsStores = llvm::any_of(
+      C, [this](const ChainElem &E) { return ExtraElements.contains(E.Inst); });
+
   // If we are left with a two-element chain, and one of the elements is an
   // extra element, we don't want to vectorize
-  if (C.size() == 2 &&
-      (ExtraElements.contains(C[0].Inst) || ExtraElements.contains(C[1].Inst)))
+  if (C.size() == 2 && ChainContainsExtraLoadsStores)
     return false;
 
-  bool ChainContainsExtraLoadsStores = llvm::any_of(
-      C, [this](const ChainElem &E) { return ExtraElements.contains(E.Inst); });
-
   sortChainInOffsetOrder(C);
 
   LLVM_DEBUG({
@@ -1847,8 +1854,11 @@ std::optional<APInt> Vectorizer::getConstantOffset(Value *PtrA, Value *PtrB,
 bool Vectorizer::accessIsAllowedAndFast(unsigned SizeBytes, unsigned AS,
                                         Align Alignment,
                                         unsigned VecElemBits) const {
+  // Aligned vector accesses are ALWAYS faster than element-wise accesses.
   if (Alignment.value() % SizeBytes == 0)
     return true;
+
+  // Element-wise access *might* be faster than misaligned vector accesses.
   unsigned VectorizedSpeed = 0;
   bool AllowsMisaligned = TTI.allowsMisalignedMemoryAccesses(
       F.getContext(), SizeBytes * 8, AS, Alignment, &VectorizedSpeed);
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll
index 892249e87e4bf..5c3757867f71f 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/extend-chain.ll
@@ -6,7 +6,7 @@
 ;; code. Alignment and other requirement for vectorization should
 ;; still be met.
 
-define void @load3to4(ptr %p) #0 {
+define void @load3to4(ptr %p) {
 ; CHECK-LABEL: define void @load3to4(
 ; CHECK-SAME: ptr [[P:%.*]]) {
 ; CHECK-NEXT:    [[P_0:%.*]] = getelementptr i32, ptr [[P]], i32 0
@@ -28,7 +28,7 @@ define void @load3to4(ptr %p) #0 {
   ret void
 }
 
-define void @load5to8(ptr %p) #0 {
+define void @load5to8(ptr %p) {
 ; CHECK-LABEL: define void @load5to8(
 ; CHECK-SAME: ptr [[P:%.*]]) {
 ; CHECK-NEXT:    [[P_0:%.*]] = getelementptr i16, ptr [[P]], i32 0
@@ -52,13 +52,45 @@ define void @load5to8(ptr %p) #0 {
   %v0 = load i16, ptr %p.0, align 16
   %v1 = load i16, ptr %p.1, align 2
   %v2 = load i16, ptr %p.2, align 4
-  %v3 = load i16, ptr %p.3, align 8
-  %v4 = load i16, ptr %p.4, align 2
+  %v3 = load i16, ptr %p.3, align 2
+  %v4 = load i16, ptr %p.4, align 8
 
   ret void
 }
 
-define void @load3to4_unaligned(ptr %p) #0 {
+define void @load6to8(ptr %p) {
+; CHECK-LABEL: define void @load6to8(
+; CHECK-SAME: ptr [[P:%.*]]) {
+; CHECK-NEXT:    [[P_0:%.*]] = getelementptr i16, ptr [[P]], i32 0
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr align 16 [[P_0]], <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false>, <8 x i16> poison)
+; CHECK-NEXT:    [[V05:%.*]] = extractelement <8 x i16> [[TMP1]], i32 0
+; CHECK-NEXT:    [[V16:%.*]] = extractelement <8 x i16> [[TMP1]], i32 1
+; CHECK-NEXT:    [[V27:%.*]] = extractelement <8 x i16> [[TMP1]], i32 2
+; CHECK-NEXT:    [[V38:%.*]] = extractelement <8 x i16> [[TMP1]], i32 3
+; CHECK-NEXT:    [[V49:%.*]] = extractelement <8 x i16> [[TMP1]], i32 4
+; CHECK-NEXT:    [[EXTEND10:%.*]] = extractelement <8 x i16> [[TMP1]], i32 5
+; CHECK-NEXT:    [[EXTEND211:%.*]] = extractelement <8 x i16> [[TMP1]], i32 6
+; CHECK-NEXT:    [[EXTEND412:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7
+; CHECK-NEXT:    ret void
+;
+  %p.0 = getelementptr i16, ptr %p, i32 0
+  %p.1 = getelementptr i16, ptr %p, i32 1
+  %p.2 = getelementptr i16, ptr %p, i32 2
+  %p.3 = getelementptr i16, ptr %p, i32 3
+  %p.4 = getelementptr i16, ptr %p, i32 4
+  %p.5 = getelementptr i16, ptr %p, i32 5
+
+  %v0 = load i16, ptr %p.0, align 16
+  %v1 = load i16, ptr %p.1, align 2
+  %v2 = load i16, ptr %p.2, align 4
+  %v3 = load i16, ptr %p.3, align 2
+  %v4 = load i16, ptr %p.4, align 8
+  %v5 = load i16, ptr %p.5, align 2
+
+  ret void
+}
+
+define void @load3to4_unaligned(ptr %p) {
 ; CHECK-LABEL: define void @load3to4_unaligned(
 ; CHECK-SAME: ptr [[P:%.*]]) {
 ; CHECK-NEXT:    [[P_0:%.*]] = getelementptr i32, ptr [[P]], i32 0
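
As a quick cross-reference for the load5to8/load6to8 cases above: the extension step simply rounds the element count up to the next power of two and then re-validates alignment and masked legality. A hypothetical standalone helper capturing the arithmetic (illustrative only, not part of the patch):

#include "llvm/Support/MathExtras.h"
#include <cassert>

// Byte size of a chain after extending a non-power-of-2 element count to the
// next power of 2, e.g. 5 x i16 (10 bytes) becomes 8 x i16 (16 bytes).
static unsigned extendedChainSizeBytes(unsigned NumVecElems,
                                       unsigned VecElemBits) {
  assert(VecElemBits % 8 == 0 && "sub-byte elements are not extended");
  unsigned VecElemBytes = VecElemBits / 8;
  unsigned NewNumVecElems = llvm::PowerOf2Ceil(NumVecElems);
  return VecElemBytes * NewNumVecElems;
}

The extra "Extend" loads created this way only survive if the widened access stays AllowedAndFast and the resulting masked load/store is legal for the target, as checked in the hunk above.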

>From f02c6f866e89253def3e19b0c5fa98587c743b9f Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Wed, 5 Nov 2025 17:38:56 +0000
Subject: [PATCH 16/24] Rework alignment deriving while gap filling

---
 .../Vectorize/LoadStoreVectorizer.cpp         | 45 ++++++++++---------
 1 file changed, 25 insertions(+), 20 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 3ac8ed4a5f50f..77e125d7bd5c0 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -688,14 +688,25 @@ std::vector<Chain> Vectorizer::splitChainByContiguity(Chain &C) {
           : TTI.isLegalMaskedStore(OptimisticVectorType, OptimisticAlign, AS,
                                    TTI::MaskKind::ConstantMask);
 
-  // Derive the alignment of the leader of the chain (which every
-  // OffsetFromLeader is based on) using the current first element of the chain.
-  // We could derive a better alignment by iterating over the entire chain but
-  // this should be sufficient. We use this value to derive the alignment of any
-  // extra elements we create while gap filling.
-  Align LeaderOfChainAlign =
-      commonAlignment(getLoadStoreAlignment(C[0].Inst),
-                      C[0].OffsetFromLeader.abs().getLimitedValue());
+  // Cache the best aligned element in the chain for use when creating extra
+  // elements.
+  Align BestAlignedElemAlign;
+  APInt OffsetOfBestAlignedElemFromLeader;
+  for (const auto &E : C) {
+    Align ElementAlignment = getLoadStoreAlignment(E.Inst);
+    if (ElementAlignment > BestAlignedElemAlign) {
+      BestAlignedElemAlign = ElementAlignment;
+      OffsetOfBestAlignedElemFromLeader = E.OffsetFromLeader;
+    }
+  }
+
+  auto DeriveAlignFromBestAlignedElem = [&](APInt NewElemOffsetFromLeader) {
+    return commonAlignment(
+        BestAlignedElemAlign,
+        (NewElemOffsetFromLeader - OffsetOfBestAlignedElemFromLeader)
+            .abs()
+            .getLimitedValue());
+  };
 
   unsigned ASPtrBits = DL.getIndexSizeInBits(AS);
 
@@ -732,15 +743,13 @@ std::vector<Chain> Vectorizer::splitChainByContiguity(Chain &C) {
     // which could cancel out the benefits of reducing number of load/stores.
     if (TryFillGaps &&
         SzBits == DL.getTypeSizeInBits(getLoadStoreType(It->Inst))) {
-      APInt OffsetFromLeaderOfGapStart = Prev.OffsetFromLeader + PrevSzBytes;
-      APInt GapSzBytes = It->OffsetFromLeader - OffsetFromLeaderOfGapStart;
+      APInt OffsetOfGapStartFromLeader = Prev.OffsetFromLeader + PrevSzBytes;
+      APInt GapSzBytes = It->OffsetFromLeader - OffsetOfGapStartFromLeader;
       if (GapSzBytes == PrevSzBytes) {
         // There is a single gap between Prev and Curr, create one extra element
         ChainElem NewElem = createExtraElementAfter(
             Prev, PrevSzBytes, "GapFill",
-            commonAlignment(
-                LeaderOfChainAlign,
-                OffsetFromLeaderOfGapStart.abs().getLimitedValue()));
+            DeriveAlignFromBestAlignedElem(OffsetOfGapStartFromLeader));
         CurChain.push_back(NewElem);
         CurChain.push_back(*It);
         continue;
@@ -751,15 +760,11 @@ std::vector<Chain> Vectorizer::splitChainByContiguity(Chain &C) {
       if ((GapSzBytes == 2 * PrevSzBytes) && (CurChain.size() % 4 == 1)) {
         ChainElem NewElem1 = createExtraElementAfter(
             Prev, PrevSzBytes, "GapFill",
-            commonAlignment(
-                LeaderOfChainAlign,
-                OffsetFromLeaderOfGapStart.abs().getLimitedValue()));
+            DeriveAlignFromBestAlignedElem(OffsetOfGapStartFromLeader));
         ChainElem NewElem2 = createExtraElementAfter(
             NewElem1, PrevSzBytes, "GapFill",
-            commonAlignment(LeaderOfChainAlign,
-                            (OffsetFromLeaderOfGapStart + PrevSzBytes)
-                                .abs()
-                                .getLimitedValue()));
+            DeriveAlignFromBestAlignedElem(OffsetOfGapStartFromLeader +
+                                           PrevSzBytes));
         CurChain.push_back(NewElem1);
         CurChain.push_back(NewElem2);
         CurChain.push_back(*It);
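
To spell out what DeriveAlignFromBestAlignedElem computes, an equivalent free-standing sketch follows (hypothetical names, illustrative only, not part of the patch):

#include "llvm/Support/Alignment.h"
#include <cstdint>

// Conservative alignment for a new element at NewOffsetFromLeader (in bytes),
// derived from the best-aligned existing element. For example, with the
// best-aligned element at align 16 and offset 0, a gap-fill element at offset
// 8 gets commonAlignment(16, 8) = 8, and one at offset 20 gets 4.
static llvm::Align deriveExtraElemAlign(llvm::Align BestAlign,
                                        int64_t BestOffsetFromLeader,
                                        int64_t NewOffsetFromLeader) {
  uint64_t Dist = NewOffsetFromLeader >= BestOffsetFromLeader
                      ? NewOffsetFromLeader - BestOffsetFromLeader
                      : BestOffsetFromLeader - NewOffsetFromLeader;
  return llvm::commonAlignment(BestAlign, Dist);
}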

>From 8240ccb03178fdcf4d2a96a34016760551cf85f1 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Wed, 5 Nov 2025 18:39:37 +0000
Subject: [PATCH 17/24] Fix bug in alignment derive, update test to show
 improvement

---
 llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp        | 4 ++--
 .../LoadStoreVectorizer/NVPTX/many_loads_stores.ll           | 5 +++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 77e125d7bd5c0..adcbea61d75b1 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -690,8 +690,8 @@ std::vector<Chain> Vectorizer::splitChainByContiguity(Chain &C) {
 
   // Cache the best aligned element in the chain for use when creating extra
   // elements.
-  Align BestAlignedElemAlign;
-  APInt OffsetOfBestAlignedElemFromLeader;
+  Align BestAlignedElemAlign = getLoadStoreAlignment(C[0].Inst);
+  APInt OffsetOfBestAlignedElemFromLeader = C[0].OffsetFromLeader;
   for (const auto &E : C) {
     Align ElementAlignment = getLoadStoreAlignment(E.Inst);
     if (ElementAlignment > BestAlignedElemAlign) {
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/many_loads_stores.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/many_loads_stores.ll
index 11063dfeca54f..abe15c00c494b 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/many_loads_stores.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/many_loads_stores.ll
@@ -1,9 +1,10 @@
 ; This is an end-to-end test that checks that LSV succeeds at vectorizing a
 ; large program with many loads.
 ; RUN: opt -mtriple=nvptx64-nvidia-cuda -passes=load-store-vectorizer -S -o - %s > %t
-; RUN: grep 'load i8' < %t | count 18
-; RUN: grep 'load <2 x i8>' < %t | count 9
+; RUN: grep 'load i8' < %t | count 12
+; RUN: grep 'load <2 x i8>' < %t | count 3
 ; RUN: grep 'load <4 x i8>' < %t | count 27
+; RUN: grep 'call <4 x i8> @llvm.masked.load.v4i8.p1.*<4 x i1> <i1 false, i1 true, i1 true, i1 true>' < %t | count 6
 
 target datalayout = "e-i64:64-i128:128-v16:16-v32:32-n16:32:64"
 target triple = "nvptx64-nvidia-cuda"

>From 01dad11d6f69ee30f924cfc11ca70c606d22e835 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Wed, 5 Nov 2025 18:39:56 +0000
Subject: [PATCH 18/24] Update tests to check for hex pragma

---
 llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll    | 2 +-
 llvm/test/CodeGen/NVPTX/param-vectorize-device.ll | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
index 21b18555371dc..a75ddd032d4c0 100644
--- a/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
+++ b/llvm/test/CodeGen/NVPTX/LoadStoreVectorizer.ll
@@ -50,7 +50,7 @@ define half @fh(ptr %p) {
 ; ENABLED-EMPTY:
 ; ENABLED-NEXT:  // %bb.0:
 ; ENABLED-NEXT:    ld.param.b64 %rd1, [fh_param_0];
-; ENABLED-NEXT:    .pragma "used_bytes_mask 1023";
+; ENABLED-NEXT:    .pragma "used_bytes_mask 0x3ff";
 ; ENABLED-NEXT:    ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
 ; ENABLED-NEXT:    { .reg .b16 tmp; mov.b32 {%rs1, tmp}, %r3; }
 ; ENABLED-NEXT:    mov.b32 {%rs2, %rs3}, %r2;
diff --git a/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll b/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll
index 67dd29b1b6ca6..643de006f14c4 100644
--- a/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll
+++ b/llvm/test/CodeGen/NVPTX/param-vectorize-device.ll
@@ -171,7 +171,7 @@ define internal fastcc [3 x i32] @callee_St4x3(ptr nocapture noundef readonly by
   ; CHECK:       .func  (.param .align 16 .b8 func_retval0[12])
   ; CHECK-LABEL: callee_St4x3(
   ; CHECK-NEXT:  .param .align 16 .b8 callee_St4x3_param_0[12]
-  ; CHECK:       .pragma "used_bytes_mask 4095";
+  ; CHECK:       .pragma "used_bytes_mask 0xfff";
   ; CHECK:       ld.param.v4.b32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]], [[R3:%r[0-9]+]], %{{.*}}}, [callee_St4x3_param_0];
   ; CHECK-DAG:   st.param.v2.b32 [func_retval0], {[[R1]], [[R2]]};
   ; CHECK-DAG:   st.param.b32    [func_retval0+8], [[R3]];
@@ -394,7 +394,7 @@ define internal fastcc [7 x i32] @callee_St4x7(ptr nocapture noundef readonly by
   ; CHECK-LABEL: callee_St4x7(
   ; CHECK-NEXT:  .param .align 16 .b8 callee_St4x7_param_0[28]
   ; CHECK:       ld.param.v4.b32 {[[R1:%r[0-9]+]], [[R2:%r[0-9]+]], [[R3:%r[0-9]+]], [[R4:%r[0-9]+]]}, [callee_St4x7_param_0];
-  ; CHECK:       .pragma "used_bytes_mask 4095";
+  ; CHECK:       .pragma "used_bytes_mask 0xfff";
   ; CHECK:       ld.param.v4.b32 {[[R5:%r[0-9]+]], [[R6:%r[0-9]+]], [[R7:%r[0-9]+]], %{{.*}}}, [callee_St4x7_param_0+16];
   ; CHECK-DAG:   st.param.v4.b32 [func_retval0],  {[[R1]], [[R2]], [[R3]], [[R4]]};
   ; CHECK-DAG:   st.param.v2.b32 [func_retval0+16], {[[R5]], [[R6]]};

>From 6dc716dc0e4fe5b671ac67acd701e3b5c6b96f8e Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Wed, 5 Nov 2025 20:46:13 +0000
Subject: [PATCH 19/24] Add more specific asserts, remove if condition

---
 llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index adcbea61d75b1..f5df12c17f334 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -937,7 +937,7 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
       //     the next power-of-2
       Chain ExtendingLoadsStores;
       if (NumVecElems < TargetVF && !isPowerOf2_32(NumVecElems) &&
-          VecElemBits >= 8 && isPowerOf2_32(TargetVF)) {
+          VecElemBits >= 8) {
         // TargetVF may be a lot higher than NumVecElems,
         // so only extend to the next power of 2.
         assert(VecElemBits % 8 == 0);
@@ -945,7 +945,8 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
         unsigned NewNumVecElems = PowerOf2Ceil(NumVecElems);
         unsigned NewSizeBytes = VecElemBytes * NewNumVecElems;
 
-        assert(NewNumVecElems <= TargetVF);
+        assert(isPowerOf2_32(TargetVF) && "TargetVF expected to be a power of 2");
+        assert(NewNumVecElems <= TargetVF && "Should not extend past TargetVF");
 
         LLVM_DEBUG(dbgs() << "LSV: attempting to extend chain of "
                           << NumVecElems << " "

>From 7a05ee3b15ae2ac8a6d252518281f1428a567140 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Wed, 5 Nov 2025 22:10:21 +0000
Subject: [PATCH 20/24] Update test to account for change in sub-byte element
 type legalization for NVPTX

---
 .../NVPTX/gap-fill-vectors.ll                 | 60 +++++++------------
 1 file changed, 20 insertions(+), 40 deletions(-)

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll
index 9162ea00a199c..15a92fbb452ac 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-vectors.ll
@@ -5,49 +5,10 @@
 ; currently, we do not gap fill when the loads enclosing the gap are different sizes
 ; Otherwise, vectors are treated the same as any other scalar types
 
-define void @i1x8_gap_gap_i1x8(ptr %ptr) {
-; CHECK-LABEL: define void @i1x8_gap_gap_i1x8(
-; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
-; CHECK-NEXT:    [[TMP1:%.*]] = call <32 x i1> @llvm.masked.load.v32i1.p0(ptr align 4 [[PTR0]], <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i1> poison)
-; CHECK-NEXT:    [[L03:%.*]] = shufflevector <32 x i1> [[TMP1]], <32 x i1> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; CHECK-NEXT:    [[GAPFILL4:%.*]] = shufflevector <32 x i1> [[TMP1]], <32 x i1> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT:    [[GAPFILL25:%.*]] = shufflevector <32 x i1> [[TMP1]], <32 x i1> poison, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
-; CHECK-NEXT:    [[L36:%.*]] = shufflevector <32 x i1> [[TMP1]], <32 x i1> poison, <8 x i32> <i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
-; CHECK-NEXT:    ret void
-;
-  %ptr0 = getelementptr i8, ptr %ptr, i64 0
-  %ptr3 = getelementptr i8, ptr %ptr, i64 3
-
-  %l0 = load <8 x i1>,  ptr %ptr0, align 4
-  %l3 = load <8 x i1>,  ptr %ptr3, align 1
-
-  ret void
-}
-
-; The chain elements are different sizes, gap filling won't kick in
-define void @i1x8_gap_gap_i1x16(ptr %ptr) {
-; CHECK-LABEL: define void @i1x8_gap_gap_i1x16(
-; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
-; CHECK-NEXT:    [[PTR3:%.*]] = getelementptr i8, ptr [[PTR]], i64 3
-; CHECK-NEXT:    [[L0:%.*]] = load <8 x i1>, ptr [[PTR0]], align 4
-; CHECK-NEXT:    [[L3:%.*]] = load <16 x i1>, ptr [[PTR3]], align 2
-; CHECK-NEXT:    ret void
-;
-  %ptr0 = getelementptr i8, ptr %ptr, i64 0
-  %ptr3 = getelementptr i8, ptr %ptr, i64 3
-
-  %l0 = load <8 x i1>,  ptr %ptr0, align 4
-  %l3 = load <16 x i1>,  ptr %ptr3, align 2
-
-  ret void
-}
-
 ; Gap of two load <2 x i8>s gets filled
 define void @i8x2_gap_gap_i8x2(ptr %ptr) {
 ; CHECK-LABEL: define void @i8x2_gap_gap_i8x2(
-; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr align 8 [[PTR0]], <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true>, <8 x i8> poison)
 ; CHECK-NEXT:    [[L03:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> poison, <2 x i32> <i32 0, i32 1>
@@ -184,3 +145,22 @@ define void @i64x2_i64x2_gap_i64x2(ptr addrspace(1) %in) {
   %vec1 = load <2 x i64>, ptr addrspace(1) %getElem1, align 8
   ret void
 }
+
+; Masked loads are not supported for sub-byte element types.
+define void @i1x8_gap_gap_i1x8(ptr %ptr) {
+; CHECK-LABEL: define void @i1x8_gap_gap_i1x8(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr i8, ptr [[PTR]], i64 0
+; CHECK-NEXT:    [[PTR3:%.*]] = getelementptr i8, ptr [[PTR]], i64 3
+; CHECK-NEXT:    [[L0:%.*]] = load <8 x i1>, ptr [[PTR0]], align 4
+; CHECK-NEXT:    [[L3:%.*]] = load <8 x i1>, ptr [[PTR3]], align 1
+; CHECK-NEXT:    ret void
+;
+  %ptr0 = getelementptr i8, ptr %ptr, i64 0
+  %ptr3 = getelementptr i8, ptr %ptr, i64 3
+
+  %l0 = load <8 x i1>,  ptr %ptr0, align 4
+  %l3 = load <8 x i1>,  ptr %ptr3, align 1
+
+  ret void
+}

>From 82b6fcd813836cae04fd564c21651b2aaa3b0d45 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Wed, 5 Nov 2025 22:16:03 +0000
Subject: [PATCH 21/24] Formatting

---
 llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index f5df12c17f334..3ed84f69dc8ac 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -945,7 +945,8 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
         unsigned NewNumVecElems = PowerOf2Ceil(NumVecElems);
         unsigned NewSizeBytes = VecElemBytes * NewNumVecElems;
 
-        assert(isPowerOf2_32(TargetVF) && "TargetVF expected to be a power of 2");
+        assert(isPowerOf2_32(TargetVF) &&
+               "TargetVF expected to be a power of 2");
         assert(NewNumVecElems <= TargetVF && "Should not extend past TargetVF");
 
         LLVM_DEBUG(dbgs() << "LSV: attempting to extend chain of "

>From ccd5893c405910f6801a1897b8777b3185e30b2c Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Fri, 21 Nov 2025 22:22:24 +0000
Subject: [PATCH 22/24] Fix formatting

---
 llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index d24f0882f9b19..b3df0352a6093 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -1965,7 +1965,8 @@ ChainElem Vectorizer::createExtraElementAfter(const ChainElem &Prev, Type *Ty,
 Value *Vectorizer::createMaskForExtraElements(const ArrayRef<ChainElem> C,
                                               FixedVectorType *VecTy) {
   // Start each mask element as false
-  SmallVector<Constant *, 64> MaskElts(VecTy->getNumElements(), Builder.getInt1(false));
+  SmallVector<Constant *, 64> MaskElts(VecTy->getNumElements(),
+                                       Builder.getInt1(false));
   // Iterate over the chain and set the corresponding mask element to true for
   // each element that is not an extra element.
   for (const ChainElem &E : C) {

>From 551f136fe621027307a66954d1974cdd80432271 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Fri, 21 Nov 2025 23:41:59 +0000
Subject: [PATCH 23/24] Add redundant element test with gap filling

---
 .../NVPTX/gap-fill-with-redundant-elements.ll | 71 +++++++++++++++++++
 1 file changed, 71 insertions(+)
 create mode 100644 llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-with-redundant-elements.ll

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-with-redundant-elements.ll b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-with-redundant-elements.ll
new file mode 100644
index 0000000000000..e22035b4ed922
--- /dev/null
+++ b/llvm/test/Transforms/LoadStoreVectorizer/NVPTX/gap-fill-with-redundant-elements.ll
@@ -0,0 +1,71 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -mtriple=nvptx64-nvidia-cuda -passes=load-store-vectorizer -mcpu=sm_100 -mattr=+ptx88 -S -o - %s | FileCheck %s
+
+define void @test_redundant_no_gap(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define void @test_redundant_no_gap(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr addrspace(1) [[PTR]], align 32
+; CHECK-NEXT:    [[LD03:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[LD14:%.*]] = extractelement <8 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[LD25:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT:    [[LD37:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 4, i32 5>
+; CHECK-NEXT:    [[LD45:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 6, i32 7>
+; CHECK-NEXT:    ret void
+;
+  %ld0 = load <2 x i32>, ptr addrspace(1) %ptr, align 32
+  %gep1 = getelementptr inbounds i8, ptr addrspace(1) %ptr, i32 4
+  %ld1 = load i32, ptr addrspace(1) %gep1, align 4
+  %gep2 = getelementptr inbounds i8, ptr addrspace(1) %ptr, i32 8
+  %ld2 = load <2 x i32>, ptr addrspace(1) %gep2, align 8
+  %gep3 = getelementptr inbounds i8, ptr addrspace(1) %ptr, i32 16
+  %ld3 = load <2 x i32>, ptr addrspace(1) %gep3, align 16
+  %gep4 = getelementptr inbounds i8, ptr addrspace(1) %ptr, i32 24
+  %ld4 = load <2 x i32>, ptr addrspace(1) %gep4, align 8
+  ret void
+}
+
+; This fills the two-element (8-byte) gap between ld0 and ld3.
+; ld1 is folded into the vector, but ld0 is treated as the last element before
+; the gap when the gap is considered, because it reads further than ld1.
+define void @test_redundant_gap(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define void @test_redundant_gap(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) align 32 [[PTR]], <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <8 x i32> poison)
+; CHECK-NEXT:    [[LD01:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[LD12:%.*]] = extractelement <8 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[GAPFILL3:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT:    [[LD34:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 4, i32 5>
+; CHECK-NEXT:    [[LD45:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 6, i32 7>
+; CHECK-NEXT:    ret void
+;
+  %ld0 = load <2 x i32>, ptr addrspace(1) %ptr, align 32
+  %ld1 = load i32, ptr addrspace(1) %ptr, align 4
+  %gep3 = getelementptr inbounds i8, ptr addrspace(1) %ptr, i32 16
+  %ld3 = load <2 x i32>, ptr addrspace(1) %gep3, align 16
+  %gep4 = getelementptr inbounds i8, ptr addrspace(1) %ptr, i32 24
+  %ld4 = load <2 x i32>, ptr addrspace(1) %gep4, align 8
+  ret void
+}
+
+; This chain contains two redundant elements, one before a gap and one before
+; the end of the chain. The chain should be correctly gap-filled and extended.
+define void @test_redundant_gap_and_extend(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define void @test_redundant_gap_and_extend(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) align 32 [[PTR]], <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false>, <8 x i32> poison)
+; CHECK-NEXT:    [[LD03:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[LD14:%.*]] = extractelement <8 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[GAPFILL5:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT:    [[LD36:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> poison, <2 x i32> <i32 4, i32 5>
+; CHECK-NEXT:    [[LD47:%.*]] = extractelement <8 x i32> [[TMP1]], i32 4
+; CHECK-NEXT:    [[EXTEND8:%.*]] = extractelement <8 x i32> [[TMP1]], i32 6
+; CHECK-NEXT:    [[EXTEND29:%.*]] = extractelement <8 x i32> [[TMP1]], i32 7
+; CHECK-NEXT:    ret void
+;
+  %ld0 = load <2 x i32>, ptr addrspace(1) %ptr, align 32
+  %ld1 = load i32, ptr addrspace(1) %ptr, align 4
+  %gep3 = getelementptr inbounds i8, ptr addrspace(1) %ptr, i32 16
+  %ld3 = load <2 x i32>, ptr addrspace(1) %gep3, align 16
+  %ld4 = load i32, ptr addrspace(1) %gep3, align 4
+  ret void
+}

>From ce4b7e0a1ec713e333fa058c816c412e9fdd8019 Mon Sep 17 00:00:00 2001
From: Drew Kersnar <dkersnar at nvidia.com>
Date: Fri, 21 Nov 2025 23:42:17 +0000
Subject: [PATCH 24/24] Squashed cherry-pick of 13 commits from
 github/dkersnar/masked-store-lowering

---
 .../llvm/Analysis/TargetTransformInfo.h       |  16 +-
 .../llvm/Analysis/TargetTransformInfoImpl.h   |   6 +-
 llvm/lib/Analysis/TargetTransformInfo.cpp     |  12 +-
 .../SelectionDAG/LegalizeVectorTypes.cpp      |  10 +-
 .../SelectionDAG/SelectionDAGBuilder.cpp      |   2 +
 .../AArch64/AArch64TargetTransformInfo.h      |   6 +-
 .../lib/Target/ARM/ARMTargetTransformInfo.cpp |   3 +-
 llvm/lib/Target/ARM/ARMTargetTransformInfo.h  |  16 +-
 .../Hexagon/HexagonTargetTransformInfo.cpp    |   6 +-
 .../Hexagon/HexagonTargetTransformInfo.h      |   7 +-
 .../NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp   |  20 +
 .../NVPTX/MCTargetDesc/NVPTXInstPrinter.h     |   3 +
 llvm/lib/Target/NVPTX/NVPTXForwardParams.cpp  |   2 +-
 llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp   |  39 +-
 llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp   | 202 +++++++++-
 llvm/lib/Target/NVPTX/NVPTXISelLowering.h     |   1 +
 llvm/lib/Target/NVPTX/NVPTXInstrInfo.td       |  34 +-
 llvm/lib/Target/NVPTX/NVPTXIntrinsics.td      |  17 +-
 .../Target/NVPTX/NVPTXReplaceImageHandles.cpp |   4 +-
 .../Target/NVPTX/NVPTXSelectionDAGInfo.cpp    |   1 +
 llvm/lib/Target/NVPTX/NVPTXSelectionDAGInfo.h |   1 +
 .../Target/NVPTX/NVPTXTagInvariantLoads.cpp   |  23 +-
 .../Target/NVPTX/NVPTXTargetTransformInfo.cpp |  39 ++
 .../Target/NVPTX/NVPTXTargetTransformInfo.h   |   6 +
 .../Target/RISCV/RISCVTargetTransformInfo.h   |   6 +-
 llvm/lib/Target/VE/VETargetTransformInfo.h    |  10 +-
 .../lib/Target/X86/X86TargetTransformInfo.cpp |   6 +-
 llvm/lib/Target/X86/X86TargetTransformInfo.h  |  12 +-
 .../Scalar/ScalarizeMaskedMemIntrin.cpp       |  10 +-
 .../floating-point-immediate-operands.mir     |   8 +-
 llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll  |  16 +-
 .../NVPTX/machinelicm-no-preheader.mir        |  12 +-
 .../test/CodeGen/NVPTX/masked-load-vectors.ll | 366 ++++++++++++++++++
 .../NVPTX/masked-store-variable-mask.ll       |  56 +++
 .../CodeGen/NVPTX/masked-store-vectors-256.ll | 318 +++++++++++++++
 llvm/test/CodeGen/NVPTX/proxy-reg-erasure.mir |   4 +-
 36 files changed, 1196 insertions(+), 104 deletions(-)
 create mode 100644 llvm/test/CodeGen/NVPTX/masked-load-vectors.ll
 create mode 100644 llvm/test/CodeGen/NVPTX/masked-store-variable-mask.ll
 create mode 100644 llvm/test/CodeGen/NVPTX/masked-store-vectors-256.ll

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index a65e4667ab76c..a808979f385a6 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -842,12 +842,20 @@ class TargetTransformInfo {
   LLVM_ABI AddressingModeKind
   getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const;
 
+  /// Some targets only support masked load/store with a constant mask.
+  enum MaskKind {
+    VariableOrConstantMask,
+    ConstantMask,
+  };
+
   /// Return true if the target supports masked store.
-  LLVM_ABI bool isLegalMaskedStore(Type *DataType, Align Alignment,
-                                   unsigned AddressSpace) const;
+  LLVM_ABI bool
+  isLegalMaskedStore(Type *DataType, Align Alignment, unsigned AddressSpace,
+                     MaskKind MaskKind = VariableOrConstantMask) const;
   /// Return true if the target supports masked load.
-  LLVM_ABI bool isLegalMaskedLoad(Type *DataType, Align Alignment,
-                                  unsigned AddressSpace) const;
+  LLVM_ABI bool
+  isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned AddressSpace,
+                    MaskKind MaskKind = VariableOrConstantMask) const;
 
   /// Return true if the target supports nontemporal store.
   LLVM_ABI bool isLegalNTStore(Type *DataType, Align Alignment) const;
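
As a usage sketch (not code from this patch), a transform that only ever forms
compile-time-constant masks, such as the LoadStoreVectorizer, would query the
hook as follows; the wrapper is hypothetical, and the call mirrors the
ScalarizeMaskedMemIntrin change later in this series:

  #include "llvm/Analysis/TargetTransformInfo.h"
  using namespace llvm;

  static bool canUseConstantMaskedLoad(const TargetTransformInfo &TTI,
                                       Type *VecTy, Align Alignment,
                                       unsigned AddrSpace) {
    // Ask specifically about a constant mask; a target such as NVPTX can
    // accept this while still reporting variable-mask loads as illegal.
    return TTI.isLegalMaskedLoad(VecTy, Alignment, AddrSpace,
                                 TTI::MaskKind::ConstantMask);
  }

Omitting the last argument keeps the old, more conservative behaviour of
answering for an arbitrary (possibly variable) mask.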
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index d8e35748f53e5..af295fc28022b 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -309,12 +309,14 @@ class TargetTransformInfoImplBase {
   }
 
   virtual bool isLegalMaskedStore(Type *DataType, Align Alignment,
-                                  unsigned AddressSpace) const {
+                                  unsigned AddressSpace,
+                                  TTI::MaskKind MaskKind) const {
     return false;
   }
 
   virtual bool isLegalMaskedLoad(Type *DataType, Align Alignment,
-                                 unsigned AddressSpace) const {
+                                 unsigned AddressSpace,
+                                 TTI::MaskKind MaskKind) const {
     return false;
   }
 
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 45369f0ffe137..f9d330dfbd0ed 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -468,13 +468,17 @@ TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
 }
 
 bool TargetTransformInfo::isLegalMaskedStore(Type *DataType, Align Alignment,
-                                             unsigned AddressSpace) const {
-  return TTIImpl->isLegalMaskedStore(DataType, Alignment, AddressSpace);
+                                             unsigned AddressSpace,
+                                             TTI::MaskKind MaskKind) const {
+  return TTIImpl->isLegalMaskedStore(DataType, Alignment, AddressSpace,
+                                     MaskKind);
 }
 
 bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType, Align Alignment,
-                                            unsigned AddressSpace) const {
-  return TTIImpl->isLegalMaskedLoad(DataType, Alignment, AddressSpace);
+                                            unsigned AddressSpace,
+                                            TTI::MaskKind MaskKind) const {
+  return TTIImpl->isLegalMaskedLoad(DataType, Alignment, AddressSpace,
+                                    MaskKind);
 }
 
 bool TargetTransformInfo::isLegalNTStore(Type *DataType,
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 24a18e181ba80..4274e951446b8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -2465,6 +2465,7 @@ void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
   SDValue PassThru = MLD->getPassThru();
   Align Alignment = MLD->getBaseAlign();
   ISD::LoadExtType ExtType = MLD->getExtensionType();
+  MachineMemOperand::Flags MMOFlags = MLD->getMemOperand()->getFlags();
 
   // Split Mask operand
   SDValue MaskLo, MaskHi;
@@ -2490,9 +2491,8 @@ void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
     std::tie(PassThruLo, PassThruHi) = DAG.SplitVector(PassThru, dl);
 
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
-      MLD->getPointerInfo(), MachineMemOperand::MOLoad,
-      LocationSize::beforeOrAfterPointer(), Alignment, MLD->getAAInfo(),
-      MLD->getRanges());
+      MLD->getPointerInfo(), MMOFlags, LocationSize::beforeOrAfterPointer(),
+      Alignment, MLD->getAAInfo(), MLD->getRanges());
 
   Lo = DAG.getMaskedLoad(LoVT, dl, Ch, Ptr, Offset, MaskLo, PassThruLo, LoMemVT,
                          MMO, MLD->getAddressingMode(), ExtType,
@@ -2515,8 +2515,8 @@ void DAGTypeLegalizer::SplitVecRes_MLOAD(MaskedLoadSDNode *MLD,
           LoMemVT.getStoreSize().getFixedValue());
 
     MMO = DAG.getMachineFunction().getMachineMemOperand(
-        MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
-        Alignment, MLD->getAAInfo(), MLD->getRanges());
+        MPI, MMOFlags, LocationSize::beforeOrAfterPointer(), Alignment,
+        MLD->getAAInfo(), MLD->getRanges());
 
     Hi = DAG.getMaskedLoad(HiVT, dl, Ch, Ptr, Offset, MaskHi, PassThruHi,
                            HiMemVT, MMO, MLD->getAddressingMode(), ExtType,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 985a54ca83256..88b35582a9f7d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -5063,6 +5063,8 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
   auto MMOFlags = MachineMemOperand::MOLoad;
   if (I.hasMetadata(LLVMContext::MD_nontemporal))
     MMOFlags |= MachineMemOperand::MONonTemporal;
+  if (I.hasMetadata(LLVMContext::MD_invariant_load))
+    MMOFlags |= MachineMemOperand::MOInvariant;
 
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       MachinePointerInfo(PtrOperand), MMOFlags,
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index 6cc4987428567..52fc28a98449b 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -323,12 +323,14 @@ class AArch64TTIImpl final : public BasicTTIImplBase<AArch64TTIImpl> {
   }
 
   bool isLegalMaskedLoad(Type *DataType, Align Alignment,
-                         unsigned /*AddressSpace*/) const override {
+                         unsigned /*AddressSpace*/,
+                         TTI::MaskKind /*MaskKind*/) const override {
     return isLegalMaskedLoadStore(DataType, Alignment);
   }
 
   bool isLegalMaskedStore(Type *DataType, Align Alignment,
-                          unsigned /*AddressSpace*/) const override {
+                          unsigned /*AddressSpace*/,
+                          TTI::MaskKind /*MaskKind*/) const override {
     return isLegalMaskedLoadStore(DataType, Alignment);
   }
 
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index d12b802fe234f..fdb0ec40cb41f 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1125,7 +1125,8 @@ bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) const {
 }
 
 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment,
-                                   unsigned /*AddressSpace*/) const {
+                                   unsigned /*AddressSpace*/,
+                                   TTI::MaskKind /*MaskKind*/) const {
   if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
     return false;
 
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index 919a6fc9fd0b0..30f2151b41239 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -186,12 +186,16 @@ class ARMTTIImpl final : public BasicTTIImplBase<ARMTTIImpl> {
 
   bool isProfitableLSRChainElement(Instruction *I) const override;
 
-  bool isLegalMaskedLoad(Type *DataTy, Align Alignment,
-                         unsigned AddressSpace) const override;
-
-  bool isLegalMaskedStore(Type *DataTy, Align Alignment,
-                          unsigned AddressSpace) const override {
-    return isLegalMaskedLoad(DataTy, Alignment, AddressSpace);
+  bool
+  isLegalMaskedLoad(Type *DataTy, Align Alignment, unsigned AddressSpace,
+                    TTI::MaskKind MaskKind =
+                        TTI::MaskKind::VariableOrConstantMask) const override;
+
+  bool
+  isLegalMaskedStore(Type *DataTy, Align Alignment, unsigned AddressSpace,
+                     TTI::MaskKind MaskKind =
+                         TTI::MaskKind::VariableOrConstantMask) const override {
+    return isLegalMaskedLoad(DataTy, Alignment, AddressSpace, MaskKind);
   }
 
   bool forceScalarizeMaskedGather(VectorType *VTy,
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
index 8f3f0cc8abb01..3f84cbb6555ed 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
@@ -343,14 +343,16 @@ InstructionCost HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
 }
 
 bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/,
-                                        unsigned /*AddressSpace*/) const {
+                                        unsigned /*AddressSpace*/,
+                                        TTI::MaskKind /*MaskKind*/) const {
   // This function is called from scalarize-masked-mem-intrin, which runs
   // in pre-isel. Use ST directly instead of calling isHVXVectorType.
   return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
 }
 
 bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/,
-                                       unsigned /*AddressSpace*/) const {
+                                       unsigned /*AddressSpace*/,
+                                       TTI::MaskKind /*MaskKind*/) const {
   // This function is called from scalarize-masked-mem-intrin, which runs
   // in pre-isel. Use ST directly instead of calling isHVXVectorType.
   return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
index e95b5a10b76a7..67388984bb3e3 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
+++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.h
@@ -165,9 +165,10 @@ class HexagonTTIImpl final : public BasicTTIImplBase<HexagonTTIImpl> {
   }
 
   bool isLegalMaskedStore(Type *DataType, Align Alignment,
-                          unsigned AddressSpace) const override;
-  bool isLegalMaskedLoad(Type *DataType, Align Alignment,
-                         unsigned AddressSpace) const override;
+                          unsigned AddressSpace,
+                          TTI::MaskKind MaskKind) const override;
+  bool isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned AddressSpace,
+                         TTI::MaskKind MaskKind) const override;
   bool isLegalMaskedGather(Type *Ty, Align Alignment) const override;
   bool isLegalMaskedScatter(Type *Ty, Align Alignment) const override;
   bool forceScalarizeMaskedGather(VectorType *VTy,
diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
index 77913f27838e2..6f747b70100b7 100644
--- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
@@ -395,6 +395,26 @@ void NVPTXInstPrinter::printMemOperand(const MCInst *MI, int OpNum,
   }
 }
 
+void NVPTXInstPrinter::printUsedBytesMaskPragma(const MCInst *MI, int OpNum,
+                                                raw_ostream &O) {
+  auto &Op = MI->getOperand(OpNum);
+  assert(Op.isImm() && "Invalid operand");
+  uint32_t Imm = (uint32_t)Op.getImm();
+  if (Imm != UINT32_MAX) {
+    O << ".pragma \"used_bytes_mask " << format_hex(Imm, 1) << "\";\n\t";
+  }
+}
+
+void NVPTXInstPrinter::printRegisterOrSinkSymbol(const MCInst *MI, int OpNum,
+                                                 raw_ostream &O,
+                                                 const char *Modifier) {
+  const MCOperand &Op = MI->getOperand(OpNum);
+  if (Op.isReg() && Op.getReg() == MCRegister::NoRegister)
+    O << "_";
+  else
+    printOperand(MI, OpNum, O);
+}
+
 void NVPTXInstPrinter::printHexu32imm(const MCInst *MI, int OpNum,
                                       raw_ostream &O) {
   int64_t Imm = MI->getOperand(OpNum).getImm();
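
For intuition, printRegisterOrSinkSymbol is what lets the masked-store lowering
later in this patch drop masked-off lanes from a vector store: those operands
are the NoRegister sentinel and print as the "_" sink. An illustration only
(register numbers are made up, and the PTX line is approximate):

  #include <cstdio>
  #include <string>

  int main() {
    // Lanes 2 and 3 masked off, as in the v8i32 masked-store tests.
    const bool LaneEnabled[8] = {true, true, false, false,
                                 true, true, true,  true};
    std::string Operands;
    for (int I = 0; I < 8; ++I) {
      if (I)
        Operands += ", ";
      // Enabled lanes carry a register; masked-off lanes print as the sink.
      Operands += LaneEnabled[I] ? "%r" + std::to_string(I + 1)
                                 : std::string("_");
    }
    // Prints: st.global.v8.b32 [%rd1], {%r1, %r2, _, _, %r5, %r6, %r7, %r8};
    std::printf("st.global.v8.b32 [%%rd1], {%s};\n", Operands.c_str());
    return 0;
  }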
diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h
index 92155b01464e8..89137a954d2d8 100644
--- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h
+++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h
@@ -46,6 +46,9 @@ class NVPTXInstPrinter : public MCInstPrinter {
                     StringRef Modifier = {});
   void printMemOperand(const MCInst *MI, int OpNum, raw_ostream &O,
                        StringRef Modifier = {});
+  void printUsedBytesMaskPragma(const MCInst *MI, int OpNum, raw_ostream &O);
+  void printRegisterOrSinkSymbol(const MCInst *MI, int OpNum, raw_ostream &O,
+                                 const char *Modifier = nullptr);
   void printHexu32imm(const MCInst *MI, int OpNum, raw_ostream &O);
   void printProtoIdent(const MCInst *MI, int OpNum, raw_ostream &O);
   void printPrmtMode(const MCInst *MI, int OpNum, raw_ostream &O);
diff --git a/llvm/lib/Target/NVPTX/NVPTXForwardParams.cpp b/llvm/lib/Target/NVPTX/NVPTXForwardParams.cpp
index a3496090def3c..c8b53571c1e59 100644
--- a/llvm/lib/Target/NVPTX/NVPTXForwardParams.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXForwardParams.cpp
@@ -96,7 +96,7 @@ static bool eliminateMove(MachineInstr &Mov, const MachineRegisterInfo &MRI,
   const MachineOperand *ParamSymbol = Mov.uses().begin();
   assert(ParamSymbol->isSymbol());
 
-  constexpr unsigned LDInstBasePtrOpIdx = 5;
+  constexpr unsigned LDInstBasePtrOpIdx = 6;
   constexpr unsigned LDInstAddrSpaceOpIdx = 2;
   for (auto *LI : LoadInsts) {
     (LI->uses().begin() + LDInstBasePtrOpIdx)
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 996d653940118..0e1125ab8d8b3 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -105,6 +105,7 @@ void NVPTXDAGToDAGISel::Select(SDNode *N) {
   switch (N->getOpcode()) {
   case ISD::LOAD:
   case ISD::ATOMIC_LOAD:
+  case NVPTXISD::MLoad:
     if (tryLoad(N))
       return;
     break;
@@ -1132,6 +1133,19 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
           ? NVPTX::PTXLdStInstCode::Signed
           : NVPTX::PTXLdStInstCode::Untyped;
 
+  uint32_t UsedBytesMask;
+  switch (N->getOpcode()) {
+  case ISD::LOAD:
+  case ISD::ATOMIC_LOAD:
+    UsedBytesMask = UINT32_MAX;
+    break;
+  case NVPTXISD::MLoad:
+    UsedBytesMask = N->getConstantOperandVal(3);
+    break;
+  default:
+    llvm_unreachable("Unexpected opcode");
+  }
+
   assert(isPowerOf2_32(FromTypeWidth) && FromTypeWidth >= 8 &&
          FromTypeWidth <= 128 && "Invalid width for load");
 
@@ -1142,6 +1156,7 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
                    getI32Imm(CodeAddrSpace, DL),
                    getI32Imm(FromType, DL),
                    getI32Imm(FromTypeWidth, DL),
+                   getI32Imm(UsedBytesMask, DL),
                    Base,
                    Offset,
                    Chain};
@@ -1196,14 +1211,14 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
   //          type is integer
   // Float  : ISD::NON_EXTLOAD or ISD::EXTLOAD and the type is float
   // Read at least 8 bits (predicates are stored as 8-bit values)
-  // The last operand holds the original LoadSDNode::getExtensionType() value
-  const unsigned ExtensionType =
-      N->getConstantOperandVal(N->getNumOperands() - 1);
+  // Get the original LoadSDNode::getExtensionType() value
+  const unsigned ExtensionType = N->getConstantOperandVal(4);
   const unsigned FromType = (ExtensionType == ISD::SEXTLOAD)
                                 ? NVPTX::PTXLdStInstCode::Signed
                                 : NVPTX::PTXLdStInstCode::Untyped;
 
   const unsigned FromTypeWidth = getFromTypeWidthForLoad(LD);
+  const uint32_t UsedBytesMask = N->getConstantOperandVal(3);
 
   assert(!(EltVT.isVector() && ExtensionType != ISD::NON_EXTLOAD));
 
@@ -1213,6 +1228,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
                    getI32Imm(CodeAddrSpace, DL),
                    getI32Imm(FromType, DL),
                    getI32Imm(FromTypeWidth, DL),
+                   getI32Imm(UsedBytesMask, DL),
                    Base,
                    Offset,
                    Chain};
@@ -1250,10 +1266,13 @@ bool NVPTXDAGToDAGISel::tryLDG(MemSDNode *LD) {
   SDLoc DL(LD);
 
   unsigned ExtensionType;
+  uint32_t UsedBytesMask;
   if (const auto *Load = dyn_cast<LoadSDNode>(LD)) {
     ExtensionType = Load->getExtensionType();
+    UsedBytesMask = UINT32_MAX;
   } else {
-    ExtensionType = LD->getConstantOperandVal(LD->getNumOperands() - 1);
+    ExtensionType = LD->getConstantOperandVal(4);
+    UsedBytesMask = LD->getConstantOperandVal(3);
   }
   const unsigned FromType = (ExtensionType == ISD::SEXTLOAD)
                                 ? NVPTX::PTXLdStInstCode::Signed
@@ -1265,8 +1284,12 @@ bool NVPTXDAGToDAGISel::tryLDG(MemSDNode *LD) {
            ExtensionType != ISD::NON_EXTLOAD));
 
   const auto [Base, Offset] = selectADDR(LD->getOperand(1), CurDAG);
-  SDValue Ops[] = {getI32Imm(FromType, DL), getI32Imm(FromTypeWidth, DL), Base,
-                   Offset, LD->getChain()};
+  SDValue Ops[] = {getI32Imm(FromType, DL),
+                   getI32Imm(FromTypeWidth, DL),
+                   getI32Imm(UsedBytesMask, DL),
+                   Base,
+                   Offset,
+                   LD->getChain()};
 
   const MVT::SimpleValueType TargetVT = LD->getSimpleValueType(0).SimpleTy;
   std::optional<unsigned> Opcode;
@@ -1277,6 +1300,10 @@ bool NVPTXDAGToDAGISel::tryLDG(MemSDNode *LD) {
     Opcode = pickOpcodeForVT(TargetVT, NVPTX::LD_GLOBAL_NC_i16,
                              NVPTX::LD_GLOBAL_NC_i32, NVPTX::LD_GLOBAL_NC_i64);
     break;
+  case NVPTXISD::MLoad:
+    Opcode = pickOpcodeForVT(TargetVT, std::nullopt, NVPTX::LD_GLOBAL_NC_i32,
+                             NVPTX::LD_GLOBAL_NC_i64);
+    break;
   case NVPTXISD::LoadV2:
     Opcode =
         pickOpcodeForVT(TargetVT, NVPTX::LD_GLOBAL_NC_v2i16,
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index a77eb0240e677..e9026cdf3d699 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -771,7 +771,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
   setOperationAction({ISD::LOAD, ISD::STORE}, {MVT::i128, MVT::f128}, Custom);
   for (MVT VT : MVT::fixedlen_vector_valuetypes())
     if (!isTypeLegal(VT) && VT.getStoreSizeInBits() <= 256)
-      setOperationAction({ISD::STORE, ISD::LOAD}, VT, Custom);
+      setOperationAction({ISD::STORE, ISD::LOAD, ISD::MSTORE, ISD::MLOAD}, VT,
+                         Custom);
 
   // Custom legalization for LDU intrinsics.
   // TODO: The logic to lower these is not very robust and we should rewrite it.
@@ -3092,6 +3093,86 @@ static SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) {
   return Or;
 }
 
+static SDValue lowerMSTORE(SDValue Op, SelectionDAG &DAG) {
+  SDNode *N = Op.getNode();
+
+  SDValue Chain = N->getOperand(0);
+  SDValue Val = N->getOperand(1);
+  SDValue BasePtr = N->getOperand(2);
+  SDValue Offset = N->getOperand(3);
+  SDValue Mask = N->getOperand(4);
+
+  SDLoc DL(N);
+  EVT ValVT = Val.getValueType();
+  MemSDNode *MemSD = cast<MemSDNode>(N);
+  assert(ValVT.isVector() && "Masked vector store must have vector type");
+  assert(MemSD->getAlign() >= DAG.getEVTAlign(ValVT) &&
+         "Unexpected alignment for masked store");
+
+  unsigned Opcode = 0;
+  switch (ValVT.getSimpleVT().SimpleTy) {
+  default:
+    llvm_unreachable("Unexpected masked vector store type");
+  case MVT::v4i64:
+  case MVT::v4f64: {
+    Opcode = NVPTXISD::StoreV4;
+    break;
+  }
+  case MVT::v8i32:
+  case MVT::v8f32: {
+    Opcode = NVPTXISD::StoreV8;
+    break;
+  }
+  }
+
+  SmallVector<SDValue, 8> Ops;
+
+  // Construct the new SDNode. First operand is the chain.
+  Ops.push_back(Chain);
+
+  // The next N operands are the values to store. Encode the mask into the
+  // values using the sentinel register 0 to represent a masked-off element.
+  assert(Mask.getValueType().isVector() &&
+         Mask.getValueType().getVectorElementType() == MVT::i1 &&
+         "Mask must be a vector of i1");
+  assert(Mask.getOpcode() == ISD::BUILD_VECTOR &&
+         "Mask expected to be a BUILD_VECTOR");
+  assert(Mask.getValueType().getVectorNumElements() ==
+             ValVT.getVectorNumElements() &&
+         "Mask size must be the same as the vector size");
+  for (auto [I, Op] : enumerate(Mask->ops())) {
+    // Mask elements must be constants.
+    if (Op.getNode()->getAsZExtVal() == 0) {
+      // Append a sentinel register 0 to the Ops vector to represent a
+      // masked-off element; this will be handled in TableGen.
+      Ops.push_back(DAG.getRegister(MCRegister::NoRegister,
+                                    ValVT.getVectorElementType()));
+    } else {
+      // Extract the element from the vector to store
+      SDValue ExtVal =
+          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ValVT.getVectorElementType(),
+                      Val, DAG.getIntPtrConstant(I, DL));
+      Ops.push_back(ExtVal);
+    }
+  }
+
+  // Next, the pointer operand.
+  Ops.push_back(BasePtr);
+
+  // Finally, the offset operand. We expect this to always be undef, and it will
+  // be ignored in lowering, but to mirror the handling of the other vector
+  // store instructions we include it in the new SDNode.
+  assert(Offset.getOpcode() == ISD::UNDEF &&
+         "Offset operand expected to be undef");
+  Ops.push_back(Offset);
+
+  SDValue NewSt =
+      DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
+                              MemSD->getMemoryVT(), MemSD->getMemOperand());
+
+  return NewSt;
+}
+
 SDValue
 NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   switch (Op.getOpcode()) {
@@ -3128,8 +3209,16 @@ NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
     return LowerVECREDUCE(Op, DAG);
   case ISD::STORE:
     return LowerSTORE(Op, DAG);
+  case ISD::MSTORE: {
+    assert(STI.has256BitVectorLoadStore(
+               cast<MemSDNode>(Op.getNode())->getAddressSpace()) &&
+           "Masked store vector not supported on subtarget.");
+    return lowerMSTORE(Op, DAG);
+  }
   case ISD::LOAD:
     return LowerLOAD(Op, DAG);
+  case ISD::MLOAD:
+    return LowerMLOAD(Op, DAG);
   case ISD::SHL_PARTS:
     return LowerShiftLeftParts(Op, DAG);
   case ISD::SRA_PARTS:
@@ -3321,10 +3410,56 @@ SDValue NVPTXTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
                       MachinePointerInfo(SV));
 }
 
+static std::pair<MemSDNode *, uint32_t>
+convertMLOADToLoadWithUsedBytesMask(MemSDNode *N, SelectionDAG &DAG) {
+  SDValue Chain = N->getOperand(0);
+  SDValue BasePtr = N->getOperand(1);
+  SDValue Mask = N->getOperand(3);
+  SDValue Passthru = N->getOperand(4);
+
+  SDLoc DL(N);
+  EVT ResVT = N->getValueType(0);
+  assert(ResVT.isVector() && "Masked vector load must have vector type");
+  // While we only expect poison passthru vectors as an input to the backend,
+  // when the legalization framework splits a poison vector in half, it creates
+  // two undef vectors, so we can technically expect those too.
+  assert((Passthru.getOpcode() == ISD::POISON ||
+          Passthru.getOpcode() == ISD::UNDEF) &&
+         "Passthru operand expected to be poison or undef");
+
+  // Extract the mask and convert it to a uint32_t representing the used bytes
+  // of the entire vector load
+  uint32_t UsedBytesMask = 0;
+  uint32_t ElementSizeInBits = ResVT.getVectorElementType().getSizeInBits();
+  assert(ElementSizeInBits % 8 == 0 && "Unexpected element size");
+  uint32_t ElementSizeInBytes = ElementSizeInBits / 8;
+  uint32_t ElementMask = (1u << ElementSizeInBytes) - 1u;
+
+  for (SDValue Op : reverse(Mask->ops())) {
+    // We technically only want to do this shift for every
+    // iteration *but* the first, but in the first iteration UsedBytesMask is 0,
+    // so this shift is a no-op.
+    UsedBytesMask <<= ElementSizeInBytes;
+
+    // Mask elements must be constants.
+    if (Op->getAsZExtVal() != 0)
+      UsedBytesMask |= ElementMask;
+  }
+
+  assert(UsedBytesMask != 0 && UsedBytesMask != UINT32_MAX &&
+         "Unexpected masked load with elements masked all on or all off");
+
+  // Create a new load SDNode to be handled normally by replaceLoadVector.
+  MemSDNode *NewLD = cast<MemSDNode>(
+      DAG.getLoad(ResVT, DL, Chain, BasePtr, N->getMemOperand()).getNode());
+
+  return {NewLD, UsedBytesMask};
+}
+
 /// replaceLoadVector - Convert vector loads into multi-output scalar loads.
 static std::optional<std::pair<SDValue, SDValue>>
 replaceLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI) {
-  LoadSDNode *LD = cast<LoadSDNode>(N);
+  MemSDNode *LD = cast<MemSDNode>(N);
   const EVT ResVT = LD->getValueType(0);
   const EVT MemVT = LD->getMemoryVT();
 
@@ -3351,6 +3486,11 @@ replaceLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI) {
     return std::nullopt;
   }
 
+  // If we have a masked load, convert it to a normal load now
+  std::optional<uint32_t> UsedBytesMask = std::nullopt;
+  if (LD->getOpcode() == ISD::MLOAD)
+    std::tie(LD, UsedBytesMask) = convertMLOADToLoadWithUsedBytesMask(LD, DAG);
+
   // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
   // Therefore, we must ensure the type is legal.  For i1 and i8, we set the
   // loaded type to i16 and propagate the "real" type as the memory type.
@@ -3379,9 +3519,13 @@ replaceLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI) {
   // Copy regular operands
   SmallVector<SDValue, 8> OtherOps(LD->ops());
 
+  OtherOps.push_back(
+      DAG.getConstant(UsedBytesMask.value_or(UINT32_MAX), DL, MVT::i32));
+
   // The select routine does not have access to the LoadSDNode instance, so
   // pass along the extension information
-  OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));
+  OtherOps.push_back(
+      DAG.getIntPtrConstant(cast<LoadSDNode>(LD)->getExtensionType(), DL));
 
   SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps, MemVT,
                                           LD->getMemOperand());
@@ -3469,6 +3613,42 @@ SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   llvm_unreachable("Unexpected custom lowering for load");
 }
 
+SDValue NVPTXTargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
+  // v2f16/v2bf16/v2i16/v4i8 are legal, so we can't rely on the legalizer to
+  // handle masked loads of these types and have to handle them here.
+  // v2f32 also needs to be handled here if the subtarget has f32x2
+  // instructions, making it legal.
+  //
+  // Note: misaligned masked loads should never reach this point
+  // because the override of isLegalMaskedLoad in NVPTXTargetTransformInfo.cpp
+  // will validate alignment. Therefore, we do not need to special-case
+  // them here.
+  EVT VT = Op.getValueType();
+  if (NVPTX::isPackedVectorTy(VT)) {
+    auto Result =
+        convertMLOADToLoadWithUsedBytesMask(cast<MemSDNode>(Op.getNode()), DAG);
+    MemSDNode *LD = std::get<0>(Result);
+    uint32_t UsedBytesMask = std::get<1>(Result);
+
+    SDLoc DL(LD);
+
+    // Copy regular operands
+    SmallVector<SDValue, 8> OtherOps(LD->ops());
+
+    OtherOps.push_back(DAG.getConstant(UsedBytesMask, DL, MVT::i32));
+
+    // We currently are not lowering extending loads, but pass the extension
+    // type anyway as later handling expects it.
+    OtherOps.push_back(
+        DAG.getIntPtrConstant(cast<LoadSDNode>(LD)->getExtensionType(), DL));
+    SDValue NewLD =
+        DAG.getMemIntrinsicNode(NVPTXISD::MLoad, DL, LD->getVTList(), OtherOps,
+                                LD->getMemoryVT(), LD->getMemOperand());
+    return NewLD;
+  }
+  return SDValue();
+}
+
 static SDValue lowerSTOREVector(SDValue Op, SelectionDAG &DAG,
                                 const NVPTXSubtarget &STI) {
   MemSDNode *N = cast<MemSDNode>(Op.getNode());
@@ -5377,6 +5557,9 @@ combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
     // ISD::LOAD -> NVPTXISD::Load (unless it's under-aligned). We have to do it
     // here.
     Opcode = NVPTXISD::LoadV2;
+    // Append a "full" used-bytes-mask operand right before the extension type
+    // operand, signifying that all bytes are used.
+    Operands.push_back(DCI.DAG.getConstant(UINT32_MAX, DL, MVT::i32));
     Operands.push_back(DCI.DAG.getIntPtrConstant(
         cast<LoadSDNode>(LD)->getExtensionType(), DL));
     break;
@@ -5385,9 +5568,9 @@ combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
     Opcode = NVPTXISD::LoadV4;
     break;
   case NVPTXISD::LoadV4:
-    // V8 is only supported for f32. Don't forget, we're not changing the load
-    // size here. This is already a 256-bit load.
-    if (ElementVT != MVT::v2f32)
+    // V8 is only supported for f32/i32. Don't forget, we're not changing the
+    // load size here. This is already a 256-bit load.
+    if (ElementVT != MVT::v2f32 && ElementVT != MVT::v2i32)
       return SDValue();
     OldNumOutputs = 4;
     Opcode = NVPTXISD::LoadV8;
@@ -5462,9 +5645,9 @@ static SDValue combinePackingMovIntoStore(SDNode *N,
     Opcode = NVPTXISD::StoreV4;
     break;
   case NVPTXISD::StoreV4:
-    // V8 is only supported for f32. Don't forget, we're not changing the store
-    // size here. This is already a 256-bit store.
-    if (ElementVT != MVT::v2f32)
+    // V8 is only supported for f32/i32. Don't forget, we're not changing the
+    // store size here. This is already a 256-bit store.
+    if (ElementVT != MVT::v2f32 && ElementVT != MVT::v2i32)
       return SDValue();
     Opcode = NVPTXISD::StoreV8;
     break;
@@ -6615,6 +6798,7 @@ void NVPTXTargetLowering::ReplaceNodeResults(
     ReplaceBITCAST(N, DAG, Results);
     return;
   case ISD::LOAD:
+  case ISD::MLOAD:
     replaceLoadVector(N, DAG, Results, STI);
     return;
   case ISD::INTRINSIC_W_CHAIN:
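
To make the encoding concrete, here is a self-contained sketch of the
used-bytes-mask computation performed by convertMLOADToLoadWithUsedBytesMask:
one mask bit per byte of the vector access, set exactly for the bytes that
belong to enabled mask elements.

  #include <cstdint>
  #include <vector>

  static uint32_t usedBytesMask(const std::vector<bool> &ElemMask,
                                uint32_t ElemSizeInBytes) {
    uint32_t Mask = 0;
    const uint32_t PerElem = (1u << ElemSizeInBytes) - 1u;
    // Walk the element mask from the last lane to the first so that lane 0
    // ends up in the low bits, matching the lowering above.
    for (auto It = ElemMask.rbegin(); It != ElemMask.rend(); ++It) {
      Mask <<= ElemSizeInBytes; // no-op on the first iteration
      if (*It)
        Mask |= PerElem;
    }
    return Mask;
  }

For the <8 x i32> masked loads with mask <1, 1, 0, 0, 1, 1, 1, 1> used in the
tests, this yields 0xffff00ff, which printUsedBytesMaskPragma then emits
roughly as .pragma "used_bytes_mask 0xffff00ff"; ahead of the 256-bit load.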
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
index d71a86fd463f6..dd8e49de7aa6a 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -235,6 +235,7 @@ class NVPTXTargetLowering : public TargetLowering {
   SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
 
   SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const;
 
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 8b129e7e5eeae..77fdf6911a420 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -1588,6 +1588,14 @@ def ADDR : Operand<pAny> {
   let MIOperandInfo = (ops ADDR_base, i32imm);
 }
 
+def UsedBytesMask : Operand<i32> {
+  let PrintMethod = "printUsedBytesMaskPragma";
+}
+
+def RegOrSink : Operand<Any> {
+  let PrintMethod = "printRegisterOrSinkSymbol";
+}
+
 def AtomicCode : Operand<i32> {
   let PrintMethod = "printAtomicCode";
 }
@@ -1832,8 +1840,10 @@ def Callseq_End :
 class LD<NVPTXRegClass regclass>
   : NVPTXInst<
     (outs regclass:$dst),
-    (ins AtomicCode:$sem, AtomicCode:$scope, AtomicCode:$addsp, AtomicCode:$Sign,
-         i32imm:$fromWidth, ADDR:$addr),
+    (ins AtomicCode:$sem, AtomicCode:$scope, AtomicCode:$addsp,
+         AtomicCode:$Sign, i32imm:$fromWidth, UsedBytesMask:$usedBytes,
+         ADDR:$addr),
+    "${usedBytes}"
     "ld${sem:sem}${scope:scope}${addsp:addsp}.${Sign:sign}$fromWidth "
     "\t$dst, [$addr];">;
 
@@ -1865,21 +1875,27 @@ multiclass LD_VEC<NVPTXRegClass regclass, bit support_v8 = false> {
   def _v2 : NVPTXInst<
     (outs regclass:$dst1, regclass:$dst2),
     (ins AtomicCode:$sem, AtomicCode:$scope, AtomicCode:$addsp,
-         AtomicCode:$Sign, i32imm:$fromWidth, ADDR:$addr),
+         AtomicCode:$Sign, i32imm:$fromWidth, UsedBytesMask:$usedBytes,
+         ADDR:$addr),
+    "${usedBytes}"
     "ld${sem:sem}${scope:scope}${addsp:addsp}.v2.${Sign:sign}$fromWidth "
     "\t{{$dst1, $dst2}}, [$addr];">;
   def _v4 : NVPTXInst<
     (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
     (ins AtomicCode:$sem, AtomicCode:$scope, AtomicCode:$addsp,
-         AtomicCode:$Sign, i32imm:$fromWidth, ADDR:$addr),
+         AtomicCode:$Sign, i32imm:$fromWidth, UsedBytesMask:$usedBytes,
+         ADDR:$addr),
+    "${usedBytes}"
     "ld${sem:sem}${scope:scope}${addsp:addsp}.v4.${Sign:sign}$fromWidth "
     "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];">;
   if support_v8 then
     def _v8 : NVPTXInst<
       (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4,
             regclass:$dst5, regclass:$dst6, regclass:$dst7, regclass:$dst8),
-      (ins AtomicCode:$sem, AtomicCode:$scope, AtomicCode:$addsp, AtomicCode:$Sign,
-           i32imm:$fromWidth, ADDR:$addr),
+      (ins AtomicCode:$sem, AtomicCode:$scope, AtomicCode:$addsp,
+           AtomicCode:$Sign, i32imm:$fromWidth, UsedBytesMask:$usedBytes,
+           ADDR:$addr),
+      "${usedBytes}"
       "ld${sem:sem}${scope:scope}${addsp:addsp}.v8.${Sign:sign}$fromWidth "
       "\t{{$dst1, $dst2, $dst3, $dst4, $dst5, $dst6, $dst7, $dst8}}, "
       "[$addr];">;
@@ -1900,7 +1916,7 @@ multiclass ST_VEC<DAGOperand O, bit support_v8 = false> {
     "\t[$addr], {{$src1, $src2}};">;
   def _v4 : NVPTXInst<
     (outs),
-    (ins O:$src1, O:$src2, O:$src3, O:$src4,
+    (ins RegOrSink:$src1, RegOrSink:$src2, RegOrSink:$src3, RegOrSink:$src4,
          AtomicCode:$sem, AtomicCode:$scope, AtomicCode:$addsp, i32imm:$fromWidth,
          ADDR:$addr),
     "st${sem:sem}${scope:scope}${addsp:addsp}.v4.b$fromWidth "
@@ -1908,8 +1924,8 @@ multiclass ST_VEC<DAGOperand O, bit support_v8 = false> {
   if support_v8 then
     def _v8 : NVPTXInst<
       (outs),
-      (ins O:$src1, O:$src2, O:$src3, O:$src4,
-           O:$src5, O:$src6, O:$src7, O:$src8,
+      (ins RegOrSink:$src1, RegOrSink:$src2, RegOrSink:$src3, RegOrSink:$src4,
+           RegOrSink:$src5, RegOrSink:$src6, RegOrSink:$src7, RegOrSink:$src8,
            AtomicCode:$sem, AtomicCode:$scope, AtomicCode:$addsp, i32imm:$fromWidth,
            ADDR:$addr),
       "st${sem:sem}${scope:scope}${addsp:addsp}.v8.b$fromWidth "
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 8501d4d7bb86f..d18c7e20df038 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -2552,7 +2552,10 @@ def LDU_GLOBAL_v4i32 : VLDU_G_ELE_V4<B32>;
 // during the lifetime of the kernel.
 
 class LDG_G<NVPTXRegClass regclass>
-  : NVPTXInst<(outs regclass:$result), (ins AtomicCode:$Sign, i32imm:$fromWidth, ADDR:$src),
+  : NVPTXInst<(outs regclass:$result),
+              (ins AtomicCode:$Sign, i32imm:$fromWidth,
+                   UsedBytesMask:$usedBytes, ADDR:$src),
+               "${usedBytes}"
                "ld.global.nc.${Sign:sign}$fromWidth \t$result, [$src];">;
 
 def LD_GLOBAL_NC_i16 : LDG_G<B16>;
@@ -2564,19 +2567,25 @@ def LD_GLOBAL_NC_i64 : LDG_G<B64>;
 // Elementized vector ldg
 class VLDG_G_ELE_V2<NVPTXRegClass regclass> :
   NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
-            (ins AtomicCode:$Sign, i32imm:$fromWidth, ADDR:$src),
+            (ins AtomicCode:$Sign, i32imm:$fromWidth, UsedBytesMask:$usedBytes,
+             ADDR:$src),
+            "${usedBytes}"
             "ld.global.nc.v2.${Sign:sign}$fromWidth \t{{$dst1, $dst2}}, [$src];">;
 
 
 class VLDG_G_ELE_V4<NVPTXRegClass regclass> :
   NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4), 
-            (ins AtomicCode:$Sign, i32imm:$fromWidth, ADDR:$src),
+            (ins AtomicCode:$Sign, i32imm:$fromWidth, UsedBytesMask:$usedBytes,
+             ADDR:$src),
+            "${usedBytes}"
             "ld.global.nc.v4.${Sign:sign}$fromWidth \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];">;
 
 class VLDG_G_ELE_V8<NVPTXRegClass regclass> :
   NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4,
                   regclass:$dst5, regclass:$dst6, regclass:$dst7, regclass:$dst8),
-             (ins AtomicCode:$Sign, i32imm:$fromWidth, ADDR:$src),
+            (ins AtomicCode:$Sign, i32imm:$fromWidth, UsedBytesMask:$usedBytes,
+             ADDR:$src),
+            "${usedBytes}"
              "ld.global.nc.v8.${Sign:sign}$fromWidth \t{{$dst1, $dst2, $dst3, $dst4, $dst5, $dst6, $dst7, $dst8}}, [$src];">;
 
 // FIXME: 8-bit LDG should be fixed once LDG/LDU nodes are made into proper loads.
diff --git a/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp b/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
index 320c0fb6950a7..4bbf49f93f43b 100644
--- a/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
@@ -1808,8 +1808,8 @@ bool NVPTXReplaceImageHandles::replaceImageHandle(MachineOperand &Op,
       // For CUDA, we preserve the param loads coming from function arguments
       return false;
 
-    assert(TexHandleDef.getOperand(6).isSymbol() && "Load is not a symbol!");
-    StringRef Sym = TexHandleDef.getOperand(6).getSymbolName();
+    assert(TexHandleDef.getOperand(7).isSymbol() && "Load is not a symbol!");
+    StringRef Sym = TexHandleDef.getOperand(7).getSymbolName();
     InstrsToRemove.insert(&TexHandleDef);
     Op.ChangeToES(Sym.data());
     MFI->getImageHandleSymbolIndex(Sym);
diff --git a/llvm/lib/Target/NVPTX/NVPTXSelectionDAGInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXSelectionDAGInfo.cpp
index e8ea1ad6c404d..710d063e75725 100644
--- a/llvm/lib/Target/NVPTX/NVPTXSelectionDAGInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXSelectionDAGInfo.cpp
@@ -30,6 +30,7 @@ const char *NVPTXSelectionDAGInfo::getTargetNodeName(unsigned Opcode) const {
     MAKE_CASE(NVPTXISD::LoadV2)
     MAKE_CASE(NVPTXISD::LoadV4)
     MAKE_CASE(NVPTXISD::LoadV8)
+    MAKE_CASE(NVPTXISD::MLoad)
     MAKE_CASE(NVPTXISD::LDUV2)
     MAKE_CASE(NVPTXISD::LDUV4)
     MAKE_CASE(NVPTXISD::StoreV2)
diff --git a/llvm/lib/Target/NVPTX/NVPTXSelectionDAGInfo.h b/llvm/lib/Target/NVPTX/NVPTXSelectionDAGInfo.h
index 07c130baeaa4f..9dd0a1eaa5856 100644
--- a/llvm/lib/Target/NVPTX/NVPTXSelectionDAGInfo.h
+++ b/llvm/lib/Target/NVPTX/NVPTXSelectionDAGInfo.h
@@ -36,6 +36,7 @@ enum NodeType : unsigned {
   LoadV2,
   LoadV4,
   LoadV8,
+  MLoad,
   LDUV2, // LDU.v2
   LDUV4, // LDU.v4
   StoreV2,
diff --git a/llvm/lib/Target/NVPTX/NVPTXTagInvariantLoads.cpp b/llvm/lib/Target/NVPTX/NVPTXTagInvariantLoads.cpp
index a4aff44ac04f6..ed5e943946fef 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTagInvariantLoads.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTagInvariantLoads.cpp
@@ -27,13 +27,14 @@
 
 using namespace llvm;
 
-static bool isInvariantLoad(const LoadInst *LI, const bool IsKernelFn) {
+static bool isInvariantLoad(const Instruction *I, const Value *Ptr,
+                            const bool IsKernelFn) {
   // Don't bother with non-global loads
-  if (LI->getPointerAddressSpace() != NVPTXAS::ADDRESS_SPACE_GLOBAL)
+  if (Ptr->getType()->getPointerAddressSpace() != NVPTXAS::ADDRESS_SPACE_GLOBAL)
     return false;
 
   // If the load is already marked as invariant, we don't need to do anything
-  if (LI->getMetadata(LLVMContext::MD_invariant_load))
+  if (I->getMetadata(LLVMContext::MD_invariant_load))
     return false;
 
   // We use getUnderlyingObjects() here instead of getUnderlyingObject()
@@ -41,7 +42,7 @@ static bool isInvariantLoad(const LoadInst *LI, const bool IsKernelFn) {
   // not. We need to look through phi nodes to handle pointer induction
   // variables.
   SmallVector<const Value *, 8> Objs;
-  getUnderlyingObjects(LI->getPointerOperand(), Objs);
+  getUnderlyingObjects(Ptr, Objs);
 
   return all_of(Objs, [&](const Value *V) {
     if (const auto *A = dyn_cast<const Argument>(V))
@@ -53,9 +54,9 @@ static bool isInvariantLoad(const LoadInst *LI, const bool IsKernelFn) {
   });
 }
 
-static void markLoadsAsInvariant(LoadInst *LI) {
-  LI->setMetadata(LLVMContext::MD_invariant_load,
-                  MDNode::get(LI->getContext(), {}));
+static void markLoadsAsInvariant(Instruction *I) {
+  I->setMetadata(LLVMContext::MD_invariant_load,
+                 MDNode::get(I->getContext(), {}));
 }
 
 static bool tagInvariantLoads(Function &F) {
@@ -64,10 +65,16 @@ static bool tagInvariantLoads(Function &F) {
   bool Changed = false;
   for (auto &I : instructions(F)) {
     if (auto *LI = dyn_cast<LoadInst>(&I)) {
-      if (isInvariantLoad(LI, IsKernelFn)) {
+      if (isInvariantLoad(LI, LI->getPointerOperand(), IsKernelFn)) {
         markLoadsAsInvariant(LI);
         Changed = true;
       }
+    } else if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
+      if (II->getIntrinsicID() == Intrinsic::masked_load &&
+          isInvariantLoad(II, II->getOperand(0), IsKernelFn)) {
+        markLoadsAsInvariant(II);
+        Changed = true;
+      }
     }
   }
   return Changed;
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
index 64593e6439184..5d5553c573b0f 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
@@ -592,6 +592,45 @@ Value *NVPTXTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
   return nullptr;
 }
 
+bool NVPTXTTIImpl::isLegalMaskedStore(Type *DataTy, Align Alignment,
+                                      unsigned AddrSpace,
+                                      TTI::MaskKind MaskKind) const {
+  if (MaskKind != TTI::MaskKind::ConstantMask)
+    return false;
+
+  // We currently only support this feature for 256-bit vectors, so the
+  // alignment must be at least 32 bytes.
+  if (Alignment < 32)
+    return false;
+
+  if (!ST->has256BitVectorLoadStore(AddrSpace))
+    return false;
+
+  auto *VTy = dyn_cast<FixedVectorType>(DataTy);
+  if (!VTy)
+    return false;
+
+  auto *ElemTy = VTy->getScalarType();
+  return (ElemTy->getScalarSizeInBits() == 32 && VTy->getNumElements() == 8) ||
+         (ElemTy->getScalarSizeInBits() == 64 && VTy->getNumElements() == 4);
+}
+
+bool NVPTXTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment,
+                                     unsigned /*AddrSpace*/,
+                                     TTI::MaskKind MaskKind) const {
+  if (MaskKind != TTI::MaskKind::ConstantMask)
+    return false;
+
+  if (Alignment < DL.getTypeStoreSize(DataTy))
+    return false;
+
+  // We do not support sub-byte element type masked loads.
+  auto *VTy = dyn_cast<FixedVectorType>(DataTy);
+  if (!VTy)
+    return false;
+  return VTy->getElementType()->getScalarSizeInBits() >= 8;
+}
+
 unsigned NVPTXTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
   // 256 bit loads/stores are currently only supported for global address space
   if (ST->has256BitVectorLoadStore(AddrSpace))
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
index 78eb751cf3c2e..d7f4e1da4073b 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.h
@@ -181,6 +181,12 @@ class NVPTXTTIImpl final : public BasicTTIImplBase<NVPTXTTIImpl> {
   bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                   Intrinsic::ID IID) const override;
 
+  bool isLegalMaskedStore(Type *DataType, Align Alignment, unsigned AddrSpace,
+                          TTI::MaskKind MaskKind) const override;
+
+  bool isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned AddrSpace,
+                         TTI::MaskKind MaskKind) const override;
+
   unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const override;
 
   Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 39c1173e2986c..484c4791390ac 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -285,11 +285,13 @@ class RISCVTTIImpl final : public BasicTTIImplBase<RISCVTTIImpl> {
   }
 
   bool isLegalMaskedLoad(Type *DataType, Align Alignment,
-                         unsigned /*AddressSpace*/) const override {
+                         unsigned /*AddressSpace*/,
+                         TTI::MaskKind /*MaskKind*/) const override {
     return isLegalMaskedLoadStore(DataType, Alignment);
   }
   bool isLegalMaskedStore(Type *DataType, Align Alignment,
-                          unsigned /*AddressSpace*/) const override {
+                          unsigned /*AddressSpace*/,
+                          TTI::MaskKind /*MaskKind*/) const override {
     return isLegalMaskedLoadStore(DataType, Alignment);
   }
 
diff --git a/llvm/lib/Target/VE/VETargetTransformInfo.h b/llvm/lib/Target/VE/VETargetTransformInfo.h
index 5c0ddca62c761..eed3832c9f1fb 100644
--- a/llvm/lib/Target/VE/VETargetTransformInfo.h
+++ b/llvm/lib/Target/VE/VETargetTransformInfo.h
@@ -134,12 +134,14 @@ class VETTIImpl final : public BasicTTIImplBase<VETTIImpl> {
   }
 
   // Load & Store {
-  bool isLegalMaskedLoad(Type *DataType, Align Alignment,
-                         unsigned /*AddressSpace*/) const override {
+  bool
+  isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned /*AddressSpace*/,
+                    TargetTransformInfo::MaskKind /*MaskKind*/) const override {
     return isVectorLaneType(*getLaneType(DataType));
   }
-  bool isLegalMaskedStore(Type *DataType, Align Alignment,
-                          unsigned /*AddressSpace*/) const override {
+  bool isLegalMaskedStore(
+      Type *DataType, Align Alignment, unsigned /*AddressSpace*/,
+      TargetTransformInfo::MaskKind /*MaskKind*/) const override {
     return isVectorLaneType(*getLaneType(DataType));
   }
   bool isLegalMaskedGather(Type *DataType, Align Alignment) const override {
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index 4b77bf925b2ba..10a6b654a037d 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -6322,7 +6322,8 @@ static bool isLegalMaskedLoadStore(Type *ScalarTy, const X86Subtarget *ST) {
 }
 
 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment,
-                                   unsigned AddressSpace) const {
+                                   unsigned AddressSpace,
+                                   TTI::MaskKind MaskKind) const {
   Type *ScalarTy = DataTy->getScalarType();
 
   // The backend can't handle a single element vector w/o CFCMOV.
@@ -6335,7 +6336,8 @@ bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment,
 }
 
 bool X86TTIImpl::isLegalMaskedStore(Type *DataTy, Align Alignment,
-                                    unsigned AddressSpace) const {
+                                    unsigned AddressSpace,
+                                    TTI::MaskKind MaskKind) const {
   Type *ScalarTy = DataTy->getScalarType();
 
   // The backend can't handle a single element vector w/o CFCMOV.
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h
index df1393ce16ca1..9b326723ae385 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.h
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h
@@ -267,10 +267,14 @@ class X86TTIImpl final : public BasicTTIImplBase<X86TTIImpl> {
   bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                      const TargetTransformInfo::LSRCost &C2) const override;
   bool canMacroFuseCmp() const override;
-  bool isLegalMaskedLoad(Type *DataType, Align Alignment,
-                         unsigned AddressSpace) const override;
-  bool isLegalMaskedStore(Type *DataType, Align Alignment,
-                          unsigned AddressSpace) const override;
+  bool
+  isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned AddressSpace,
+                    TTI::MaskKind MaskKind =
+                        TTI::MaskKind::VariableOrConstantMask) const override;
+  bool
+  isLegalMaskedStore(Type *DataType, Align Alignment, unsigned AddressSpace,
+                     TTI::MaskKind MaskKind =
+                         TTI::MaskKind::VariableOrConstantMask) const override;
   bool isLegalNTLoad(Type *DataType, Align Alignment) const override;
   bool isLegalNTStore(Type *DataType, Align Alignment) const override;
   bool isLegalBroadcastLoad(Type *ElementTy,
diff --git a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
index 146e7d1047dd0..b7b08ae61ec52 100644
--- a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
@@ -1123,7 +1123,10 @@ static bool optimizeCallInst(CallInst *CI, bool &ModifiedDT,
       if (TTI.isLegalMaskedLoad(
               CI->getType(), CI->getParamAlign(0).valueOrOne(),
               cast<PointerType>(CI->getArgOperand(0)->getType())
-                  ->getAddressSpace()))
+                  ->getAddressSpace(),
+              isConstantIntVector(CI->getArgOperand(1))
+                  ? TTI::MaskKind::ConstantMask
+                  : TTI::MaskKind::VariableOrConstantMask))
         return false;
       scalarizeMaskedLoad(DL, HasBranchDivergence, CI, DTU, ModifiedDT);
       return true;
@@ -1132,7 +1135,10 @@ static bool optimizeCallInst(CallInst *CI, bool &ModifiedDT,
               CI->getArgOperand(0)->getType(),
               CI->getParamAlign(1).valueOrOne(),
               cast<PointerType>(CI->getArgOperand(1)->getType())
-                  ->getAddressSpace()))
+                  ->getAddressSpace(),
+              isConstantIntVector(CI->getArgOperand(2))
+                  ? TTI::MaskKind::ConstantMask
+                  : TTI::MaskKind::VariableOrConstantMask))
         return false;
       scalarizeMaskedStore(DL, HasBranchDivergence, CI, DTU, ModifiedDT);
       return true;
diff --git a/llvm/test/CodeGen/MIR/NVPTX/floating-point-immediate-operands.mir b/llvm/test/CodeGen/MIR/NVPTX/floating-point-immediate-operands.mir
index e3b072549bc04..3158916a3195c 100644
--- a/llvm/test/CodeGen/MIR/NVPTX/floating-point-immediate-operands.mir
+++ b/llvm/test/CodeGen/MIR/NVPTX/floating-point-immediate-operands.mir
@@ -40,9 +40,9 @@ registers:
   - { id: 7, class: b32 }
 body: |
   bb.0.entry:
-    %0 = LD_i32 0, 0, 4, 2, 32, &test_param_0, 0
+    %0 = LD_i32 0, 0, 4, 2, 32, -1, &test_param_0, 0
     %1 = CVT_f64_f32 %0, 0
-    %2 = LD_i32 0, 0, 4, 0, 32, &test_param_1, 0
+    %2 = LD_i32 0, 0, 4, 0, 32, -1, &test_param_1, 0
   ; CHECK: %3:b64 = FADD_rnf64ri %1, double 3.250000e+00
     %3 = FADD_rnf64ri %1, double 3.250000e+00
     %4 = CVT_f32_f64 %3, 5
@@ -66,9 +66,9 @@ registers:
   - { id: 7, class: b32 }
 body: |
   bb.0.entry:
-    %0 = LD_i32 0, 0, 4, 2, 32, &test2_param_0, 0
+    %0 = LD_i32 0, 0, 4, 2, 32, -1, &test2_param_0, 0
     %1 = CVT_f64_f32 %0, 0
-    %2 = LD_i32 0, 0, 4, 0, 32, &test2_param_1, 0
+    %2 = LD_i32 0, 0, 4, 0, 32, -1, &test2_param_1, 0
   ; CHECK: %3:b64 = FADD_rnf64ri %1, double 0x7FF8000000000000
     %3 = FADD_rnf64ri %1, double 0x7FF8000000000000
     %4 = CVT_f32_f64 %3, 5
diff --git a/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll b/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll
index 3fac29f74125b..d219493d2b31b 100644
--- a/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll
+++ b/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll
@@ -346,19 +346,15 @@ define i32 @ld_global_v8i32(ptr addrspace(1) %ptr) {
 ; SM100-LABEL: ld_global_v8i32(
 ; SM100:       {
 ; SM100-NEXT:    .reg .b32 %r<16>;
-; SM100-NEXT:    .reg .b64 %rd<6>;
+; SM100-NEXT:    .reg .b64 %rd<2>;
 ; SM100-EMPTY:
 ; SM100-NEXT:  // %bb.0:
 ; SM100-NEXT:    ld.param.b64 %rd1, [ld_global_v8i32_param_0];
-; SM100-NEXT:    ld.global.nc.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
-; SM100-NEXT:    mov.b64 {%r1, %r2}, %rd5;
-; SM100-NEXT:    mov.b64 {%r3, %r4}, %rd4;
-; SM100-NEXT:    mov.b64 {%r5, %r6}, %rd3;
-; SM100-NEXT:    mov.b64 {%r7, %r8}, %rd2;
-; SM100-NEXT:    add.s32 %r9, %r7, %r8;
-; SM100-NEXT:    add.s32 %r10, %r5, %r6;
-; SM100-NEXT:    add.s32 %r11, %r3, %r4;
-; SM100-NEXT:    add.s32 %r12, %r1, %r2;
+; SM100-NEXT:    ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1];
+; SM100-NEXT:    add.s32 %r9, %r1, %r2;
+; SM100-NEXT:    add.s32 %r10, %r3, %r4;
+; SM100-NEXT:    add.s32 %r11, %r5, %r6;
+; SM100-NEXT:    add.s32 %r12, %r7, %r8;
 ; SM100-NEXT:    add.s32 %r13, %r9, %r10;
 ; SM100-NEXT:    add.s32 %r14, %r11, %r12;
 ; SM100-NEXT:    add.s32 %r15, %r13, %r14;
diff --git a/llvm/test/CodeGen/NVPTX/machinelicm-no-preheader.mir b/llvm/test/CodeGen/NVPTX/machinelicm-no-preheader.mir
index 0b2d85600a2ef..4be91dfc60c6a 100644
--- a/llvm/test/CodeGen/NVPTX/machinelicm-no-preheader.mir
+++ b/llvm/test/CodeGen/NVPTX/machinelicm-no-preheader.mir
@@ -26,10 +26,10 @@ body:             |
   ; CHECK: bb.0.entry:
   ; CHECK-NEXT:   successors: %bb.2(0x30000000), %bb.3(0x50000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[LD_i32_:%[0-9]+]]:b32 = LD_i32 0, 0, 101, 3, 32, &test_hoist_param_1, 0 :: (dereferenceable invariant load (s32), addrspace 101)
-  ; CHECK-NEXT:   [[LD_i64_:%[0-9]+]]:b64 = LD_i64 0, 0, 101, 3, 64, &test_hoist_param_0, 0 :: (dereferenceable invariant load (s64), addrspace 101)
+  ; CHECK-NEXT:   [[LD_i32_:%[0-9]+]]:b32 = LD_i32 0, 0, 101, 3, 32, -1, &test_hoist_param_1, 0 :: (dereferenceable invariant load (s32), addrspace 101)
+  ; CHECK-NEXT:   [[LD_i64_:%[0-9]+]]:b64 = LD_i64 0, 0, 101, 3, 64, -1, &test_hoist_param_0, 0 :: (dereferenceable invariant load (s64), addrspace 101)
   ; CHECK-NEXT:   [[ADD64ri:%[0-9]+]]:b64 = nuw ADD64ri killed [[LD_i64_]], 2
-  ; CHECK-NEXT:   [[LD_i32_1:%[0-9]+]]:b32 = LD_i32 0, 0, 1, 3, 32, [[ADD64ri]], 0
+  ; CHECK-NEXT:   [[LD_i32_1:%[0-9]+]]:b32 = LD_i32 0, 0, 1, 3, 32, -1, [[ADD64ri]], 0
   ; CHECK-NEXT:   [[SETP_i32ri:%[0-9]+]]:b1 = SETP_i32ri [[LD_i32_]], 0, 0
   ; CHECK-NEXT:   CBranch killed [[SETP_i32ri]], %bb.2
   ; CHECK-NEXT: {{  $}}
@@ -54,10 +54,10 @@ body:             |
   bb.0.entry:
     successors: %bb.2(0x30000000), %bb.1(0x50000000)
 
-    %5:b32 = LD_i32 0, 0, 101, 3, 32, &test_hoist_param_1, 0 :: (dereferenceable invariant load (s32), addrspace 101)
-    %6:b64 = LD_i64 0, 0, 101, 3, 64, &test_hoist_param_0, 0 :: (dereferenceable invariant load (s64), addrspace 101)
+    %5:b32 = LD_i32 0, 0, 101, 3, 32, -1, &test_hoist_param_1, 0 :: (dereferenceable invariant load (s32), addrspace 101)
+    %6:b64 = LD_i64 0, 0, 101, 3, 64, -1, &test_hoist_param_0, 0 :: (dereferenceable invariant load (s64), addrspace 101)
     %0:b64 = nuw ADD64ri killed %6, 2
-    %1:b32 = LD_i32 0, 0, 1, 3, 32, %0, 0
+    %1:b32 = LD_i32 0, 0, 1, 3, 32, -1, %0, 0
     %7:b1 = SETP_i32ri %5, 0, 0
     CBranch killed %7, %bb.2
     GOTO %bb.1
diff --git a/llvm/test/CodeGen/NVPTX/masked-load-vectors.ll b/llvm/test/CodeGen/NVPTX/masked-load-vectors.ll
new file mode 100644
index 0000000000000..8617dea310d6c
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/masked-load-vectors.ll
@@ -0,0 +1,366 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_90 | FileCheck %s -check-prefixes=CHECK,SM90
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_90 | %ptxas-verify -arch=sm_90 %}
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_100 -mattr=+ptx88 | FileCheck %s -check-prefixes=CHECK,SM100
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_100 -mattr=+ptx88 | %ptxas-verify -arch=sm_100 %}
+
+
+; Different architectures are tested in this file for the following reasons:
+; - SM90 does not have 256-bit load/store instructions
+; - SM90 does not have masked store instructions
+; - SM90 does not support packed f32x2 instructions
+
+define void @global_8xi32(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; SM90-LABEL: global_8xi32(
+; SM90:       {
+; SM90-NEXT:    .reg .b32 %r<9>;
+; SM90-NEXT:    .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [global_8xi32_param_0];
+; SM90-NEXT:    .pragma "used_bytes_mask 0xf000";
+; SM90-NEXT:    ld.global.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1+16];
+; SM90-NEXT:    .pragma "used_bytes_mask 0xf0f";
+; SM90-NEXT:    ld.global.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1];
+; SM90-NEXT:    ld.param.b64 %rd2, [global_8xi32_param_1];
+; SM90-NEXT:    st.global.b32 [%rd2], %r5;
+; SM90-NEXT:    st.global.b32 [%rd2+8], %r7;
+; SM90-NEXT:    st.global.b32 [%rd2+28], %r4;
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: global_8xi32(
+; SM100:       {
+; SM100-NEXT:    .reg .b32 %r<9>;
+; SM100-NEXT:    .reg .b64 %rd<3>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [global_8xi32_param_0];
+; SM100-NEXT:    .pragma "used_bytes_mask 0xf0000f0f";
+; SM100-NEXT:    ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1];
+; SM100-NEXT:    ld.param.b64 %rd2, [global_8xi32_param_1];
+; SM100-NEXT:    st.global.v8.b32 [%rd2], {%r1, _, %r3, _, _, _, _, %r8};
+; SM100-NEXT:    ret;
+  %a.load = tail call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) align 32 %a, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>, <8 x i32> poison)
+  tail call void @llvm.masked.store.v8i32.p1(<8 x i32> %a.load, ptr addrspace(1) align 32 %b, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>)
+  ret void
+}
+
+; Masked stores are only supported for 32-bit element types,
+; while masked loads are supported for all element types.
+define void @global_16xi16(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; SM90-LABEL: global_16xi16(
+; SM90:       {
+; SM90-NEXT:    .reg .b16 %rs<7>;
+; SM90-NEXT:    .reg .b32 %r<9>;
+; SM90-NEXT:    .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [global_16xi16_param_0];
+; SM90-NEXT:    .pragma "used_bytes_mask 0xf000";
+; SM90-NEXT:    ld.global.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1+16];
+; SM90-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
+; SM90-NEXT:    .pragma "used_bytes_mask 0xf0f";
+; SM90-NEXT:    ld.global.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1];
+; SM90-NEXT:    mov.b32 {%rs3, %rs4}, %r7;
+; SM90-NEXT:    mov.b32 {%rs5, %rs6}, %r5;
+; SM90-NEXT:    ld.param.b64 %rd2, [global_16xi16_param_1];
+; SM90-NEXT:    st.global.b16 [%rd2], %rs5;
+; SM90-NEXT:    st.global.b16 [%rd2+2], %rs6;
+; SM90-NEXT:    st.global.b16 [%rd2+8], %rs3;
+; SM90-NEXT:    st.global.b16 [%rd2+10], %rs4;
+; SM90-NEXT:    st.global.b16 [%rd2+28], %rs1;
+; SM90-NEXT:    st.global.b16 [%rd2+30], %rs2;
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: global_16xi16(
+; SM100:       {
+; SM100-NEXT:    .reg .b16 %rs<7>;
+; SM100-NEXT:    .reg .b32 %r<9>;
+; SM100-NEXT:    .reg .b64 %rd<3>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [global_16xi16_param_0];
+; SM100-NEXT:    .pragma "used_bytes_mask 0xf0000f0f";
+; SM100-NEXT:    ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1];
+; SM100-NEXT:    mov.b32 {%rs1, %rs2}, %r8;
+; SM100-NEXT:    mov.b32 {%rs3, %rs4}, %r3;
+; SM100-NEXT:    mov.b32 {%rs5, %rs6}, %r1;
+; SM100-NEXT:    ld.param.b64 %rd2, [global_16xi16_param_1];
+; SM100-NEXT:    st.global.b16 [%rd2], %rs5;
+; SM100-NEXT:    st.global.b16 [%rd2+2], %rs6;
+; SM100-NEXT:    st.global.b16 [%rd2+8], %rs3;
+; SM100-NEXT:    st.global.b16 [%rd2+10], %rs4;
+; SM100-NEXT:    st.global.b16 [%rd2+28], %rs1;
+; SM100-NEXT:    st.global.b16 [%rd2+30], %rs2;
+; SM100-NEXT:    ret;
+  %a.load = tail call <16 x i16> @llvm.masked.load.v16i16.p1(ptr addrspace(1) align 32 %a, <16 x i1> <i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true>, <16 x i16> poison)
+  tail call void @llvm.masked.store.v16i16.p1(<16 x i16> %a.load, ptr addrspace(1) align 32 %b, <16 x i1> <i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true>)
+  ret void
+}
+
+define void @global_8xi32_no_align(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; CHECK-LABEL: global_8xi32_no_align(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<4>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [global_8xi32_no_align_param_0];
+; CHECK-NEXT:    ld.global.b32 %r1, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd2, [global_8xi32_no_align_param_1];
+; CHECK-NEXT:    ld.global.b32 %r2, [%rd1+8];
+; CHECK-NEXT:    ld.global.b32 %r3, [%rd1+28];
+; CHECK-NEXT:    st.global.b32 [%rd2], %r1;
+; CHECK-NEXT:    st.global.b32 [%rd2+8], %r2;
+; CHECK-NEXT:    st.global.b32 [%rd2+28], %r3;
+; CHECK-NEXT:    ret;
+  %a.load = tail call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) align 16 %a, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>, <8 x i32> poison)
+  tail call void @llvm.masked.store.v8i32.p1(<8 x i32> %a.load, ptr addrspace(1) align 16 %b, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>)
+  ret void
+}
+
+
+define void @global_8xi32_invariant(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; SM90-LABEL: global_8xi32_invariant(
+; SM90:       {
+; SM90-NEXT:    .reg .b32 %r<9>;
+; SM90-NEXT:    .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [global_8xi32_invariant_param_0];
+; SM90-NEXT:    .pragma "used_bytes_mask 0xf000";
+; SM90-NEXT:    ld.global.nc.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1+16];
+; SM90-NEXT:    .pragma "used_bytes_mask 0xf0f";
+; SM90-NEXT:    ld.global.nc.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1];
+; SM90-NEXT:    ld.param.b64 %rd2, [global_8xi32_invariant_param_1];
+; SM90-NEXT:    st.global.b32 [%rd2], %r5;
+; SM90-NEXT:    st.global.b32 [%rd2+8], %r7;
+; SM90-NEXT:    st.global.b32 [%rd2+28], %r4;
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: global_8xi32_invariant(
+; SM100:       {
+; SM100-NEXT:    .reg .b32 %r<9>;
+; SM100-NEXT:    .reg .b64 %rd<3>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [global_8xi32_invariant_param_0];
+; SM100-NEXT:    .pragma "used_bytes_mask 0xf0000f0f";
+; SM100-NEXT:    ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1];
+; SM100-NEXT:    ld.param.b64 %rd2, [global_8xi32_invariant_param_1];
+; SM100-NEXT:    st.global.v8.b32 [%rd2], {%r1, _, %r3, _, _, _, _, %r8};
+; SM100-NEXT:    ret;
+  %a.load = tail call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) align 32 %a, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>, <8 x i32> poison), !invariant.load !0
+  tail call void @llvm.masked.store.v8i32.p1(<8 x i32> %a.load, ptr addrspace(1) align 32 %b, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>)
+  ret void
+}
+
+define void @global_2xi16(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; CHECK-LABEL: global_2xi16(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<2>;
+; CHECK-NEXT:    .reg .b32 %r<2>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [global_2xi16_param_0];
+; CHECK-NEXT:    .pragma "used_bytes_mask 0x3";
+; CHECK-NEXT:    ld.global.b32 %r1, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd2, [global_2xi16_param_1];
+; CHECK-NEXT:    mov.b32 {%rs1, _}, %r1;
+; CHECK-NEXT:    st.global.b16 [%rd2], %rs1;
+; CHECK-NEXT:    ret;
+  %a.load = tail call <2 x i16> @llvm.masked.load.v2i16.p1(ptr addrspace(1) align 4 %a, <2 x i1> <i1 true, i1 false>, <2 x i16> poison)
+  tail call void @llvm.masked.store.v2i16.p1(<2 x i16> %a.load, ptr addrspace(1) align 4 %b, <2 x i1> <i1 true, i1 false>)
+  ret void
+}
+
+define void @global_2xi16_invariant(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; CHECK-LABEL: global_2xi16_invariant(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<2>;
+; CHECK-NEXT:    .reg .b32 %r<2>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [global_2xi16_invariant_param_0];
+; CHECK-NEXT:    .pragma "used_bytes_mask 0x3";
+; CHECK-NEXT:    ld.global.nc.b32 %r1, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd2, [global_2xi16_invariant_param_1];
+; CHECK-NEXT:    mov.b32 {%rs1, _}, %r1;
+; CHECK-NEXT:    st.global.b16 [%rd2], %rs1;
+; CHECK-NEXT:    ret;
+  %a.load = tail call <2 x i16> @llvm.masked.load.v2i16.p1(ptr addrspace(1) align 4 %a, <2 x i1> <i1 true, i1 false>, <2 x i16> poison), !invariant.load !0
+  tail call void @llvm.masked.store.v2i16.p1(<2 x i16> %a.load, ptr addrspace(1) align 4 %b, <2 x i1> <i1 true, i1 false>)
+  ret void
+}
+
+define void @global_2xi16_no_align(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; CHECK-LABEL: global_2xi16_no_align(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<2>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [global_2xi16_no_align_param_0];
+; CHECK-NEXT:    ld.global.b16 %rs1, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd2, [global_2xi16_no_align_param_1];
+; CHECK-NEXT:    st.global.b16 [%rd2], %rs1;
+; CHECK-NEXT:    ret;
+  %a.load = tail call <2 x i16> @llvm.masked.load.v2i16.p1(ptr addrspace(1) align 2 %a, <2 x i1> <i1 true, i1 false>, <2 x i16> poison)
+  tail call void @llvm.masked.store.v2i16.p1(<2 x i16> %a.load, ptr addrspace(1) align 4 %b, <2 x i1> <i1 true, i1 false>)
+  ret void
+}
+
+define void @global_4xi8(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; CHECK-LABEL: global_4xi8(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<3>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [global_4xi8_param_0];
+; CHECK-NEXT:    .pragma "used_bytes_mask 0x5";
+; CHECK-NEXT:    ld.global.b32 %r1, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd2, [global_4xi8_param_1];
+; CHECK-NEXT:    st.global.b8 [%rd2], %r1;
+; CHECK-NEXT:    prmt.b32 %r2, %r1, 0, 0x7772U;
+; CHECK-NEXT:    st.global.b8 [%rd2+2], %r2;
+; CHECK-NEXT:    ret;
+  %a.load = tail call <4 x i8> @llvm.masked.load.v4i8.p1(ptr addrspace(1) align 4 %a, <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i8> poison)
+  tail call void @llvm.masked.store.v4i8.p1(<4 x i8> %a.load, ptr addrspace(1) align 4 %b, <4 x i1> <i1 true, i1 false, i1 true, i1 false>)
+  ret void
+}
+
+define void @global_4xi8_invariant(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; CHECK-LABEL: global_4xi8_invariant(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<3>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [global_4xi8_invariant_param_0];
+; CHECK-NEXT:    .pragma "used_bytes_mask 0x5";
+; CHECK-NEXT:    ld.global.nc.b32 %r1, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd2, [global_4xi8_invariant_param_1];
+; CHECK-NEXT:    st.global.b8 [%rd2], %r1;
+; CHECK-NEXT:    prmt.b32 %r2, %r1, 0, 0x7772U;
+; CHECK-NEXT:    st.global.b8 [%rd2+2], %r2;
+; CHECK-NEXT:    ret;
+  %a.load = tail call <4 x i8> @llvm.masked.load.v4i8.p1(ptr addrspace(1) align 4 %a, <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i8> poison), !invariant.load !0
+  tail call void @llvm.masked.store.v4i8.p1(<4 x i8> %a.load, ptr addrspace(1) align 4 %b, <4 x i1> <i1 true, i1 false, i1 true, i1 false>)
+  ret void
+}
+
+define void @global_4xi8_no_align(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; CHECK-LABEL: global_4xi8_no_align(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [global_4xi8_no_align_param_0];
+; CHECK-NEXT:    ld.global.b8 %rs1, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd2, [global_4xi8_no_align_param_1];
+; CHECK-NEXT:    ld.global.b8 %rs2, [%rd1+2];
+; CHECK-NEXT:    st.global.b8 [%rd2], %rs1;
+; CHECK-NEXT:    st.global.b8 [%rd2+2], %rs2;
+; CHECK-NEXT:    ret;
+  %a.load = tail call <4 x i8> @llvm.masked.load.v4i8.p1(ptr addrspace(1) align 2 %a, <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i8> poison)
+  tail call void @llvm.masked.store.v4i8.p1(<4 x i8> %a.load, ptr addrspace(1) align 4 %b, <4 x i1> <i1 true, i1 false, i1 true, i1 false>)
+  ret void
+}
+
+; On sm_100+, we pack 2xf32 loads into a single b64 load during lowering
+define void @global_2xf32(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; SM90-LABEL: global_2xf32(
+; SM90:       {
+; SM90-NEXT:    .reg .b32 %r<3>;
+; SM90-NEXT:    .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [global_2xf32_param_0];
+; SM90-NEXT:    .pragma "used_bytes_mask 0xf";
+; SM90-NEXT:    ld.global.v2.b32 {%r1, %r2}, [%rd1];
+; SM90-NEXT:    ld.param.b64 %rd2, [global_2xf32_param_1];
+; SM90-NEXT:    st.global.b32 [%rd2], %r1;
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: global_2xf32(
+; SM100:       {
+; SM100-NEXT:    .reg .b32 %r<2>;
+; SM100-NEXT:    .reg .b64 %rd<4>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [global_2xf32_param_0];
+; SM100-NEXT:    .pragma "used_bytes_mask 0xf";
+; SM100-NEXT:    ld.global.b64 %rd2, [%rd1];
+; SM100-NEXT:    ld.param.b64 %rd3, [global_2xf32_param_1];
+; SM100-NEXT:    mov.b64 {%r1, _}, %rd2;
+; SM100-NEXT:    st.global.b32 [%rd3], %r1;
+; SM100-NEXT:    ret;
+  %a.load = tail call <2 x float> @llvm.masked.load.v2f32.p1(ptr addrspace(1) align 8 %a, <2 x i1> <i1 true, i1 false>, <2 x float> poison)
+  tail call void @llvm.masked.store.v2f32.p1(<2 x float> %a.load, ptr addrspace(1) align 8 %b, <2 x i1> <i1 true, i1 false>)
+  ret void
+}
+
+define void @global_2xf32_invariant(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; SM90-LABEL: global_2xf32_invariant(
+; SM90:       {
+; SM90-NEXT:    .reg .b32 %r<3>;
+; SM90-NEXT:    .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [global_2xf32_invariant_param_0];
+; SM90-NEXT:    .pragma "used_bytes_mask 0xf";
+; SM90-NEXT:    ld.global.nc.v2.b32 {%r1, %r2}, [%rd1];
+; SM90-NEXT:    ld.param.b64 %rd2, [global_2xf32_invariant_param_1];
+; SM90-NEXT:    st.global.b32 [%rd2], %r1;
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: global_2xf32_invariant(
+; SM100:       {
+; SM100-NEXT:    .reg .b32 %r<2>;
+; SM100-NEXT:    .reg .b64 %rd<4>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [global_2xf32_invariant_param_0];
+; SM100-NEXT:    .pragma "used_bytes_mask 0xf";
+; SM100-NEXT:    ld.global.nc.b64 %rd2, [%rd1];
+; SM100-NEXT:    ld.param.b64 %rd3, [global_2xf32_invariant_param_1];
+; SM100-NEXT:    mov.b64 {%r1, _}, %rd2;
+; SM100-NEXT:    st.global.b32 [%rd3], %r1;
+; SM100-NEXT:    ret;
+  %a.load = tail call <2 x float> @llvm.masked.load.v2f32.p1(ptr addrspace(1) align 8 %a, <2 x i1> <i1 true, i1 false>, <2 x float> poison), !invariant.load !0
+  tail call void @llvm.masked.store.v2f32.p1(<2 x float> %a.load, ptr addrspace(1) align 8 %b, <2 x i1> <i1 true, i1 false>)
+  ret void
+}
+
+define void @global_2xf32_no_align(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; CHECK-LABEL: global_2xf32_no_align(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<2>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [global_2xf32_no_align_param_0];
+; CHECK-NEXT:    ld.global.b32 %r1, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd2, [global_2xf32_no_align_param_1];
+; CHECK-NEXT:    st.global.b32 [%rd2], %r1;
+; CHECK-NEXT:    ret;
+  %a.load = tail call <2 x float> @llvm.masked.load.v2f32.p1(ptr addrspace(1) align 4 %a, <2 x i1> <i1 true, i1 false>, <2 x float> poison)
+  tail call void @llvm.masked.store.v2f32.p1(<2 x float> %a.load, ptr addrspace(1) align 8 %b, <2 x i1> <i1 true, i1 false>)
+  ret void
+}
+
+declare <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1), <8 x i1>, <8 x i32>)
+declare void @llvm.masked.store.v8i32.p1(<8 x i32>, ptr addrspace(1), <8 x i1>)
+declare <16 x i16> @llvm.masked.load.v16i16.p1(ptr addrspace(1), <16 x i1>, <16 x i16>)
+declare void @llvm.masked.store.v16i16.p1(<16 x i16>, ptr addrspace(1), <16 x i1>)
+declare <2 x i16> @llvm.masked.load.v2i16.p1(ptr addrspace(1), <2 x i1>, <2 x i16>)
+declare void @llvm.masked.store.v2i16.p1(<2 x i16>, ptr addrspace(1), <2 x i1>)
+declare <4 x i8> @llvm.masked.load.v4i8.p1(ptr addrspace(1), <4 x i1>, <4 x i8>)
+declare void @llvm.masked.store.v4i8.p1(<4 x i8>, ptr addrspace(1), <4 x i1>)
+declare <2 x float> @llvm.masked.load.v2f32.p1(ptr addrspace(1), <2 x i1>, <2 x float>)
+declare void @llvm.masked.store.v2f32.p1(<2 x float>, ptr addrspace(1), <2 x i1>)
+!0 = !{}
diff --git a/llvm/test/CodeGen/NVPTX/masked-store-variable-mask.ll b/llvm/test/CodeGen/NVPTX/masked-store-variable-mask.ll
new file mode 100644
index 0000000000000..9f23acaf93bc8
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/masked-store-variable-mask.ll
@@ -0,0 +1,56 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_100 -mattr=+ptx88 | FileCheck %s -check-prefixes=CHECK
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_100 -mattr=+ptx88 | %ptxas-verify -arch=sm_100 %}
+
+; Confirm that a masked store with a variable mask is scalarized before lowering
+
+define void @global_variable_mask(ptr addrspace(1) %a, ptr addrspace(1) %b, <4 x i1> %mask) {
+; CHECK-LABEL: global_variable_mask(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<9>;
+; CHECK-NEXT:    .reg .b16 %rs<9>;
+; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b8 %rs1, [global_variable_mask_param_2+3];
+; CHECK-NEXT:    ld.param.b8 %rs3, [global_variable_mask_param_2+2];
+; CHECK-NEXT:    and.b16 %rs4, %rs3, 1;
+; CHECK-NEXT:    ld.param.b8 %rs5, [global_variable_mask_param_2+1];
+; CHECK-NEXT:    and.b16 %rs6, %rs5, 1;
+; CHECK-NEXT:    setp.ne.b16 %p2, %rs6, 0;
+; CHECK-NEXT:    ld.param.b8 %rs7, [global_variable_mask_param_2];
+; CHECK-NEXT:    and.b16 %rs8, %rs7, 1;
+; CHECK-NEXT:    setp.ne.b16 %p1, %rs8, 0;
+; CHECK-NEXT:    ld.param.b64 %rd5, [global_variable_mask_param_1];
+; CHECK-NEXT:    ld.param.b64 %rd6, [global_variable_mask_param_0];
+; CHECK-NEXT:    ld.global.v4.b64 {%rd1, %rd2, %rd3, %rd4}, [%rd6];
+; CHECK-NEXT:    not.pred %p5, %p1;
+; CHECK-NEXT:    @%p5 bra $L__BB0_2;
+; CHECK-NEXT:  // %bb.1: // %cond.store
+; CHECK-NEXT:    st.global.b64 [%rd5], %rd1;
+; CHECK-NEXT:  $L__BB0_2: // %else
+; CHECK-NEXT:    and.b16 %rs2, %rs1, 1;
+; CHECK-NEXT:    setp.ne.b16 %p3, %rs4, 0;
+; CHECK-NEXT:    not.pred %p6, %p2;
+; CHECK-NEXT:    @%p6 bra $L__BB0_4;
+; CHECK-NEXT:  // %bb.3: // %cond.store1
+; CHECK-NEXT:    st.global.b64 [%rd5+8], %rd2;
+; CHECK-NEXT:  $L__BB0_4: // %else2
+; CHECK-NEXT:    setp.ne.b16 %p4, %rs2, 0;
+; CHECK-NEXT:    not.pred %p7, %p3;
+; CHECK-NEXT:    @%p7 bra $L__BB0_6;
+; CHECK-NEXT:  // %bb.5: // %cond.store3
+; CHECK-NEXT:    st.global.b64 [%rd5+16], %rd3;
+; CHECK-NEXT:  $L__BB0_6: // %else4
+; CHECK-NEXT:    not.pred %p8, %p4;
+; CHECK-NEXT:    @%p8 bra $L__BB0_8;
+; CHECK-NEXT:  // %bb.7: // %cond.store5
+; CHECK-NEXT:    st.global.b64 [%rd5+24], %rd4;
+; CHECK-NEXT:  $L__BB0_8: // %else6
+; CHECK-NEXT:    ret;
+  %a.load = load <4 x i64>, ptr addrspace(1) %a
+  tail call void @llvm.masked.store.v4i64.p1(<4 x i64> %a.load, ptr addrspace(1) align 32 %b, <4 x i1> %mask)
+  ret void
+}
+
+declare void @llvm.masked.store.v4i64.p1(<4 x i64>, ptr addrspace(1), <4 x i1>)
diff --git a/llvm/test/CodeGen/NVPTX/masked-store-vectors-256.ll b/llvm/test/CodeGen/NVPTX/masked-store-vectors-256.ll
new file mode 100644
index 0000000000000..feb7b7e0a0b39
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/masked-store-vectors-256.ll
@@ -0,0 +1,318 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_90 | FileCheck %s -check-prefixes=CHECK,SM90
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_90 | %ptxas-verify -arch=sm_90 %}
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_100 -mattr=+ptx88 | FileCheck %s -check-prefixes=CHECK,SM100
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_100 -mattr=+ptx88 | %ptxas-verify -arch=sm_100 %}
+
+; This test is based on load-store-vectors.ll,
+; and covers the lowering of 256-bit masked vector stores.
+
+; Types we are checking: i32, i64, f32, f64
+
+; Address spaces we are checking: generic, global
+; - Global is the only address space that currently supports masked stores.
+; - The generic stores are legalized via scalarization before reaching the backend;
+;   this file tests that path even though the LSV will not generate them.
+
+; 256-bit vector loads/stores are only legal on Blackwell+ (sm_100), so on sm_90 the vectors will be split.
+
+; generic address space
+
+define void @generic_8xi32(ptr %a, ptr %b) {
+; CHECK-LABEL: generic_8xi32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<9>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [generic_8xi32_param_0];
+; CHECK-NEXT:    ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1+16];
+; CHECK-NEXT:    ld.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd2, [generic_8xi32_param_1];
+; CHECK-NEXT:    st.b32 [%rd2], %r5;
+; CHECK-NEXT:    st.b32 [%rd2+8], %r7;
+; CHECK-NEXT:    st.b32 [%rd2+28], %r4;
+; CHECK-NEXT:    ret;
+  %a.load = load <8 x i32>, ptr %a
+  tail call void @llvm.masked.store.v8i32.p0(<8 x i32> %a.load, ptr align 32 %b, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>)
+  ret void
+}
+
+define void @generic_4xi64(ptr %a, ptr %b) {
+; CHECK-LABEL: generic_4xi64(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [generic_4xi64_param_0];
+; CHECK-NEXT:    ld.v2.b64 {%rd2, %rd3}, [%rd1+16];
+; CHECK-NEXT:    ld.v2.b64 {%rd4, %rd5}, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd6, [generic_4xi64_param_1];
+; CHECK-NEXT:    st.b64 [%rd6], %rd4;
+; CHECK-NEXT:    st.b64 [%rd6+16], %rd2;
+; CHECK-NEXT:    ret;
+  %a.load = load <4 x i64>, ptr %a
+  tail call void @llvm.masked.store.v4i64.p0(<4 x i64> %a.load, ptr align 32 %b, <4 x i1> <i1 true, i1 false, i1 true, i1 false>)
+  ret void
+}
+
+define void @generic_8xfloat(ptr %a, ptr %b) {
+; CHECK-LABEL: generic_8xfloat(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<9>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [generic_8xfloat_param_0];
+; CHECK-NEXT:    ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1+16];
+; CHECK-NEXT:    ld.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd2, [generic_8xfloat_param_1];
+; CHECK-NEXT:    st.b32 [%rd2], %r5;
+; CHECK-NEXT:    st.b32 [%rd2+8], %r7;
+; CHECK-NEXT:    st.b32 [%rd2+28], %r4;
+; CHECK-NEXT:    ret;
+  %a.load = load <8 x float>, ptr %a
+  tail call void @llvm.masked.store.v8f32.p0(<8 x float> %a.load, ptr align 32 %b, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>)
+  ret void
+}
+
+define void @generic_4xdouble(ptr %a, ptr %b) {
+; CHECK-LABEL: generic_4xdouble(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [generic_4xdouble_param_0];
+; CHECK-NEXT:    ld.v2.b64 {%rd2, %rd3}, [%rd1+16];
+; CHECK-NEXT:    ld.v2.b64 {%rd4, %rd5}, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd6, [generic_4xdouble_param_1];
+; CHECK-NEXT:    st.b64 [%rd6], %rd4;
+; CHECK-NEXT:    st.b64 [%rd6+16], %rd2;
+; CHECK-NEXT:    ret;
+  %a.load = load <4 x double>, ptr %a
+  tail call void @llvm.masked.store.v4f64.p0(<4 x double> %a.load, ptr align 32 %b, <4 x i1> <i1 true, i1 false, i1 true, i1 false>)
+  ret void
+}
+
+; global address space
+
+define void @global_8xi32(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; SM90-LABEL: global_8xi32(
+; SM90:       {
+; SM90-NEXT:    .reg .b32 %r<9>;
+; SM90-NEXT:    .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [global_8xi32_param_0];
+; SM90-NEXT:    ld.global.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1+16];
+; SM90-NEXT:    ld.global.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1];
+; SM90-NEXT:    ld.param.b64 %rd2, [global_8xi32_param_1];
+; SM90-NEXT:    st.global.b32 [%rd2], %r5;
+; SM90-NEXT:    st.global.b32 [%rd2+8], %r7;
+; SM90-NEXT:    st.global.b32 [%rd2+28], %r4;
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: global_8xi32(
+; SM100:       {
+; SM100-NEXT:    .reg .b32 %r<9>;
+; SM100-NEXT:    .reg .b64 %rd<3>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [global_8xi32_param_0];
+; SM100-NEXT:    ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1];
+; SM100-NEXT:    ld.param.b64 %rd2, [global_8xi32_param_1];
+; SM100-NEXT:    st.global.v8.b32 [%rd2], {%r1, _, %r3, _, _, _, _, %r8};
+; SM100-NEXT:    ret;
+  %a.load = load <8 x i32>, ptr addrspace(1) %a
+  tail call void @llvm.masked.store.v8i32.p1(<8 x i32> %a.load, ptr addrspace(1) align 32 %b, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>)
+  ret void
+}
+
+define void @global_4xi64(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; SM90-LABEL: global_4xi64(
+; SM90:       {
+; SM90-NEXT:    .reg .b64 %rd<7>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [global_4xi64_param_0];
+; SM90-NEXT:    ld.global.v2.b64 {%rd2, %rd3}, [%rd1+16];
+; SM90-NEXT:    ld.global.v2.b64 {%rd4, %rd5}, [%rd1];
+; SM90-NEXT:    ld.param.b64 %rd6, [global_4xi64_param_1];
+; SM90-NEXT:    st.global.b64 [%rd6], %rd4;
+; SM90-NEXT:    st.global.b64 [%rd6+16], %rd2;
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: global_4xi64(
+; SM100:       {
+; SM100-NEXT:    .reg .b64 %rd<7>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [global_4xi64_param_0];
+; SM100-NEXT:    ld.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
+; SM100-NEXT:    ld.param.b64 %rd6, [global_4xi64_param_1];
+; SM100-NEXT:    st.global.v4.b64 [%rd6], {%rd2, _, %rd4, _};
+; SM100-NEXT:    ret;
+  %a.load = load <4 x i64>, ptr addrspace(1) %a
+  tail call void @llvm.masked.store.v4i64.p1(<4 x i64> %a.load, ptr addrspace(1) align 32 %b, <4 x i1> <i1 true, i1 false, i1 true, i1 false>)
+  ret void
+}
+
+define void @global_8xfloat(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; SM90-LABEL: global_8xfloat(
+; SM90:       {
+; SM90-NEXT:    .reg .b32 %r<9>;
+; SM90-NEXT:    .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [global_8xfloat_param_0];
+; SM90-NEXT:    ld.global.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1+16];
+; SM90-NEXT:    ld.global.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1];
+; SM90-NEXT:    ld.param.b64 %rd2, [global_8xfloat_param_1];
+; SM90-NEXT:    st.global.b32 [%rd2], %r5;
+; SM90-NEXT:    st.global.b32 [%rd2+8], %r7;
+; SM90-NEXT:    st.global.b32 [%rd2+28], %r4;
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: global_8xfloat(
+; SM100:       {
+; SM100-NEXT:    .reg .b32 %r<9>;
+; SM100-NEXT:    .reg .b64 %rd<3>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [global_8xfloat_param_0];
+; SM100-NEXT:    ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1];
+; SM100-NEXT:    ld.param.b64 %rd2, [global_8xfloat_param_1];
+; SM100-NEXT:    st.global.v8.b32 [%rd2], {%r1, _, %r3, _, _, _, _, %r8};
+; SM100-NEXT:    ret;
+  %a.load = load <8 x float>, ptr addrspace(1) %a
+  tail call void @llvm.masked.store.v8f32.p1(<8 x float> %a.load, ptr addrspace(1) align 32 %b, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>)
+  ret void
+}
+
+define void @global_4xdouble(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; SM90-LABEL: global_4xdouble(
+; SM90:       {
+; SM90-NEXT:    .reg .b64 %rd<7>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [global_4xdouble_param_0];
+; SM90-NEXT:    ld.global.v2.b64 {%rd2, %rd3}, [%rd1+16];
+; SM90-NEXT:    ld.global.v2.b64 {%rd4, %rd5}, [%rd1];
+; SM90-NEXT:    ld.param.b64 %rd6, [global_4xdouble_param_1];
+; SM90-NEXT:    st.global.b64 [%rd6], %rd4;
+; SM90-NEXT:    st.global.b64 [%rd6+16], %rd2;
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: global_4xdouble(
+; SM100:       {
+; SM100-NEXT:    .reg .b64 %rd<7>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [global_4xdouble_param_0];
+; SM100-NEXT:    ld.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
+; SM100-NEXT:    ld.param.b64 %rd6, [global_4xdouble_param_1];
+; SM100-NEXT:    st.global.v4.b64 [%rd6], {%rd2, _, %rd4, _};
+; SM100-NEXT:    ret;
+  %a.load = load <4 x double>, ptr addrspace(1) %a
+  tail call void @llvm.masked.store.v4f64.p1(<4 x double> %a.load, ptr addrspace(1) align 32 %b, <4 x i1> <i1 true, i1 false, i1 true, i1 false>)
+  ret void
+}
+
+; edge cases
+define void @global_8xi32_all_mask_on(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; SM90-LABEL: global_8xi32_all_mask_on(
+; SM90:       {
+; SM90-NEXT:    .reg .b32 %r<9>;
+; SM90-NEXT:    .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [global_8xi32_all_mask_on_param_0];
+; SM90-NEXT:    ld.global.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
+; SM90-NEXT:    ld.global.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16];
+; SM90-NEXT:    ld.param.b64 %rd2, [global_8xi32_all_mask_on_param_1];
+; SM90-NEXT:    st.global.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8};
+; SM90-NEXT:    st.global.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: global_8xi32_all_mask_on(
+; SM100:       {
+; SM100-NEXT:    .reg .b64 %rd<7>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [global_8xi32_all_mask_on_param_0];
+; SM100-NEXT:    ld.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
+; SM100-NEXT:    ld.param.b64 %rd6, [global_8xi32_all_mask_on_param_1];
+; SM100-NEXT:    st.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
+; SM100-NEXT:    ret;
+  %a.load = load <8 x i32>, ptr addrspace(1) %a
+  tail call void @llvm.masked.store.v8i32.p1(<8 x i32> %a.load, ptr addrspace(1) align 32 %b, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
+  ret void
+}
+
+define void @global_8xi32_all_mask_off(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; CHECK-LABEL: global_8xi32_all_mask_off(
+; CHECK:       {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ret;
+  %a.load = load <8 x i32>, ptr addrspace(1) %a
+  tail call void @llvm.masked.store.v8i32.p1(<8 x i32> %a.load, ptr addrspace(1) align 32 %b, <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>)
+  ret void
+}
+
+; This is an example of the pattern the LSV emits for these masked stores.
+define void @vectorizerOutput(ptr addrspace(1) %in, ptr addrspace(1) %out) {
+; SM90-LABEL: vectorizerOutput(
+; SM90:       {
+; SM90-NEXT:    .reg .b32 %r<9>;
+; SM90-NEXT:    .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [vectorizerOutput_param_0];
+; SM90-NEXT:    ld.global.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1+16];
+; SM90-NEXT:    ld.global.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1];
+; SM90-NEXT:    ld.param.b64 %rd2, [vectorizerOutput_param_1];
+; SM90-NEXT:    st.global.b32 [%rd2], %r5;
+; SM90-NEXT:    st.global.b32 [%rd2+4], %r6;
+; SM90-NEXT:    st.global.b32 [%rd2+12], %r8;
+; SM90-NEXT:    st.global.b32 [%rd2+16], %r1;
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: vectorizerOutput(
+; SM100:       {
+; SM100-NEXT:    .reg .b32 %r<9>;
+; SM100-NEXT:    .reg .b64 %rd<3>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [vectorizerOutput_param_0];
+; SM100-NEXT:    ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1];
+; SM100-NEXT:    ld.param.b64 %rd2, [vectorizerOutput_param_1];
+; SM100-NEXT:    st.global.v8.b32 [%rd2], {%r1, %r2, _, %r4, %r5, _, _, _};
+; SM100-NEXT:    ret;
+  %1 = load <8 x i32>, ptr addrspace(1) %in, align 32
+  %load05 = extractelement <8 x i32> %1, i32 0
+  %load16 = extractelement <8 x i32> %1, i32 1
+  %load38 = extractelement <8 x i32> %1, i32 3
+  %load49 = extractelement <8 x i32> %1, i32 4
+  %2 = insertelement <8 x i32> poison, i32 %load05, i32 0
+  %3 = insertelement <8 x i32> %2, i32 %load16, i32 1
+  %4 = insertelement <8 x i32> %3, i32 poison, i32 2
+  %5 = insertelement <8 x i32> %4, i32 %load38, i32 3
+  %6 = insertelement <8 x i32> %5, i32 %load49, i32 4
+  %7 = insertelement <8 x i32> %6, i32 poison, i32 5
+  %8 = insertelement <8 x i32> %7, i32 poison, i32 6
+  %9 = insertelement <8 x i32> %8, i32 poison, i32 7
+  call void @llvm.masked.store.v8i32.p1(<8 x i32> %9, ptr addrspace(1) align 32 %out, <8 x i1> <i1 true, i1 true, i1 false, i1 true, i1 true, i1 false, i1 false, i1 false>)
+  ret void
+}
+
+declare void @llvm.masked.store.v8i32.p0(<8 x i32>, ptr, <8 x i1>)
+declare void @llvm.masked.store.v4i64.p0(<4 x i64>, ptr, <4 x i1>)
+declare void @llvm.masked.store.v8f32.p0(<8 x float>, ptr, <8 x i1>)
+declare void @llvm.masked.store.v4f64.p0(<4 x double>, ptr, <4 x i1>)
+
+declare void @llvm.masked.store.v8i32.p1(<8 x i32>, ptr addrspace(1), <8 x i1>)
+declare void @llvm.masked.store.v4i64.p1(<4 x i64>, ptr addrspace(1), <4 x i1>)
+declare void @llvm.masked.store.v8f32.p1(<8 x float>, ptr addrspace(1), <8 x i1>)
+declare void @llvm.masked.store.v4f64.p1(<4 x double>, ptr addrspace(1), <4 x i1>)
diff --git a/llvm/test/CodeGen/NVPTX/proxy-reg-erasure.mir b/llvm/test/CodeGen/NVPTX/proxy-reg-erasure.mir
index dfc84177fb0e6..a84b7fcd33836 100644
--- a/llvm/test/CodeGen/NVPTX/proxy-reg-erasure.mir
+++ b/llvm/test/CodeGen/NVPTX/proxy-reg-erasure.mir
@@ -77,7 +77,7 @@ constants:       []
 machineFunctionInfo: {}
 body:             |
   bb.0:
-    %0:b32, %1:b32, %2:b32, %3:b32 = LDV_i32_v4 0, 0, 101, 3, 32, &retval0, 0 :: (load (s128), addrspace 101)
+    %0:b32, %1:b32, %2:b32, %3:b32 = LDV_i32_v4 0, 0, 101, 3, 32, -1, &retval0, 0 :: (load (s128), addrspace 101)
     ; CHECK-NOT: ProxyReg
     %4:b32 = ProxyRegB32 killed %0
     %5:b32 = ProxyRegB32 killed %1
@@ -86,7 +86,7 @@ body:             |
     ; CHECK: STV_i32_v4 killed %0, killed %1, killed %2, killed %3
     STV_i32_v4 killed %4, killed %5, killed %6, killed %7, 0, 0, 101, 32, &func_retval0, 0 :: (store (s128), addrspace 101)
 
-    %8:b32 = LD_i32 0, 0, 101, 3, 32, &retval0, 0 :: (load (s32), addrspace 101)
+    %8:b32 = LD_i32 0, 0, 101, 3, 32, -1, &retval0, 0 :: (load (s32), addrspace 101)
     ; CHECK-NOT: ProxyReg
     %9:b32 = ProxyRegB32 killed %8
     %10:b32 = ProxyRegB32 killed %9


