[llvm] [AMDGPU] Introduce "amdgpu-sw-lower-lds" pass to lower LDS accesses. (PR #87265)

via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 11 09:52:37 PDT 2024


https://github.com/skc7 updated https://github.com/llvm/llvm-project/pull/87265

>From 72c3f56baad4a10953b486e0300665a731dcb713 Mon Sep 17 00:00:00 2001
From: skc7 <Krishna.Sankisa at amd.com>
Date: Thu, 7 Mar 2024 12:40:41 +0530
Subject: [PATCH 1/2] [AMDGPU] Introduce amdgpu-sw-lower-lds pass to lower LDS
 accesses. (#87265)

---
 llvm/lib/Target/AMDGPU/AMDGPU.h               |    9 +
 llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def |    1 +
 llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp   | 1057 +++++++++++++++++
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp |    1 +
 llvm/lib/Target/AMDGPU/CMakeLists.txt         |    1 +
 ...pu-sw-lower-lds-dynamic-indirect-access.ll |  105 ++
 .../amdgpu-sw-lower-lds-dynamic-lds-test.ll   |   78 ++
 ...ds-multi-static-dynamic-indirect-access.ll |  194 +++
 ...gpu-sw-lower-lds-multiple-blocks-return.ll |   99 ++
 ...ower-lds-static-dynamic-indirect-access.ll |  106 ++
 ...pu-sw-lower-lds-static-dynamic-lds-test.ll |   99 ++
 ...s-static-indirect-access-function-param.ll |   82 ++
 ...lower-lds-static-indirect-access-nested.ll |  255 ++++
 ...gpu-sw-lower-lds-static-indirect-access.ll |   89 ++
 .../amdgpu-sw-lower-lds-static-lds-test.ll    |   68 ++
 15 files changed, 2244 insertions(+)
 create mode 100644 llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index 46cc5f349555a..4391aa2e861f8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -263,6 +263,15 @@ struct AMDGPUAlwaysInlinePass : PassInfoMixin<AMDGPUAlwaysInlinePass> {
   bool GlobalOpt;
 };
 
+void initializeAMDGPUSwLowerLDSLegacyPass(PassRegistry &);
+extern char &AMDGPUSwLowerLDSLegacyPassID;
+ModulePass *createAMDGPUSwLowerLDSLegacyPass();
+
+struct AMDGPUSwLowerLDSPass : PassInfoMixin<AMDGPUSwLowerLDSPass> {
+  AMDGPUSwLowerLDSPass() {}
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
 class AMDGPUCodeGenPreparePass
     : public PassInfoMixin<AMDGPUCodeGenPreparePass> {
 private:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
index 57fc3314dd970..80f9249f25368 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
@@ -22,6 +22,7 @@ MODULE_PASS("amdgpu-lower-buffer-fat-pointers",
             AMDGPULowerBufferFatPointersPass(*this))
 MODULE_PASS("amdgpu-lower-ctor-dtor", AMDGPUCtorDtorLoweringPass())
 MODULE_PASS("amdgpu-lower-module-lds", AMDGPULowerModuleLDSPass(*this))
+MODULE_PASS("amdgpu-sw-lower-lds", AMDGPUSwLowerLDSPass())
 MODULE_PASS("amdgpu-printf-runtime-binding", AMDGPUPrintfRuntimeBindingPass())
 MODULE_PASS("amdgpu-unify-metadata", AMDGPUUnifyMetadataPass())
 #undef MODULE_PASS
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
new file mode 100644
index 0000000000000..b7f27c40faf12
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
@@ -0,0 +1,1057 @@
+//===-- AMDGPUSwLowerLDS.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass lowers local data store (LDS) uses in kernel and non-kernel
+// functions in the module and packs them together as a single allocation.
+// The packed LDS layout is emulated in dynamically allocated device global
+// memory.
+//
+// Replacement of Kernel LDS accesses:
+//    For a kernel, LDS accesses can be static or dynamic, and either direct
+//    (accessed within the kernel) or indirect (accessed through non-kernel
+//    functions). All LDS accesses corresponding to a kernel are packed
+//    together: static LDS is allocated first and dynamic LDS follows. The
+//    total size with alignment is calculated. A new LDS global, called
+//    "SW LDS", is created for the kernel, and the kernel gets the
+//    "amdgpu-lds-size" attribute with the calculated size as its value.
+//    All LDS accesses in the module are replaced by a GEP with the
+//    corresponding offset into the "SW LDS".
+//    A new "llvm.amdgcn.<kernel>.dynlds" global is created per kernel that
+//    accesses dynamic LDS. It is marked as used by the kernel and has
+//    MD_absolute_symbol metadata set to the total static LDS size, since
+//    dynamic LDS allocation starts after all static LDS allocations.
+//
+//    Device global memory equal to the total LDS size is allocated. In the
+//    prologue of the kernel, a single work-item of the work-group performs
+//    the "malloc" and stores the pointer to the allocation in "SW LDS".
+//
+//    To store the offsets corresponding to all LDS accesses, another global
+//    variable, called "SW LDS metadata" in this pass, is created.
+//    - SW LDS Global:
+//        An LDS global of ptr type, named
+//        "llvm.amdgcn.sw.lds.<kernel-name>".
+//    - Metadata Global:
+//        A global of struct type with n members, where n equals the number
+//        of LDS globals accessed by the kernel (direct and indirect). Each
+//        member is another struct of type {i32, i32, i32}: the first member
+//        is the offset, the second the size of the LDS global being
+//        replaced, and the third the total aligned size. It is named
+//        "llvm.amdgcn.sw.lds.<kernel-name>.md". Its initializer has the
+//        offsets and sizes of the static LDS entries filled in. For dynamic
+//        LDS entries, the offset is initialized to the end offset of the
+//        static LDS allocation and the size is initially zero. These dynamic
+//        LDS offsets and sizes are updated within the kernel, since the
+//        kernel can read the dynamic LDS size allocated at runtime by
+//        querying the "hidden_dynamic_lds_size" hidden kernel argument.
+//
+//    In the epilogue of the kernel, the allocated memory is freed by the
+//    same single work-item.
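+//
+//    As an illustration (a sketch based on the tests in this patch; exact
+//    sizes and offsets depend on the kernel's LDS variables), a kernel "k0"
+//    that uses only dynamic LDS gets globals along the lines of:
+//      @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison
+//      @llvm.amdgcn.k0.dynlds = external addrspace(3) global [0 x i8]
+//      @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global
+//        %llvm.amdgcn.sw.lds.k0.md.type {
+//          %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 8 },
+//          %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 0, i32 0 } }
+//    Here the first metadata entry describes the "SW LDS" pointer itself and
+//    the second is the dynamic LDS entry, whose size is filled in at runtime.
+//    Each LDS access then becomes a GEP of the malloc'ed base stored in
+//    @llvm.amdgcn.sw.lds.k0 by the offset loaded from the metadata global.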
+//
+// Replacement of non-kernel LDS accesses:
+//    Multiple kernels can reach the same non-kernel function.
+//    All kernels accessing LDS through non-kernels are sorted and
+//    assigned a kernel-id. All LDS globals accessed by non-kernels
+//    are also sorted. This information is used to build two tables:
+//    - Base table:
+//        The base table has a single row, with its elements indexed by
+//        kernel ID. Each element in the row is the ptr of the "SW LDS"
+//        variable created for that kernel.
+//    - Offset table:
+//        The offset table has multiple rows and columns. Rows are indexed
+//        from 0 to (n-1), where n is the total number of kernels accessing
+//        LDS through non-kernels. Each row has m elements, where m is the
+//        total number of unique LDS globals accessed by all non-kernels.
+//        Each element in the row is the ptr to the replacement that the
+//        corresponding kernel made for that LDS global.
+//    An LDS variable in a non-kernel is replaced using the information from
+//    the base and offset tables. Based on a kernel-id query, the ptr of the
+//    "SW LDS" for the corresponding kernel is obtained from the base table,
+//    and the offset into that "SW LDS" is obtained from the corresponding
+//    element of the offset table. Together these give the replacement value.
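+//
+//    For example (a sketch mirroring the tests in this patch; the %names and
+//    the table dimensions N and M are illustrative), an access to @lds_3
+//    from a non-kernel function is rewritten roughly as:
+//      %id    = call i32 @llvm.amdgcn.lds.kernel.id()
+//      %b.gep = getelementptr inbounds [N x ptr addrspace(3)],
+//                 ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table,
+//                 i32 0, i32 %id
+//      %base  = load ptr addrspace(3), ptr addrspace(1) %b.gep
+//      %o.gep = getelementptr inbounds [N x [M x ptr addrspace(1)]],
+//                 ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table,
+//                 i32 0, i32 %id, i32 <index of @lds_3>
+//      %o.ptr = load ptr addrspace(1), ptr addrspace(1) %o.gep
+//      %off   = load i32, ptr addrspace(1) %o.ptr
+//      %repl  = getelementptr inbounds i8, ptr addrspace(3) %base, i32 %off
+//    and all uses of @lds_3 in that function are replaced with %repl.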
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "Utils/AMDGPUMemoryUtils.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Analysis/DomTreeUpdater.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicsAMDGPU.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/ReplaceConstant.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+
+#include <algorithm>
+
+#define DEBUG_TYPE "amdgpu-sw-lower-lds"
+#define COV5_HIDDEN_DYN_LDS_SIZE_ARG 15
+
+using namespace llvm;
+using namespace AMDGPU;
+
+namespace {
+
+using DomTreeCallback = function_ref<DominatorTree *(Function &F)>;
+
+struct LDSAccessTypeInfo {
+  SetVector<GlobalVariable *> StaticLDSGlobals;
+  SetVector<GlobalVariable *> DynamicLDSGlobals;
+};
+
+// Struct to hold all the metadata required for a kernel
+// to replace its LDS global uses with the corresponding offsets
+// into device global memory.
+struct KernelLDSParameters {
+  GlobalVariable *SwLDS = nullptr;
+  GlobalVariable *SwDynLDS = nullptr;
+  GlobalVariable *SwLDSMetadata = nullptr;
+  LDSAccessTypeInfo DirectAccess;
+  LDSAccessTypeInfo IndirectAccess;
+  DenseMap<GlobalVariable *, SmallVector<uint32_t, 3>>
+      LDSToReplacementIndicesMap;
+  uint32_t MallocSize = 0;
+};
+
+// Struct to store info for creation of the offset table
+// for all the non-kernel LDS accesses.
+struct NonKernelLDSParameters {
+  GlobalVariable *LDSBaseTable{nullptr};
+  GlobalVariable *LDSOffsetTable{nullptr};
+  SetVector<Function *> OrderedKernels;
+  SetVector<GlobalVariable *> OrdereLDSGlobals;
+};
+
+class AMDGPUSwLowerLDS {
+public:
+  AMDGPUSwLowerLDS(Module &Mod, DomTreeCallback Callback)
+      : M(Mod), IRB(M.getContext()), DTCallback(Callback) {}
+  bool run();
+  void
+  getUsesOfLDSByNonKernels(const CallGraph &CG, FunctionVariableMap &Functions,
+                           SetVector<GlobalVariable *> &AllNonKernelLDSAccess);
+  SetVector<Function *>
+  getOrderedIndirectLDSAccessingKernels(SetVector<Function *> &&Kernels);
+  SetVector<GlobalVariable *>
+  getOrderedNonKernelAllLDSGlobals(SetVector<GlobalVariable *> &&Variables);
+  void buildSwLDSGlobal(Function *Func);
+  void buildSwDynLDSGlobal(Function *Func);
+  void populateSwMetadataGlobal(Function *Func);
+  void populateSwLDSAttributeAndMetadata(Function *Func);
+  void populateLDSToReplacementIndicesMap(Function *Func);
+  void replaceKernelLDSAccesses(Function *Func);
+  void lowerKernelLDSAccesses(Function *Func, DomTreeUpdater &DTU);
+  void buildNonKernelLDSOffsetTable(NonKernelLDSParameters &NKLDSParams);
+  void buildNonKernelLDSBaseTable(NonKernelLDSParameters &NKLDSParams);
+  Constant *
+  getAddressesOfVariablesInKernel(Function *Func,
+                                  SetVector<GlobalVariable *> &Variables);
+  void lowerNonKernelLDSAccesses(Function *Func,
+                                 SetVector<GlobalVariable *> &LDSGlobals,
+                                 NonKernelLDSParameters &NKLDSParams);
+  void
+  updateMallocSizeForDynamicLDS(Function *Func, Value **CurrMallocSize,
+                                Value *HiddenDynLDSSize,
+                                SetVector<GlobalVariable *> &DynamicLDSGlobals);
+
+private:
+  Module &M;
+  IRBuilder<> IRB;
+  DomTreeCallback DTCallback;
+  DenseMap<Function *, KernelLDSParameters> KernelToLDSParametersMap;
+};
+
+template <typename T> SetVector<T> sortByName(std::vector<T> &&V) {
+  // Sort the vector of globals or Functions based on their name.
+  // Returns a SetVector of globals/Functions.
+  sort(V, [](const auto *L, const auto *R) {
+    return L->getName() < R->getName();
+  });
+  return {SetVector<T>(V.begin(), V.end())};
+}
+
+SetVector<GlobalVariable *> AMDGPUSwLowerLDS::getOrderedNonKernelAllLDSGlobals(
+    SetVector<GlobalVariable *> &&Variables) {
+  // Sort all the non-kernel LDS accesses based on their name.
+  return sortByName(
+      std::vector<GlobalVariable *>(Variables.begin(), Variables.end()));
+}
+
+SetVector<Function *> AMDGPUSwLowerLDS::getOrderedIndirectLDSAccessingKernels(
+    SetVector<Function *> &&Kernels) {
+  // Sort the kernels accessing LDS through non-kernels based on their name.
+  // Also assign kernel-id metadata based on the sorted order.
+  LLVMContext &Ctx = M.getContext();
+  if (Kernels.size() > UINT32_MAX) {
+    report_fatal_error("Unimplemented SW LDS lowering for > 2**32 kernels");
+  }
+  SetVector<Function *> OrderedKernels =
+      sortByName(std::vector<Function *>(Kernels.begin(), Kernels.end()));
+  for (size_t i = 0; i < Kernels.size(); i++) {
+    Metadata *AttrMDArgs[1] = {
+        ConstantAsMetadata::get(IRB.getInt32(i)),
+    };
+    Function *Func = OrderedKernels[i];
+    Func->setMetadata("llvm.amdgcn.lds.kernel.id",
+                      MDNode::get(Ctx, AttrMDArgs));
+  }
+  return std::move(OrderedKernels);
+}
+
+void AMDGPUSwLowerLDS::getUsesOfLDSByNonKernels(
+    const CallGraph &CG, FunctionVariableMap &functions,
+    SetVector<GlobalVariable *> &AllNonKernelLDSAccess) {
+  for (GlobalVariable *GV : AllNonKernelLDSAccess) {
+    if (!AMDGPU::isLDSVariableToLower(*GV))
+      continue;
+
+    if (GV->isAbsoluteSymbolRef()) {
+      report_fatal_error(
+          "LDS variables with absolute addresses are unimplemented.");
+    }
+
+    for (User *V : GV->users()) {
+      if (auto *I = dyn_cast<Instruction>(V)) {
+        Function *F = I->getFunction();
+        if (!isKernelLDS(F) && F->hasFnAttribute(Attribute::SanitizeAddress))
+          functions[F].insert(GV);
+      }
+    }
+  }
+}
+
+static void recordLDSAbsoluteAddress(Module &M, GlobalVariable *GV,
+                                     uint32_t Address) {
+  // Write the specified address into metadata where it can be retrieved by
+  // the assembler. Format is a half-open range, [Address, Address+1).
+  LLVMContext &Ctx = M.getContext();
+  auto *IntTy = M.getDataLayout().getIntPtrType(Ctx, AMDGPUAS::LOCAL_ADDRESS);
+  auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address));
+  auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address + 1));
+  GV->setMetadata(LLVMContext::MD_absolute_symbol,
+                  MDNode::get(Ctx, {MinC, MaxC}));
+}
+
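+// Attach the "amdgpu-lds-size" attribute recording the total static LDS size
+// computed for the kernel. When dynamic LDS is also used, the value is
+// emitted as "<size>,<size>".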
+static void addLDSSizeAttribute(Function *Func, uint32_t Offset,
+                                bool IsDynLDS) {
+  if (Offset != 0) {
+    std::string Buffer;
+    raw_string_ostream SS{Buffer};
+    SS << format("%u", Offset);
+    if (IsDynLDS)
+      SS << format(",%u", Offset);
+    Func->addFnAttr("amdgpu-lds-size", Buffer);
+  }
+}
+
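+// Mark the given global as used from this kernel: emit a call to
+// llvm.donothing at the kernel entry with an "ExplicitUse" operand bundle
+// referencing the global.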
+static void markUsedByKernel(Function *Func, GlobalVariable *SGV) {
+  BasicBlock *Entry = &Func->getEntryBlock();
+  IRBuilder<> Builder(Entry, Entry->getFirstNonPHIIt());
+
+  Function *Decl =
+      Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});
+
+  Value *UseInstance[1] = {
+      Builder.CreateConstInBoundsGEP1_32(SGV->getValueType(), SGV, 0)};
+
+  Builder.CreateCall(Decl, {},
+                     {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)});
+}
+
+void AMDGPUSwLowerLDS::buildSwLDSGlobal(Function *Func) {
+  // Create new LDS global required for each kernel to store
+  // device global memory pointer.
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  // create new global pointer variable
+  LDSParams.SwLDS = new GlobalVariable(
+      M, IRB.getPtrTy(), false, GlobalValue::InternalLinkage,
+      PoisonValue::get(IRB.getPtrTy()), "llvm.amdgcn.sw.lds." + Func->getName(),
+      nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS, false);
+  GlobalValue::SanitizerMetadata MD;
+  MD.NoAddress = true;
+  LDSParams.SwLDS->setSanitizerMetadata(MD);
+  return;
+}
+
+void AMDGPUSwLowerLDS::buildSwDynLDSGlobal(Function *Func) {
+  // Create new Dyn LDS global if kernel accesses dyn LDS.
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  if (LDSParams.DirectAccess.DynamicLDSGlobals.empty() &&
+      LDSParams.IndirectAccess.DynamicLDSGlobals.empty())
+    return;
+  // create new global pointer variable
+  auto emptyCharArray = ArrayType::get(IRB.getInt8Ty(), 0);
+  LDSParams.SwDynLDS = new GlobalVariable(
+      M, emptyCharArray, false, GlobalValue::ExternalLinkage, nullptr,
+      "llvm.amdgcn." + Func->getName() + ".dynlds", nullptr,
+      GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS, false);
+  markUsedByKernel(Func, LDSParams.SwDynLDS);
+  GlobalValue::SanitizerMetadata MD;
+  MD.NoAddress = true;
+  LDSParams.SwDynLDS->setSanitizerMetadata(MD);
+  return;
+}
+
+void AMDGPUSwLowerLDS::populateSwLDSAttributeAndMetadata(Function *Func) {
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  bool IsDynLDSUsed = LDSParams.SwDynLDS != nullptr;
+  uint32_t Offset = LDSParams.MallocSize;
+  recordLDSAbsoluteAddress(M, LDSParams.SwLDS, 0);
+  addLDSSizeAttribute(Func, Offset, IsDynLDSUsed);
+  if (LDSParams.SwDynLDS)
+    recordLDSAbsoluteAddress(M, LDSParams.SwDynLDS, Offset);
+}
+
+void AMDGPUSwLowerLDS::populateSwMetadataGlobal(Function *Func) {
+  // Create the new metadata global for every kernel and initialize the
+  // start offsets and sizes corresponding to each LDS access.
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  auto &Ctx = M.getContext();
+  auto &DL = M.getDataLayout();
+  std::vector<Type *> Items;
+  Type *Int32Ty = IRB.getInt32Ty();
+  std::vector<Constant *> Initializers;
+  Align MaxAlignment(1);
+  auto UpdateMaxAlignment = [&MaxAlignment, &DL](GlobalVariable *GV) {
+    Align GVAlign = AMDGPU::getAlign(DL, GV);
+    MaxAlignment = std::max(MaxAlignment, GVAlign);
+  };
+
+  for (GlobalVariable *GV : LDSParams.DirectAccess.StaticLDSGlobals)
+    UpdateMaxAlignment(GV);
+
+  for (GlobalVariable *GV : LDSParams.DirectAccess.DynamicLDSGlobals)
+    UpdateMaxAlignment(GV);
+
+  for (GlobalVariable *GV : LDSParams.IndirectAccess.StaticLDSGlobals)
+    UpdateMaxAlignment(GV);
+
+  for (GlobalVariable *GV : LDSParams.IndirectAccess.DynamicLDSGlobals)
+    UpdateMaxAlignment(GV);
+
+  // Each metadata item is {StartOffset, SizeInBytes, AlignedSizeInBytes}.
+  SmallString<128> MDItemStr;
+  raw_svector_ostream MDItemOS(MDItemStr);
+  MDItemOS << "llvm.amdgcn.sw.lds." << Func->getName() << ".md.item";
+
+  StructType *LDSItemTy =
+      StructType::create(Ctx, {Int32Ty, Int32Ty, Int32Ty}, MDItemOS.str());
+  uint32_t &MallocSize = LDSParams.MallocSize;
+  SetVector<GlobalVariable *> UniqueLDSGlobals;
+  auto buildInitializerForSwLDSMD =
+      [&](SetVector<GlobalVariable *> &LDSGlobals) {
+        for (auto &GV : LDSGlobals) {
+          if (std::find(UniqueLDSGlobals.begin(), UniqueLDSGlobals.end(), GV) !=
+              UniqueLDSGlobals.end())
+            continue;
+          else
+            UniqueLDSGlobals.insert(GV);
+          Type *Ty = GV->getValueType();
+          const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
+          Items.push_back(LDSItemTy);
+          Constant *ItemStartOffset = ConstantInt::get(Int32Ty, MallocSize);
+          Constant *SizeInBytesConst = ConstantInt::get(Int32Ty, SizeInBytes);
+          uint64_t AlignedSize = alignTo(SizeInBytes, MaxAlignment);
+          Constant *AlignedSizeInBytesConst =
+              ConstantInt::get(Int32Ty, AlignedSize);
+          MallocSize += AlignedSize;
+          Constant *InitItem =
+              ConstantStruct::get(LDSItemTy, {ItemStartOffset, SizeInBytesConst,
+                                              AlignedSizeInBytesConst});
+          Initializers.push_back(InitItem);
+        }
+      };
+  SetVector<GlobalVariable *> SwLDSVector;
+  SwLDSVector.insert(LDSParams.SwLDS);
+  buildInitializerForSwLDSMD(SwLDSVector);
+  buildInitializerForSwLDSMD(LDSParams.DirectAccess.StaticLDSGlobals);
+  buildInitializerForSwLDSMD(LDSParams.IndirectAccess.StaticLDSGlobals);
+  buildInitializerForSwLDSMD(LDSParams.DirectAccess.DynamicLDSGlobals);
+  buildInitializerForSwLDSMD(LDSParams.IndirectAccess.DynamicLDSGlobals);
+
+  SmallString<128> MDTypeStr;
+  raw_svector_ostream MDTypeOS(MDTypeStr);
+  MDTypeOS << "llvm.amdgcn.sw.lds." << Func->getName() << ".md.type";
+
+  StructType *MetadataStructType =
+      StructType::create(Ctx, Items, MDTypeOS.str());
+  SmallString<128> MDStr;
+  raw_svector_ostream MDOS(MDStr);
+  MDOS << "llvm.amdgcn.sw.lds." << Func->getName() << ".md";
+  LDSParams.SwLDSMetadata = new GlobalVariable(
+      M, MetadataStructType, false, GlobalValue::InternalLinkage,
+      PoisonValue::get(MetadataStructType), MDOS.str(), nullptr,
+      GlobalValue::NotThreadLocal, AMDGPUAS::GLOBAL_ADDRESS, false);
+  Constant *data = ConstantStruct::get(MetadataStructType, Initializers);
+  LDSParams.SwLDSMetadata->setInitializer(data);
+  assert(LDSParams.SwLDS);
+  // Set the alignment to MaxAlignment for SwLDS.
+  LDSParams.SwLDS->setAlignment(MaxAlignment);
+  if (LDSParams.SwDynLDS)
+    LDSParams.SwDynLDS->setAlignment(MaxAlignment);
+  GlobalValue::SanitizerMetadata MD;
+  MD.NoAddress = true;
+  LDSParams.SwLDSMetadata->setSanitizerMetadata(MD);
+  return;
+}
+
+void AMDGPUSwLowerLDS::populateLDSToReplacementIndicesMap(Function *Func) {
+  // Fill the corresponding LDS replacement indices for each LDS access
+  // related to this kernel.
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  SetVector<GlobalVariable *> UniqueLDSGlobals;
+  auto PopulateIndices = [&](SetVector<GlobalVariable *> &LDSGlobals,
+                             uint32_t &Idx) {
+    for (auto &GV : LDSGlobals) {
+      if (std::find(UniqueLDSGlobals.begin(), UniqueLDSGlobals.end(), GV) !=
+          UniqueLDSGlobals.end())
+        continue;
+      else
+        UniqueLDSGlobals.insert(GV);
+      LDSParams.LDSToReplacementIndicesMap[GV] = {0, Idx, 0};
+      ++Idx;
+    }
+  };
+  uint32_t Idx = 0;
+  SetVector<GlobalVariable *> SwLDSVector;
+  SwLDSVector.insert(LDSParams.SwLDS);
+  PopulateIndices(SwLDSVector, Idx);
+  PopulateIndices(LDSParams.DirectAccess.StaticLDSGlobals, Idx);
+  PopulateIndices(LDSParams.IndirectAccess.StaticLDSGlobals, Idx);
+  PopulateIndices(LDSParams.DirectAccess.DynamicLDSGlobals, Idx);
+  PopulateIndices(LDSParams.IndirectAccess.DynamicLDSGlobals, Idx);
+  return;
+}
+
+static void replacesUsesOfGlobalInFunction(Function *Func, GlobalVariable *GV,
+                                           Value *Replacement) {
+  // Replace all uses of LDS global in this Function with a Replacement.
+  auto ReplaceUsesLambda = [Func](const Use &U) -> bool {
+    auto *V = U.getUser();
+    if (auto *Inst = dyn_cast<Instruction>(V)) {
+      auto *Func1 = Inst->getParent()->getParent();
+      if (Func == Func1)
+        return true;
+    }
+    return false;
+  };
+  GV->replaceUsesWithIf(Replacement, ReplaceUsesLambda);
+}
+
+void AMDGPUSwLowerLDS::replaceKernelLDSAccesses(Function *Func) {
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  GlobalVariable *SwLDS = LDSParams.SwLDS;
+  assert(SwLDS);
+  GlobalVariable *SwLDSMetadata = LDSParams.SwLDSMetadata;
+  assert(SwLDSMetadata);
+  StructType *SwLDSMetadataStructType =
+      cast<StructType>(SwLDSMetadata->getValueType());
+  Type *Int32Ty = IRB.getInt32Ty();
+
+  auto &IndirectAccess = LDSParams.IndirectAccess;
+  auto &DirectAccess = LDSParams.DirectAccess;
+  // Replace all uses of LDS global in this Function with a Replacement.
+  SetVector<GlobalVariable *> UniqueLDSGlobals;
+  auto ReplaceLDSGlobalUses = [&](SetVector<GlobalVariable *> &LDSGlobals) {
+    for (auto &GV : LDSGlobals) {
+      // Do not generate instructions if the LDS global is only accessed
+      // indirectly, i.e. from non-kernels.
+      if ((IndirectAccess.StaticLDSGlobals.contains(GV) ||
+           IndirectAccess.DynamicLDSGlobals.contains(GV)) &&
+          (!DirectAccess.StaticLDSGlobals.contains(GV) &&
+           !DirectAccess.DynamicLDSGlobals.contains(GV)))
+        continue;
+      if (std::find(UniqueLDSGlobals.begin(), UniqueLDSGlobals.end(), GV) !=
+          UniqueLDSGlobals.end())
+        continue;
+      else
+        UniqueLDSGlobals.insert(GV);
+      auto &Indices = LDSParams.LDSToReplacementIndicesMap[GV];
+      assert(Indices.size() == 3);
+      uint32_t Idx0 = Indices[0];
+      uint32_t Idx1 = Indices[1];
+      uint32_t Idx2 = Indices[2];
+      Constant *GEPIdx[] = {ConstantInt::get(Int32Ty, Idx0),
+                            ConstantInt::get(Int32Ty, Idx1),
+                            ConstantInt::get(Int32Ty, Idx2)};
+      Constant *GEP = ConstantExpr::getGetElementPtr(
+          SwLDSMetadataStructType, SwLDSMetadata, GEPIdx, true);
+      Value *Offset = IRB.CreateLoad(Int32Ty, GEP);
+      Value *BasePlusOffset =
+          IRB.CreateInBoundsGEP(IRB.getInt8Ty(), SwLDS, {Offset});
+      LLVM_DEBUG(dbgs() << "Sw LDS Lowering, Replacing LDS " << GV->getName());
+      replacesUsesOfGlobalInFunction(Func, GV, BasePlusOffset);
+    }
+  };
+  ReplaceLDSGlobalUses(DirectAccess.StaticLDSGlobals);
+  ReplaceLDSGlobalUses(IndirectAccess.StaticLDSGlobals);
+  ReplaceLDSGlobalUses(DirectAccess.DynamicLDSGlobals);
+  ReplaceLDSGlobalUses(IndirectAccess.DynamicLDSGlobals);
+}
+
+void AMDGPUSwLowerLDS::updateMallocSizeForDynamicLDS(
+    Function *Func, Value **CurrMallocSize, Value *HiddenDynLDSSize,
+    SetVector<GlobalVariable *> &DynamicLDSGlobals) {
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  Type *Int32Ty = IRB.getInt32Ty();
+
+  GlobalVariable *SwLDS = LDSParams.SwLDS;
+  GlobalVariable *SwLDSMetadata = LDSParams.SwLDSMetadata;
+  assert(SwLDS && SwLDSMetadata);
+  StructType *MetadataStructType =
+      cast<StructType>(SwLDSMetadata->getValueType());
+  unsigned MaxAlignment = SwLDS->getAlignment();
+  Value *MaxAlignValue = IRB.getInt32(MaxAlignment);
+  Value *MaxAlignValueMinusOne = IRB.getInt32(MaxAlignment - 1);
+
+  for (GlobalVariable *DynGV : DynamicLDSGlobals) {
+    auto &Indices = LDSParams.LDSToReplacementIndicesMap[DynGV];
+    // Update the Offset metadata.
+    Constant *Index0 = ConstantInt::get(Int32Ty, 0);
+    Constant *Index1 = ConstantInt::get(Int32Ty, Indices[1]);
+
+    Constant *Index2Offset = ConstantInt::get(Int32Ty, 0);
+    auto *GEPForOffset = IRB.CreateInBoundsGEP(
+        MetadataStructType, SwLDSMetadata, {Index0, Index1, Index2Offset});
+
+    IRB.CreateStore(*CurrMallocSize, GEPForOffset);
+    // Update the size and Aligned Size metadata.
+    Constant *Index2Size = ConstantInt::get(Int32Ty, 1);
+    auto *GEPForSize = IRB.CreateInBoundsGEP(MetadataStructType, SwLDSMetadata,
+                                             {Index0, Index1, Index2Size});
+
+    Value *CurrDynLDSSize = IRB.CreateLoad(Int32Ty, HiddenDynLDSSize);
+    IRB.CreateStore(CurrDynLDSSize, GEPForSize);
+    Constant *Index2AlignedSize = ConstantInt::get(Int32Ty, 2);
+    auto *GEPForAlignedSize = IRB.CreateInBoundsGEP(
+        MetadataStructType, SwLDSMetadata, {Index0, Index1, Index2AlignedSize});
+
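+    // Round the dynamic LDS size up to MaxAlignment:
+    // AlignedSize = ((Size + Align - 1) / Align) * Align.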
+    Value *AlignedDynLDSSize =
+        IRB.CreateAdd(CurrDynLDSSize, MaxAlignValueMinusOne);
+    AlignedDynLDSSize = IRB.CreateUDiv(AlignedDynLDSSize, MaxAlignValue);
+    AlignedDynLDSSize = IRB.CreateMul(AlignedDynLDSSize, MaxAlignValue);
+    IRB.CreateStore(AlignedDynLDSSize, GEPForAlignedSize);
+
+    // Update the Current Malloc Size
+    *CurrMallocSize = IRB.CreateAdd(*CurrMallocSize, AlignedDynLDSSize);
+  }
+}
+
+void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
+                                              DomTreeUpdater &DTU) {
+  LLVM_DEBUG(dbgs() << "Sw Lowering Kernel LDS for : " << Func->getName());
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  auto &Ctx = M.getContext();
+  auto *PrevEntryBlock = &Func->getEntryBlock();
+
+  // Create malloc block.
+  auto *MallocBlock = BasicBlock::Create(Ctx, "Malloc", Func, PrevEntryBlock);
+
+  // Create the WId block, which holds the instructions that select the
+  // {0,0,0} index work-item in the work-group.
+  auto *WIdBlock = BasicBlock::Create(Ctx, "WId", Func, MallocBlock);
+  IRB.SetInsertPoint(WIdBlock, WIdBlock->begin());
+  auto *const WIdx =
+      IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {}, {});
+  auto *const WIdy =
+      IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_y, {}, {});
+  auto *const WIdz =
+      IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_z, {}, {});
+  auto *const XYOr = IRB.CreateOr(WIdx, WIdy);
+  auto *const XYZOr = IRB.CreateOr(XYOr, WIdz);
+  auto *const WIdzCond = IRB.CreateICmpEQ(XYZOr, IRB.getInt32(0));
+
+  // All work items will branch to PrevEntryBlock except {0,0,0} index
+  // work item which will branch to malloc block.
+  IRB.CreateCondBr(WIdzCond, MallocBlock, PrevEntryBlock);
+
+  // Malloc block
+  IRB.SetInsertPoint(MallocBlock, MallocBlock->begin());
+
+  // If dynamic LDS globals are accessed by the kernel, get the size of
+  // dynamic LDS from the hidden dyn_lds_size kernel argument and update the
+  // corresponding metadata global entries for each dynamic LDS global.
+  GlobalVariable *SwLDS = LDSParams.SwLDS;
+  GlobalVariable *SwLDSMetadata = LDSParams.SwLDSMetadata;
+  assert(SwLDS && SwLDSMetadata);
+  StructType *MetadataStructType =
+      cast<StructType>(SwLDSMetadata->getValueType());
+  uint32_t MallocSize = 0;
+  Value *CurrMallocSize;
+  Type *Int32Ty = IRB.getInt32Ty();
+  Type *Int64Ty = IRB.getInt64Ty();
+
+  SetVector<GlobalVariable *> UniqueLDSGlobals;
+  auto GetUniqueLDSGlobals = [&](SetVector<GlobalVariable *> &LDSGlobals) {
+    for (auto &GV : LDSGlobals) {
+      if (std::find(UniqueLDSGlobals.begin(), UniqueLDSGlobals.end(), GV) !=
+          UniqueLDSGlobals.end())
+        continue;
+      else
+        UniqueLDSGlobals.insert(GV);
+    }
+  };
+
+  GetUniqueLDSGlobals(LDSParams.DirectAccess.StaticLDSGlobals);
+  GetUniqueLDSGlobals(LDSParams.IndirectAccess.StaticLDSGlobals);
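+  // The first entry of the metadata struct describes the "SW LDS" pointer
+  // itself, hence the extra 1 below.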
+  unsigned NumStaticLDS = 1 + UniqueLDSGlobals.size();
+  UniqueLDSGlobals.clear();
+
+  if (NumStaticLDS) {
+    auto *GEPForEndStaticLDSOffset =
+        IRB.CreateInBoundsGEP(MetadataStructType, SwLDSMetadata,
+                              {ConstantInt::get(Int32Ty, 0),
+                               ConstantInt::get(Int32Ty, NumStaticLDS - 1),
+                               ConstantInt::get(Int32Ty, 0)});
+
+    auto *GEPForEndStaticLDSSize =
+        IRB.CreateInBoundsGEP(MetadataStructType, SwLDSMetadata,
+                              {ConstantInt::get(Int32Ty, 0),
+                               ConstantInt::get(Int32Ty, NumStaticLDS - 1),
+                               ConstantInt::get(Int32Ty, 2)});
+
+    Value *EndStaticLDSOffset =
+        IRB.CreateLoad(Int32Ty, GEPForEndStaticLDSOffset);
+    Value *EndStaticLDSSize = IRB.CreateLoad(Int32Ty, GEPForEndStaticLDSSize);
+    CurrMallocSize = IRB.CreateAdd(EndStaticLDSOffset, EndStaticLDSSize);
+  } else
+    CurrMallocSize = IRB.getInt32(MallocSize);
+
+  if (LDSParams.SwDynLDS) {
+    // Get size from hidden dyn_lds_size argument of kernel
+    Value *ImplicitArg =
+        IRB.CreateIntrinsic(Intrinsic::amdgcn_implicitarg_ptr, {}, {});
+    Value *HiddenDynLDSSize = IRB.CreateInBoundsGEP(
+        ImplicitArg->getType(), ImplicitArg,
+        {ConstantInt::get(Int64Ty, COV5_HIDDEN_DYN_LDS_SIZE_ARG)});
+    UniqueLDSGlobals.clear();
+    GetUniqueLDSGlobals(LDSParams.DirectAccess.DynamicLDSGlobals);
+    GetUniqueLDSGlobals(LDSParams.IndirectAccess.DynamicLDSGlobals);
+    updateMallocSizeForDynamicLDS(Func, &CurrMallocSize, HiddenDynLDSSize,
+                                  UniqueLDSGlobals);
+  }
+
+  CurrMallocSize = IRB.CreateZExt(CurrMallocSize, Int64Ty);
+
+  // Create a call to the malloc function, which allocates device global
+  // memory of a size equal to the total size of all LDS accesses in this
+  // kernel.
+  Value *ReturnAddress = IRB.CreateCall(
+      Intrinsic::getDeclaration(&M, Intrinsic::returnaddress), IRB.getInt32(0));
+  FunctionCallee AsanMallocFunc = M.getOrInsertFunction(
+      StringRef("__asan_malloc_impl"),
+      FunctionType::get(Int64Ty, {Int64Ty, Int64Ty}, false));
+  Value *RAPtrToInt = IRB.CreatePtrToInt(ReturnAddress, Int64Ty);
+  Value *AsanMalloc =
+      IRB.CreateCall(AsanMallocFunc, {CurrMallocSize, RAPtrToInt});
+  Value *MallocPtr =
+      IRB.CreateIntToPtr(AsanMalloc, IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS));
+
+  // Store the pointer returned by malloc into the new SW LDS global.
+  IRB.CreateStore(MallocPtr, SwLDS);
+
+  // Create branch to PrevEntryBlock
+  IRB.CreateBr(PrevEntryBlock);
+
+  // Create a work-group barrier at the start of the previous entry block.
+  Type *Int1Ty = IRB.getInt1Ty();
+  IRB.SetInsertPoint(PrevEntryBlock, PrevEntryBlock->begin());
+  auto *XYZCondPhi = IRB.CreatePHI(Int1Ty, 2, "xyzCond");
+  XYZCondPhi->addIncoming(IRB.getInt1(0), WIdBlock);
+  XYZCondPhi->addIncoming(IRB.getInt1(1), MallocBlock);
+
+  IRB.CreateIntrinsic(Intrinsic::amdgcn_s_barrier, {}, {});
+
+  replaceKernelLDSAccesses(Func);
+
+  auto *CondFreeBlock = BasicBlock::Create(Ctx, "CondFree", Func);
+  auto *FreeBlock = BasicBlock::Create(Ctx, "Free", Func);
+  auto *EndBlock = BasicBlock::Create(Ctx, "End", Func);
+  for (BasicBlock &BB : *Func) {
+    if (!BB.empty()) {
+      if (ReturnInst *RI = dyn_cast<ReturnInst>(&BB.back())) {
+        RI->eraseFromParent();
+        IRB.SetInsertPoint(&BB, BB.end());
+        IRB.CreateBr(CondFreeBlock);
+      }
+    }
+  }
+
+  // Cond Free Block
+  IRB.SetInsertPoint(CondFreeBlock, CondFreeBlock->begin());
+  IRB.CreateIntrinsic(Intrinsic::amdgcn_s_barrier, {}, {});
+  IRB.CreateCondBr(XYZCondPhi, FreeBlock, EndBlock);
+
+  // Free Block
+  IRB.SetInsertPoint(FreeBlock, FreeBlock->begin());
+
+  // Free the previously allocated device global memory.
+  Value *LoadMallocPtr =
+      IRB.CreateLoad(IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS), SwLDS);
+
+  FunctionCallee AsanFreeFunc = M.getOrInsertFunction(
+      StringRef("__asan_free_impl"),
+      FunctionType::get(IRB.getVoidTy(), {Int64Ty, Int64Ty}, false));
+  Value *ReturnAddr = IRB.CreateCall(
+      Intrinsic::getDeclaration(&M, Intrinsic::returnaddress), IRB.getInt32(0));
+  Value *RAPToInt = IRB.CreatePtrToInt(ReturnAddr, Int64Ty);
+  Value *MallocPtrToInt = IRB.CreatePtrToInt(LoadMallocPtr, Int64Ty);
+  IRB.CreateCall(AsanFreeFunc, {MallocPtrToInt, RAPToInt});
+
+  IRB.CreateBr(EndBlock);
+
+  // End Block
+  IRB.SetInsertPoint(EndBlock, EndBlock->begin());
+  IRB.CreateRetVoid();
+  // Update the DomTree with corresponding links to basic blocks.
+  DTU.applyUpdates({{DominatorTree::Insert, WIdBlock, MallocBlock},
+                    {DominatorTree::Insert, MallocBlock, PrevEntryBlock},
+                    {DominatorTree::Insert, CondFreeBlock, FreeBlock},
+                    {DominatorTree::Insert, FreeBlock, EndBlock}});
+}
+
+Constant *AMDGPUSwLowerLDS::getAddressesOfVariablesInKernel(
+    Function *Func, SetVector<GlobalVariable *> &Variables) {
+  Type *Int32Ty = IRB.getInt32Ty();
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+
+  GlobalVariable *SwLDSMetadata = LDSParams.SwLDSMetadata;
+  assert(SwLDSMetadata);
+  auto *SwLDSMetadataStructType =
+      cast<StructType>(SwLDSMetadata->getValueType());
+  ArrayType *KernelOffsetsType =
+      ArrayType::get(IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS), Variables.size());
+
+  SmallVector<Constant *> Elements;
+  for (size_t i = 0; i < Variables.size(); i++) {
+    GlobalVariable *GV = Variables[i];
+    if (!LDSParams.LDSToReplacementIndicesMap.contains(GV)) {
+      Elements.push_back(
+          PoisonValue::get(IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS)));
+      continue;
+    }
+    auto &Indices = LDSParams.LDSToReplacementIndicesMap[GV];
+    uint32_t Idx0 = Indices[0];
+    uint32_t Idx1 = Indices[1];
+    uint32_t Idx2 = Indices[2];
+    Constant *GEPIdx[] = {ConstantInt::get(Int32Ty, Idx0),
+                          ConstantInt::get(Int32Ty, Idx1),
+                          ConstantInt::get(Int32Ty, Idx2)};
+    Constant *GEP = ConstantExpr::getGetElementPtr(SwLDSMetadataStructType,
+                                                   SwLDSMetadata, GEPIdx, true);
+    Elements.push_back(GEP);
+  }
+  return ConstantArray::get(KernelOffsetsType, Elements);
+}
+
+void AMDGPUSwLowerLDS::buildNonKernelLDSBaseTable(
+    NonKernelLDSParameters &NKLDSParams) {
+  // The base table has a single row, with its elements indexed by kernel ID.
+  // Each element in the row is the address of the "SW LDS" global of the
+  // corresponding kernel.
+  auto &Kernels = NKLDSParams.OrderedKernels;
+  if (Kernels.empty())
+    return;
+  Type *Int32Ty = IRB.getInt32Ty();
+  const size_t NumberKernels = Kernels.size();
+  ArrayType *AllKernelsOffsetsType =
+      ArrayType::get(IRB.getPtrTy(AMDGPUAS::LOCAL_ADDRESS), NumberKernels);
+  std::vector<Constant *> OverallConstantExprElts(NumberKernels);
+  for (size_t i = 0; i < NumberKernels; i++) {
+    Function *Func = Kernels[i];
+    auto &LDSParams = KernelToLDSParametersMap[Func];
+    GlobalVariable *SwLDS = LDSParams.SwLDS;
+    assert(SwLDS);
+    Constant *GEPIdx[] = {ConstantInt::get(Int32Ty, 0)};
+    Constant *GEP =
+        ConstantExpr::getGetElementPtr(SwLDS->getType(), SwLDS, GEPIdx, true);
+    OverallConstantExprElts[i] = GEP;
+  }
+  Constant *init =
+      ConstantArray::get(AllKernelsOffsetsType, OverallConstantExprElts);
+  NKLDSParams.LDSBaseTable = new GlobalVariable(
+      M, AllKernelsOffsetsType, true, GlobalValue::InternalLinkage, init,
+      "llvm.amdgcn.sw.lds.base.table", nullptr, GlobalValue::NotThreadLocal,
+      AMDGPUAS::GLOBAL_ADDRESS);
+  GlobalValue::SanitizerMetadata MD;
+  MD.NoAddress = true;
+  NKLDSParams.LDSBaseTable->setSanitizerMetadata(MD);
+}
+
+void AMDGPUSwLowerLDS::buildNonKernelLDSOffsetTable(
+    NonKernelLDSParameters &NKLDSParams) {
+  // The offset table has multiple rows and columns. Rows are indexed from 0
+  // to (n-1), where n is the total number of kernels accessing LDS through
+  // non-kernels. Each row has m elements, where m is the total number of
+  // unique LDS globals accessed by non-kernels. Each element in the row is
+  // the address of the replacement made for that LDS global by that
+  // particular kernel.
+  auto &Variables = NKLDSParams.OrdereLDSGlobals;
+  auto &Kernels = NKLDSParams.OrderedKernels;
+  if (Variables.empty() || Kernels.empty())
+    return;
+  const size_t NumberVariables = Variables.size();
+  const size_t NumberKernels = Kernels.size();
+
+  ArrayType *KernelOffsetsType =
+      ArrayType::get(IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS), NumberVariables);
+
+  ArrayType *AllKernelsOffsetsType =
+      ArrayType::get(KernelOffsetsType, NumberKernels);
+  std::vector<Constant *> overallConstantExprElts(NumberKernels);
+  for (size_t i = 0; i < NumberKernels; i++) {
+    Function *Func = Kernels[i];
+    overallConstantExprElts[i] =
+        getAddressesOfVariablesInKernel(Func, Variables);
+  }
+  Constant *Init =
+      ConstantArray::get(AllKernelsOffsetsType, overallConstantExprElts);
+  NKLDSParams.LDSOffsetTable = new GlobalVariable(
+      M, AllKernelsOffsetsType, true, GlobalValue::InternalLinkage, Init,
+      "llvm.amdgcn.sw.lds.offset.table", nullptr, GlobalValue::NotThreadLocal,
+      AMDGPUAS::GLOBAL_ADDRESS);
+  GlobalValue::SanitizerMetadata MD;
+  MD.NoAddress = true;
+  NKLDSParams.LDSOffsetTable->setSanitizerMetadata(MD);
+}
+
+void AMDGPUSwLowerLDS::lowerNonKernelLDSAccesses(
+    Function *Func, SetVector<GlobalVariable *> &LDSGlobals,
+    NonKernelLDSParameters &NKLDSParams) {
+  // Replace LDS accesses in the non-kernel with a replacement built from the
+  // base queried from the base table and the offset from the offset table.
+  LLVM_DEBUG(dbgs() << "Sw LDS lowering, lower non-kernel access for : "
+                    << Func->getName());
+  auto InsertAt = Func->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
+  IRB.SetInsertPoint(InsertAt);
+  Function *Decl =
+      Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_lds_kernel_id, {});
+  auto *KernelId = IRB.CreateCall(Decl, {});
+  GlobalVariable *LDSBaseTable = NKLDSParams.LDSBaseTable;
+  GlobalVariable *LDSOffsetTable = NKLDSParams.LDSOffsetTable;
+  auto &OrdereLDSGlobals = NKLDSParams.OrdereLDSGlobals;
+  assert(LDSBaseTable && LDSOffsetTable);
+  Value *BaseGEP = IRB.CreateInBoundsGEP(
+      LDSBaseTable->getValueType(), LDSBaseTable, {IRB.getInt32(0), KernelId});
+  Value *BaseLoad =
+      IRB.CreateLoad(IRB.getPtrTy(AMDGPUAS::LOCAL_ADDRESS), BaseGEP);
+
+  for (GlobalVariable *GV : LDSGlobals) {
+    auto GVIt = std::find(OrdereLDSGlobals.begin(), OrdereLDSGlobals.end(), GV);
+    assert(GVIt != OrdereLDSGlobals.end());
+    uint32_t GVOffset = std::distance(OrdereLDSGlobals.begin(), GVIt);
+
+    Value *OffsetGEP = IRB.CreateInBoundsGEP(
+        LDSOffsetTable->getValueType(), LDSOffsetTable,
+        {IRB.getInt32(0), KernelId, IRB.getInt32(GVOffset)});
+    Value *OffsetLoad =
+        IRB.CreateLoad(IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS), OffsetGEP);
+    Value *Offset = IRB.CreateLoad(IRB.getInt32Ty(), OffsetLoad);
+    Value *BasePlusOffset =
+        IRB.CreateInBoundsGEP(IRB.getInt8Ty(), BaseLoad, {Offset});
+    LLVM_DEBUG(dbgs() << "Sw LDS Lowering, Replace non-kernel LDS for "
+                      << GV->getName());
+    replacesUsesOfGlobalInFunction(Func, GV, BasePlusOffset);
+  }
+}
+
+static void reorderStaticDynamicIndirectLDSSet(KernelLDSParameters &LDSParams) {
+  // Sort the static and dynamic LDS globals, for both direct and indirect
+  // access, on the basis of their names.
+  auto &DirectAccess = LDSParams.DirectAccess;
+  auto &IndirectAccess = LDSParams.IndirectAccess;
+  LDSParams.DirectAccess.StaticLDSGlobals = sortByName(
+      std::vector<GlobalVariable *>(DirectAccess.StaticLDSGlobals.begin(),
+                                    DirectAccess.StaticLDSGlobals.end()));
+  LDSParams.DirectAccess.DynamicLDSGlobals = sortByName(
+      std::vector<GlobalVariable *>(DirectAccess.DynamicLDSGlobals.begin(),
+                                    DirectAccess.DynamicLDSGlobals.end()));
+  LDSParams.IndirectAccess.StaticLDSGlobals = sortByName(
+      std::vector<GlobalVariable *>(IndirectAccess.StaticLDSGlobals.begin(),
+                                    IndirectAccess.StaticLDSGlobals.end()));
+  LDSParams.IndirectAccess.DynamicLDSGlobals = sortByName(
+      std::vector<GlobalVariable *>(IndirectAccess.DynamicLDSGlobals.begin(),
+                                    IndirectAccess.DynamicLDSGlobals.end()));
+}
+
+bool AMDGPUSwLowerLDS::run() {
+  bool Changed = false;
+  CallGraph CG = CallGraph(M);
+  SetVector<Function *> KernelsWithIndirectLDSAccess;
+  FunctionVariableMap NonKernelToLDSAccessMap;
+  SetVector<GlobalVariable *> AllNonKernelLDSAccess;
+
+  Changed |= eliminateConstantExprUsesOfLDSFromAllInstructions(M);
+
+  // Get all the direct and indirect LDS accesses for all the kernels.
+  LDSUsesInfoTy LDSUsesInfo = getTransitiveUsesOfLDS(CG, M);
+
+  // Utility to group LDS accesses into direct, indirect, static and dynamic.
+  auto PopulateKernelStaticDynamicLDS = [&](FunctionVariableMap &LDSAccesses,
+                                            bool DirectAccess) {
+    for (auto &K : LDSAccesses) {
+      Function *F = K.first;
+      if (!F || K.second.empty())
+        continue;
+
+      assert(isKernelLDS(F));
+
+      if (!F->hasFnAttribute(Attribute::SanitizeAddress))
+        continue;
+
+      if (!KernelToLDSParametersMap.contains(F)) {
+        KernelLDSParameters KernelLDSParams;
+        KernelToLDSParametersMap[F] = KernelLDSParams;
+      }
+
+      auto &LDSParams = KernelToLDSParametersMap[F];
+      if (!DirectAccess)
+        KernelsWithIndirectLDSAccess.insert(F);
+      for (GlobalVariable *GV : K.second) {
+        if (!DirectAccess) {
+          if (AMDGPU::isDynamicLDS(*GV))
+            LDSParams.IndirectAccess.DynamicLDSGlobals.insert(GV);
+          else
+            LDSParams.IndirectAccess.StaticLDSGlobals.insert(GV);
+          AllNonKernelLDSAccess.insert(GV);
+        } else {
+          if (AMDGPU::isDynamicLDS(*GV))
+            LDSParams.DirectAccess.DynamicLDSGlobals.insert(GV);
+          else
+            LDSParams.DirectAccess.StaticLDSGlobals.insert(GV);
+        }
+      }
+    }
+  };
+
+  PopulateKernelStaticDynamicLDS(LDSUsesInfo.direct_access, true);
+  PopulateKernelStaticDynamicLDS(LDSUsesInfo.indirect_access, false);
+
+  for (auto &K : KernelToLDSParametersMap) {
+    Function *Func = K.first;
+    auto &LDSParams = KernelToLDSParametersMap[Func];
+    if (LDSParams.DirectAccess.StaticLDSGlobals.empty() &&
+        LDSParams.DirectAccess.DynamicLDSGlobals.empty() &&
+        LDSParams.IndirectAccess.StaticLDSGlobals.empty() &&
+        LDSParams.IndirectAccess.DynamicLDSGlobals.empty()) {
+      Changed = false;
+    } else {
+      removeFnAttrFromReachable(CG, Func,
+                                {"amdgpu-no-workitem-id-x",
+                                 "amdgpu-no-workitem-id-y",
+                                 "amdgpu-no-workitem-id-z"});
+      reorderStaticDynamicIndirectLDSSet(LDSParams);
+      buildSwLDSGlobal(Func);
+      buildSwDynLDSGlobal(Func);
+      populateSwMetadataGlobal(Func);
+      populateSwLDSAttributeAndMetadata(Func);
+      populateLDSToReplacementIndicesMap(Func);
+      DomTreeUpdater DTU(DTCallback(*Func),
+                         DomTreeUpdater::UpdateStrategy::Lazy);
+      lowerKernelLDSAccesses(Func, DTU);
+      Changed = true;
+    }
+  }
+
+  // Get the Uses of LDS from non-kernels.
+  getUsesOfLDSByNonKernels(CG, NonKernelToLDSAccessMap, AllNonKernelLDSAccess);
+
+  if (!NonKernelToLDSAccessMap.empty()) {
+    NonKernelLDSParameters NKLDSParams;
+    NKLDSParams.OrderedKernels = getOrderedIndirectLDSAccessingKernels(
+        std::move(KernelsWithIndirectLDSAccess));
+    NKLDSParams.OrdereLDSGlobals =
+        getOrderedNonKernelAllLDSGlobals(std::move(AllNonKernelLDSAccess));
+    buildNonKernelLDSBaseTable(NKLDSParams);
+    buildNonKernelLDSOffsetTable(NKLDSParams);
+    for (auto &K : NonKernelToLDSAccessMap) {
+      Function *Func = K.first;
+      if (!Func->hasFnAttribute(Attribute::SanitizeAddress))
+        continue;
+      DenseSet<GlobalVariable *> &LDSGlobals = K.second;
+      SetVector<GlobalVariable *> OrderedLDSGlobals = sortByName(
+          std::vector<GlobalVariable *>(LDSGlobals.begin(), LDSGlobals.end()));
+      lowerNonKernelLDSAccesses(Func, OrderedLDSGlobals, NKLDSParams);
+    }
+    Changed = true;
+  }
+
+  if (!Changed)
+    return Changed;
+
+  for (auto &GV : make_early_inc_range(M.globals())) {
+    if (AMDGPU::isLDSVariableToLower(GV)) {
+      // probably want to remove from used lists
+      GV.removeDeadConstantUsers();
+      if (GV.use_empty())
+        GV.eraseFromParent();
+    }
+  }
+  return Changed;
+}
+
+class AMDGPUSwLowerLDSLegacy : public ModulePass {
+public:
+  static char ID;
+  AMDGPUSwLowerLDSLegacy() : ModulePass(ID) {}
+  bool runOnModule(Module &M) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addPreserved<DominatorTreeWrapperPass>();
+  }
+};
+} // namespace
+
+char AMDGPUSwLowerLDSLegacy::ID = 0;
+char &llvm::AMDGPUSwLowerLDSLegacyPassID = AMDGPUSwLowerLDSLegacy::ID;
+
+INITIALIZE_PASS(AMDGPUSwLowerLDSLegacy, "amdgpu-sw-lower-lds",
+                "AMDGPU Software lowering of LDS", false, false)
+
+bool AMDGPUSwLowerLDSLegacy::runOnModule(Module &M) {
+  DominatorTreeWrapperPass *const DTW =
+      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+  auto DTCallback = [&DTW](Function &F) -> DominatorTree * {
+    return DTW ? &DTW->getDomTree() : nullptr;
+  };
+  AMDGPUSwLowerLDS SwLowerLDSImpl(M, DTCallback);
+  bool IsChanged = SwLowerLDSImpl.run();
+  return IsChanged;
+}
+
+ModulePass *llvm::createAMDGPUSwLowerLDSLegacyPass() {
+  return new AMDGPUSwLowerLDSLegacy();
+}
+
+PreservedAnalyses AMDGPUSwLowerLDSPass::run(Module &M,
+                                            ModuleAnalysisManager &AM) {
+  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
+  auto DTCallback = [&FAM](Function &F) -> DominatorTree * {
+    return &FAM.getResult<DominatorTreeAnalysis>(F);
+  };
+  AMDGPUSwLowerLDS SwLowerLDSImpl(M, DTCallback);
+  bool IsChanged = SwLowerLDSImpl.run();
+  if (!IsChanged)
+    return PreservedAnalyses::all();
+
+  PreservedAnalyses PA;
+  PA.preserve<DominatorTreeAnalysis>();
+  return PA;
+}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index ce997c659094a..87ddfcfaefc95 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -406,6 +406,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
   initializeSILoadStoreOptimizerPass(*PR);
   initializeAMDGPUCtorDtorLoweringLegacyPass(*PR);
   initializeAMDGPUAlwaysInlinePass(*PR);
+  initializeAMDGPUSwLowerLDSLegacyPass(*PR);
   initializeAMDGPUAttributorLegacyPass(*PR);
   initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
   initializeAMDGPUAnnotateUniformValuesPass(*PR);
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
index c992352cb78da..ee56c48982648 100644
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -74,6 +74,7 @@ add_llvm_target(AMDGPUCodeGen
   AMDGPULowerKernelArguments.cpp
   AMDGPULowerKernelAttributes.cpp
   AMDGPULowerModuleLDSPass.cpp
+  AMDGPUSwLowerLDS.cpp
   AMDGPUMachineCFGStructurizer.cpp
   AMDGPUMachineFunction.cpp
   AMDGPUMachineModuleInfo.cpp
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll
new file mode 100644
index 0000000000000..7fb2366f9843c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+; Test to check indirect dynamic LDS access through a non-kernel from kernel is lowered correctly.
+ at lds_1 = internal addrspace(3) global [1 x i8] poison, align 1
+ at lds_2 = internal addrspace(3) global [1 x i32] poison, align 2
+ at lds_3 = external addrspace(3) global [0 x i8], align 4
+ at lds_4 = external addrspace(3) global [0 x i8], align 8
+
+define void @use_variables() sanitize_address {
+; CHECK-LABEL: define void @use_variables(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP11]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP10]]
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP9]], align 4
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP15]], align 8
+; CHECK-NEXT:    ret void
+;
+  store i8 3, ptr addrspace(3) @lds_3, align 4
+  store i8 3, ptr addrspace(3) @lds_4, align 8
+  ret void
+}
+
+define amdgpu_kernel void @k0() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META2:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP9]], [[TMP7]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP11]], 7
+; CHECK-NEXT:    [[TMP13:%.*]] = udiv i32 [[TMP12]], 8
+; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP13]], 8
+; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP8]], [[TMP14]]
+; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 0), align 4
+; CHECK-NEXT:    [[TMP27:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 1), align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP27]], 7
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i32 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], 8
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 2), align 4
+; CHECK-NEXT:    [[TMP28:%.*]] = add i32 [[TMP15]], [[TMP19]]
+; CHECK-NEXT:    [[TMP26:%.*]] = zext i32 [[TMP28]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP26]], i64 [[TMP23]])
+; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP35]] to ptr addrspace(1)
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    br label [[TMP21]]
+; CHECK:       26:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP24:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP24]]
+; CHECK-NEXT:    [[TMP29:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP29]]
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]
+; CHECK-NEXT:    call void @use_variables()
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP25]], align 1
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP30]], align 2
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP31:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP32:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP33:%.*]] = ptrtoint ptr [[TMP32]] to i64
+; CHECK-NEXT:    [[TMP34:%.*]] = ptrtoint ptr addrspace(1) [[TMP31]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP34]], i64 [[TMP33]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  call void @use_variables()
+  store i8 7, ptr addrspace(3) @lds_1, align 1
+  store i32 8, ptr addrspace(3) @lds_2, align 2
+  ret void
+}
+;.
+; CHECK: [[META2]] = !{i32 0}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll
new file mode 100644
index 0000000000000..be8044d8dae07
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll
@@ -0,0 +1,78 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+; Test to check if direct access of dynamic LDS in a kernel is lowered correctly.
+ at lds_1 = external addrspace(3) global [0 x i8]
+ at lds_2 = external addrspace(3) global [0 x i8]
+
+;.
+; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 1, !absolute_symbol [[META0:![0-9]+]]
+; CHECK: @llvm.amdgcn.k0.dynlds = external addrspace(3) global [0 x i8], no_sanitize_address, align 1, !absolute_symbol [[META1:![0-9]+]]
+; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 0, i32 0 } }, no_sanitize_address
+;.
+define amdgpu_kernel void @k0() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 0, i32 2), align 4
+; CHECK-NEXT:    [[TMP24:%.*]] = add i32 [[TMP8]], [[TMP9]]
+; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP20]], i64 15
+; CHECK-NEXT:    store i32 [[TMP24]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, ptr addrspace(4) [[TMP18]], align 4
+; CHECK-NEXT:    store i32 [[TMP13]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 1), align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = add i32 [[TMP13]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = udiv i32 [[TMP14]], 1
+; CHECK-NEXT:    [[TMP16:%.*]] = mul i32 [[TMP15]], 1
+; CHECK-NEXT:    store i32 [[TMP16]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP24]], [[TMP16]]
+; CHECK-NEXT:    [[TMP21:%.*]] = zext i32 [[TMP17]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP21]], i64 [[TMP23]])
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP19]] to ptr addrspace(1)
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    br label [[TMP7]]
+; CHECK:       21:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP10]]
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP11]], align 4
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP28:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP25:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = ptrtoint ptr addrspace(1) [[TMP28]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP27]], i64 [[TMP26]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  store i8 7, ptr addrspace(3) @lds_1, align 4
+  ;store i8 8, ptr addrspace(3) @lds_2, align 8
+  ret void
+}
+;.
+; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-lds-size"="8,8" }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
+; CHECK: [[META0]] = !{i32 0, i32 1}
+; CHECK: [[META1]] = !{i32 8, i32 9}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll
new file mode 100644
index 0000000000000..3deba8ce138a8
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+; Test to check that when multiple kernels access the same non-kernel function, LDS accesses are lowered correctly.
+ at lds_1 = internal addrspace(3) global [1 x i8] poison, align 1
+ at lds_2 = internal addrspace(3) global [1 x i32] poison, align 2
+ at lds_3 = external addrspace(3) global [0 x i8], align 4
+ at lds_4 = external addrspace(3) global [0 x i8], align 8
+
+define void @use_variables_1() sanitize_address {
+; CHECK-LABEL: define void @use_variables_1(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x [4 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 2
+; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x [4 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 3
+; CHECK-NEXT:    [[TMP12:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP11]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP10]]
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP9]], align 4
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP15]], align 8
+; CHECK-NEXT:    ret void
+;
+  store i8 3, ptr addrspace(3) @lds_3, align 4
+  store i8 3, ptr addrspace(3) @lds_4, align 8
+  ret void
+}
+
+define void @use_variables_2() sanitize_address {
+; CHECK-LABEL: define void @use_variables_2(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x [4 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x [4 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP11]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP10]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP9]], align 1
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP15]], align 2
+; CHECK-NEXT:    ret void
+;
+  store i8 7, ptr addrspace(3) @lds_1, align 1
+  store i32 8, ptr addrspace(3) @lds_2, align 2
+  ret void
+}
+
+define amdgpu_kernel void @k0() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META3:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP9]], [[TMP7]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP11]], 7
+; CHECK-NEXT:    [[TMP13:%.*]] = udiv i32 [[TMP12]], 8
+; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP13]], 8
+; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP8]], [[TMP14]]
+; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP25:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP25]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP25]], 7
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i32 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], 8
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 4
+; CHECK-NEXT:    [[TMP26:%.*]] = add i32 [[TMP15]], [[TMP19]]
+; CHECK-NEXT:    [[TMP27:%.*]] = zext i32 [[TMP26]] to i64
+; CHECK-NEXT:    [[TMP28:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP33:%.*]] = ptrtoint ptr [[TMP28]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP27]], i64 [[TMP33]])
+; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP24]] to ptr addrspace(1)
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    br label [[TMP21]]
+; CHECK:       26:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP22]]
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]
+; CHECK-NEXT:    call void @use_variables_1()
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP23]], align 1
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP29:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP30:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP31:%.*]] = ptrtoint ptr [[TMP30]] to i64
+; CHECK-NEXT:    [[TMP32:%.*]] = ptrtoint ptr addrspace(1) [[TMP29]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP32]], i64 [[TMP31]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  call void @use_variables_1()
+  store i8 7, ptr addrspace(3) @lds_1, align 1
+  ret void
+}
+
+define amdgpu_kernel void @k1() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @k1(
+; CHECK-SAME: ) #[[ATTR2:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META4:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP14:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 2, i32 2), align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP9]], [[TMP7]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP30:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP30]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    [[TMP33:%.*]] = add i32 [[TMP30]], 7
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i32 [[TMP33]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], 8
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 2), align 4
+; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP8]], [[TMP19]]
+; CHECK-NEXT:    store i32 [[TMP20]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 0), align 4
+; CHECK-NEXT:    [[TMP21:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP21]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 1), align 4
+; CHECK-NEXT:    [[TMP22:%.*]] = add i32 [[TMP21]], 7
+; CHECK-NEXT:    [[TMP23:%.*]] = udiv i32 [[TMP22]], 8
+; CHECK-NEXT:    [[TMP24:%.*]] = mul i32 [[TMP23]], 8
+; CHECK-NEXT:    store i32 [[TMP24]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 2), align 4
+; CHECK-NEXT:    [[TMP25:%.*]] = add i32 [[TMP20]], [[TMP24]]
+; CHECK-NEXT:    [[TMP26:%.*]] = zext i32 [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP28:%.*]] = ptrtoint ptr [[TMP27]] to i64
+; CHECK-NEXT:    [[TMP34:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP26]], i64 [[TMP28]])
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP34]] to ptr addrspace(1)
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, align 8
+; CHECK-NEXT:    br label [[TMP14]]
+; CHECK:       26:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP31:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP32:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, i32 [[TMP31]]
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k1.dynlds) ]
+; CHECK-NEXT:    call void @use_variables_1()
+; CHECK-NEXT:    call void @use_variables_2()
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP32]], align 4
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP29:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, align 8
+; CHECK-NEXT:    [[TMP35:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP36:%.*]] = ptrtoint ptr [[TMP35]] to i64
+; CHECK-NEXT:    [[TMP37:%.*]] = ptrtoint ptr addrspace(1) [[TMP29]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP37]], i64 [[TMP36]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  call void @use_variables_1()
+  call void @use_variables_2()
+  store i8 3, ptr addrspace(3) @lds_3, align 4
+  ret void
+}
+;.
+; CHECK: [[META3]] = !{i32 0}
+; CHECK: [[META4]] = !{i32 1}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll
new file mode 100644
index 0000000000000..4384287797f35
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll
@@ -0,0 +1,99 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+; Test to check that malloc and free blocks are placed correctly when multiple
+; blocks and branching are present in the function, and that LDS accesses are lowered correctly.
+
+ at lds_1 = internal addrspace(3) global i32 poison
+ at lds_2 = internal addrspace(3) global i32 poison
+
+;.
+; CHECK: @llvm.amdgcn.sw.lds.test_kernel = internal addrspace(3) global ptr poison, no_sanitize_address, align 4, !absolute_symbol [[META0:![0-9]+]]
+; CHECK: @llvm.amdgcn.sw.lds.test_kernel.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.test_kernel.md.type { %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 0, i32 8, i32 8 }, %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 8, i32 4, i32 4 }, %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 12, i32 4, i32 4 } }, no_sanitize_address
+;.
+define amdgpu_kernel void @test_kernel() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @test_kernel(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP15:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP16:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 2, i32 2), align 4
+; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP15]], [[TMP16]]
+; CHECK-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP18]] to i64
+; CHECK-NEXT:    [[TMP14:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP17]], i64 [[TMP19]])
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP20]] to ptr addrspace(1)
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, align 8
+; CHECK-NEXT:    br label [[TMP7]]
+; CHECK:       14:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, i32 [[TMP10]]
+; CHECK-NEXT:    [[TMP25:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP26:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, i32 [[TMP25]]
+; CHECK-NEXT:    [[TMP12:%.*]] = addrspacecast ptr addrspace(3) [[TMP11]] to ptr addrspace(1)
+; CHECK-NEXT:    [[VAL1:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = addrspacecast ptr addrspace(3) [[TMP26]] to ptr addrspace(1)
+; CHECK-NEXT:    [[VAL2:%.*]] = load i32, ptr addrspace(1) [[TMP13]], align 4
+; CHECK-NEXT:    [[RESULT:%.*]] = add i32 [[VAL1]], [[VAL2]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[RESULT]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[POSITIVE:%.*]], label [[NEGATIVE:%.*]]
+; CHECK:       positive:
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       negative:
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sgt i32 [[VAL1]], 0
+; CHECK-NEXT:    br i1 [[CMP2]], label [[VAL1_POSITIVE:%.*]], label [[VAL1_NEGATIVE:%.*]]
+; CHECK:       val1_positive:
+; CHECK-NEXT:    br label [[CONDFREE]]
+; CHECK:       val1_negative:
+; CHECK-NEXT:    br label [[CONDFREE]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP21:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, align 8
+; CHECK-NEXT:    [[TMP22:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = ptrtoint ptr addrspace(1) [[TMP21]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP24]], i64 [[TMP23]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  %val1 = load i32, ptr addrspace(1) addrspacecast (ptr addrspace(3) @lds_1 to ptr addrspace(1))
+  %val2 = load i32, ptr addrspace(1) addrspacecast (ptr addrspace(3) @lds_2 to ptr addrspace(1))
+
+  %result = add i32 %val1, %val2
+  %cmp = icmp sgt i32 %result, 0
+  br i1 %cmp, label %positive, label %negative
+
+positive:
+  ret void
+
+negative:
+  %cmp2 = icmp sgt i32 %val1, 0
+  br i1 %cmp2, label %val1_positive, label %val1_negative
+
+val1_positive:
+  ret void
+
+val1_negative:
+  ret void
+}
+;.
+; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-lds-size"="16" }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
+; CHECK: [[META0]] = !{i32 0, i32 1}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll
new file mode 100644
index 0000000000000..2548c872347b8
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll
@@ -0,0 +1,106 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+; Test to check if static and dynamic LDS accesses are lowered correctly when a non-kernel
+; function is called from a kernel.
+ at lds_1 = internal addrspace(3) global [1 x i8] poison, align 1
+ at lds_2 = internal addrspace(3) global [1 x i32] poison, align 2
+ at lds_3 = external addrspace(3) global [0 x i8], align 4
+ at lds_4 = external addrspace(3) global [0 x i8], align 8
+
+define void @use_variables() sanitize_address {
+; CHECK-LABEL: define void @use_variables(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP11]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP10]]
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP9]], align 4
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP15]], align 8
+; CHECK-NEXT:    ret void
+;
+  store i8 3, ptr addrspace(3) @lds_3, align 4
+  store i8 3, ptr addrspace(3) @lds_4, align 8
+  ret void
+}
+
+define amdgpu_kernel void @k0() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META2:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP9]], [[TMP7]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP11]], 7
+; CHECK-NEXT:    [[TMP13:%.*]] = udiv i32 [[TMP12]], 8
+; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP13]], 8
+; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP8]], [[TMP14]]
+; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 0), align 4
+; CHECK-NEXT:    [[TMP27:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 1), align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP27]], 7
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i32 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], 8
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 2), align 4
+; CHECK-NEXT:    [[TMP28:%.*]] = add i32 [[TMP15]], [[TMP19]]
+; CHECK-NEXT:    [[TMP26:%.*]] = zext i32 [[TMP28]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP26]], i64 [[TMP23]])
+; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP35]] to ptr addrspace(1)
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    br label [[TMP21]]
+; CHECK:       26:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP24:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP24]]
+; CHECK-NEXT:    [[TMP29:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP29]]
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]
+; CHECK-NEXT:    call void @use_variables()
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP25]], align 1
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP30]], align 2
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP31:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP32:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP33:%.*]] = ptrtoint ptr [[TMP32]] to i64
+; CHECK-NEXT:    [[TMP34:%.*]] = ptrtoint ptr addrspace(1) [[TMP31]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP34]], i64 [[TMP33]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  call void @use_variables()
+  store i8 7, ptr addrspace(3) @lds_1, align 1
+  store i32 8, ptr addrspace(3) @lds_2, align 2
+  ret void
+}
+;.
+; CHECK: [[META2]] = !{i32 0}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll
new file mode 100644
index 0000000000000..35f96fd21ee55
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll
@@ -0,0 +1,99 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+; Test to check if static and dynamic LDS accesses are lowered correctly in a kernel.
+ at lds_1 = internal addrspace(3) global [1 x i8] poison, align 4
+ at lds_2 = internal addrspace(3) global [1 x i32] poison, align 8
+ at lds_3 = external addrspace(3) global [0 x i8], align 4
+ at lds_4 = external addrspace(3) global [0 x i8], align 8
+
+;.
+; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 8, !absolute_symbol [[META0:![0-9]+]]
+; CHECK: @llvm.amdgcn.k0.dynlds = external addrspace(3) global [0 x i8], no_sanitize_address, align 8, !absolute_symbol [[META1:![0-9]+]]
+; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 1, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 16, i32 4, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 24, i32 0, i32 0 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 24, i32 0, i32 0 } }, no_sanitize_address
+;.
+define amdgpu_kernel void @k0() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP9]], [[TMP7]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP11]], 7
+; CHECK-NEXT:    [[TMP13:%.*]] = udiv i32 [[TMP12]], 8
+; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP13]], 8
+; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP8]], [[TMP14]]
+; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 0), align 4
+; CHECK-NEXT:    [[TMP31:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP31]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 1), align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP31]], 7
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i32 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], 8
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 2), align 4
+; CHECK-NEXT:    [[TMP32:%.*]] = add i32 [[TMP15]], [[TMP19]]
+; CHECK-NEXT:    [[TMP30:%.*]] = zext i32 [[TMP32]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP39:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP30]], i64 [[TMP23]])
+; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP39]] to ptr addrspace(1)
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    br label [[TMP21]]
+; CHECK:       26:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP24:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP24]]
+; CHECK-NEXT:    [[TMP26:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP26]]
+; CHECK-NEXT:    [[TMP28:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP28]]
+; CHECK-NEXT:    [[TMP33:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 0), align 4
+; CHECK-NEXT:    [[TMP34:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP33]]
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP25]], align 4
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP27]], align 8
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP29]], align 4
+; CHECK-NEXT:    store i8 8, ptr addrspace(3) [[TMP34]], align 8
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP35:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP36:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP37:%.*]] = ptrtoint ptr [[TMP36]] to i64
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr addrspace(1) [[TMP35]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP38]], i64 [[TMP37]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  store i8 7, ptr addrspace(3) @lds_1, align 4
+  store i32 8, ptr addrspace(3) @lds_2, align 8
+  store i8 7, ptr addrspace(3) @lds_3, align 4
+  store i8 8, ptr addrspace(3) @lds_4, align 8
+  ret void
+}
+;.
+; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-lds-size"="24,24" }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
+; CHECK: [[META0]] = !{i32 0, i32 1}
+; CHECK: [[META1]] = !{i32 24, i32 25}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll
new file mode 100644
index 0000000000000..5b285f467c5fa
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll
@@ -0,0 +1,82 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+; Test to check if LDS accesses are lowered correctly when LDS is passed as a function
+; argument to a non-kernel function.
+
+ at lds_var = internal addrspace(3) global [1024 x i32] poison, align 4
+
+;.
+; CHECK: @llvm.amdgcn.sw.lds.my_kernel = internal addrspace(3) global ptr poison, no_sanitize_address, align 4, !absolute_symbol [[META0:![0-9]+]]
+; CHECK: @llvm.amdgcn.sw.lds.my_kernel.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.my_kernel.md.type { %llvm.amdgcn.sw.lds.my_kernel.md.item { i32 0, i32 8, i32 8 }, %llvm.amdgcn.sw.lds.my_kernel.md.item { i32 8, i32 4096, i32 4096 } }, no_sanitize_address
+;.
+define void @my_function(ptr addrspace(3) %lds_arg) sanitize_address {
+; CHECK-LABEL: define void @my_function(
+; CHECK-SAME: ptr addrspace(3) [[LDS_ARG:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[LDS_VAL:%.*]] = load i32, ptr addrspace(3) [[LDS_ARG]], align 4
+; CHECK-NEXT:    [[NEW_LDS_VAL:%.*]] = add i32 [[LDS_VAL]], 1
+; CHECK-NEXT:    store i32 [[NEW_LDS_VAL]], ptr addrspace(3) [[LDS_ARG]], align 4
+; CHECK-NEXT:    ret void
+;
+  %lds_val = load i32, ptr addrspace(3) %lds_arg, align 4
+  %new_lds_val = add i32 %lds_val, 1
+  store i32 %new_lds_val, ptr addrspace(3) %lds_arg, align 4
+  ret void
+}
+
+define amdgpu_kernel void @my_kernel() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @my_kernel(
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_MY_KERNEL_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_MY_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = add i32 [[TMP11]], [[TMP12]]
+; CHECK-NEXT:    [[TMP13:%.*]] = zext i32 [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP10:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP13]], i64 [[TMP15]])
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP16]] to ptr addrspace(1)
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, align 8
+; CHECK-NEXT:    br label [[TMP7]]
+; CHECK:       14:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_MY_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, i32 [[TMP8]]
+; CHECK-NEXT:    [[LDS_PTR:%.*]] = getelementptr [1024 x i32], ptr addrspace(3) [[TMP9]], i32 0, i32 0
+; CHECK-NEXT:    call void @my_function(ptr addrspace(3) [[LDS_PTR]])
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP17:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, align 8
+; CHECK-NEXT:    [[TMP18:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[TMP18]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = ptrtoint ptr addrspace(1) [[TMP17]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP20]], i64 [[TMP19]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  %lds_ptr = getelementptr [1024 x i32], ptr addrspace(3) @lds_var, i32 0, i32 0
+  call void @my_function(ptr addrspace(3) %lds_ptr)
+  ret void
+}
+;.
+; CHECK: attributes #[[ATTR0]] = { sanitize_address }
+; CHECK: attributes #[[ATTR1]] = { sanitize_address "amdgpu-lds-size"="4104" }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR4:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
+; CHECK: [[META0]] = !{i32 0, i32 1}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll
new file mode 100644
index 0000000000000..9570c41838eef
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll
@@ -0,0 +1,255 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+; Test to check if LDS accesses are lowered correctly when a call is made to a nested non-kernel function.
+
+ at A = external addrspace(3) global [8 x ptr]
+ at B = external addrspace(3) global [0 x i32]
+
+define amdgpu_kernel void @kernel_0() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_0(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META2:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP9]], [[TMP10]]
+; CHECK-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP11]], i64 [[TMP14]])
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP19]] to ptr addrspace(1)
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_0, align 8
+; CHECK-NEXT:    br label [[TMP7]]
+; CHECK:       14:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    call void @call_store_A()
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP15:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_0, align 8
+; CHECK-NEXT:    [[TMP16:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[TMP16]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = ptrtoint ptr addrspace(1) [[TMP15]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP18]], i64 [[TMP17]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  call void @call_store_A()
+  ret void
+}
+
+define amdgpu_kernel void @kernel_1() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_1(
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META3:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP14:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, align 4
+; CHECK-NEXT:    [[TMP20:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 0, i32 2), align 4
+; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[TMP12]], [[TMP20]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
+; CHECK-NEXT:    store i32 [[TMP21]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(4) [[TMP7]], align 4
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 1, i32 1), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = add i32 [[TMP8]], 3
+; CHECK-NEXT:    [[TMP10:%.*]] = udiv i32 [[TMP9]], 4
+; CHECK-NEXT:    [[TMP11:%.*]] = mul i32 [[TMP10]], 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP21]], [[TMP11]]
+; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
+; CHECK-NEXT:    [[TMP17:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP16]], i64 [[TMP18]])
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP19]] to ptr addrspace(1)
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_1, align 8
+; CHECK-NEXT:    br label [[TMP14]]
+; CHECK:       21:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel_1.dynlds) ]
+; CHECK-NEXT:    [[PTR:%.*]] = call ptr @get_B_ptr()
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP22:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_1, align 8
+; CHECK-NEXT:    [[TMP23:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP24:%.*]] = ptrtoint ptr [[TMP23]] to i64
+; CHECK-NEXT:    [[TMP25:%.*]] = ptrtoint ptr addrspace(1) [[TMP22]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP25]], i64 [[TMP24]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  %ptr = call ptr @get_B_ptr()
+  ret void
+}
+
+define amdgpu_kernel void @kernel_2() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_2(
+; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META4:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_2_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_2_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP9]], [[TMP10]]
+; CHECK-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP12]] to i64
+; CHECK-NEXT:    [[TMP13:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP11]], i64 [[TMP14]])
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP19]] to ptr addrspace(1)
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_2, align 8
+; CHECK-NEXT:    br label [[TMP7]]
+; CHECK:       14:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    call void @store_A()
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP15:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_2, align 8
+; CHECK-NEXT:    [[TMP16:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[TMP16]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = ptrtoint ptr addrspace(1) [[TMP15]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP18]], i64 [[TMP17]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  call void @store_A()
+  ret void
+}
+
+define amdgpu_kernel void @kernel_3() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_3(
+; CHECK-SAME: ) #[[ATTR1]] !llvm.amdgcn.lds.kernel.id [[META5:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP14:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, align 4
+; CHECK-NEXT:    [[TMP20:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 0, i32 2), align 4
+; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[TMP12]], [[TMP20]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
+; CHECK-NEXT:    store i32 [[TMP21]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(4) [[TMP7]], align 4
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 1, i32 1), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = add i32 [[TMP8]], 3
+; CHECK-NEXT:    [[TMP10:%.*]] = udiv i32 [[TMP9]], 4
+; CHECK-NEXT:    [[TMP11:%.*]] = mul i32 [[TMP10]], 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP21]], [[TMP11]]
+; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
+; CHECK-NEXT:    [[TMP17:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP16]], i64 [[TMP18]])
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP19]] to ptr addrspace(1)
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_3, align 8
+; CHECK-NEXT:    br label [[TMP14]]
+; CHECK:       21:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel_3.dynlds) ]
+; CHECK-NEXT:    [[PTR:%.*]] = call ptr @get_B_ptr()
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP22:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_3, align 8
+; CHECK-NEXT:    [[TMP23:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP24:%.*]] = ptrtoint ptr [[TMP23]] to i64
+; CHECK-NEXT:    [[TMP25:%.*]] = ptrtoint ptr addrspace(1) [[TMP22]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP25]], i64 [[TMP24]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  %ptr = call ptr @get_B_ptr()
+  ret void
+}
+
+define private void @call_store_A() sanitize_address {
+; CHECK-LABEL: define private void @call_store_A(
+; CHECK-SAME: ) #[[ATTR2:[0-9]+]] {
+; CHECK-NEXT:    call void @store_A()
+; CHECK-NEXT:    ret void
+;
+  call void @store_A()
+  ret void
+}
+
+define private void @store_A() sanitize_address {
+; CHECK-LABEL: define private void @store_A(
+; CHECK-SAME: ) #[[ATTR2]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [4 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [4 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr
+; CHECK-NEXT:    store ptr [[TMP10]], ptr null, align 8
+; CHECK-NEXT:    ret void
+;
+  store ptr addrspacecast (ptr addrspace(3) @A to ptr), ptr null
+  ret void
+}
+
+define private ptr @get_B_ptr() sanitize_address {
+; CHECK-LABEL: define private ptr @get_B_ptr(
+; CHECK-SAME: ) #[[ATTR2]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [4 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [4 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr
+; CHECK-NEXT:    ret ptr [[TMP10]]
+;
+  ret ptr addrspacecast (ptr addrspace(3) @B to ptr)
+}
+;.
+; CHECK: [[META2]] = !{i32 0}
+; CHECK: [[META3]] = !{i32 1}
+; CHECK: [[META4]] = !{i32 2}
+; CHECK: [[META5]] = !{i32 3}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll
new file mode 100644
index 0000000000000..deb4475878356
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll
@@ -0,0 +1,89 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+; Test to check that static LDS is lowered correctly when a non-kernel function with LDS accesses is called from a kernel.
+ at lds_1 = internal addrspace(3) global [1 x i8] poison, align 1
+ at lds_2 = internal addrspace(3) global [1 x i32] poison, align 2
+ at lds_3 = external addrspace(3) global [3 x i8], align 4
+ at lds_4 = external addrspace(3) global [4 x i8], align 8
+
+define void @use_variables() sanitize_address {
+; CHECK-LABEL: define void @use_variables(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP11]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP10]]
+; CHECK-NEXT:    [[X:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr
+; CHECK-NEXT:    [[TMP16:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr
+; CHECK-NEXT:    store i8 3, ptr [[TMP16]], align 4
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP15]], align 8
+; CHECK-NEXT:    ret void
+;
+  %X = addrspacecast ptr addrspace(3) @lds_3 to ptr
+  store i8 3, ptr addrspacecast( ptr addrspace(3) @lds_3 to ptr), align 4
+  store i8 3, ptr addrspace(3) @lds_4, align 8
+  ret void
+}
+
+define amdgpu_kernel void @k0() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META1:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 0), align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 2), align 4
+; CHECK-NEXT:    [[TMP16:%.*]] = add i32 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP16]] to i64
+; CHECK-NEXT:    [[TMP23:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP24:%.*]] = ptrtoint ptr [[TMP23]] to i64
+; CHECK-NEXT:    [[TMP12:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP15]], i64 [[TMP24]])
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP12]] to ptr addrspace(1)
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    br label [[TMP7]]
+; CHECK:       14:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP10]]
+; CHECK-NEXT:    [[TMP17:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP17]]
+; CHECK-NEXT:    call void @use_variables()
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP11]], align 1
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP18]], align 2
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP19:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP20:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr addrspace(1) [[TMP19]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP22]], i64 [[TMP21]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  call void @use_variables()
+  store i8 7, ptr addrspace(3) @lds_1, align 1
+  store i32 8, ptr addrspace(3) @lds_2, align 2
+  ret void
+}
+;.
+; CHECK: [[META1]] = !{i32 0}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll
new file mode 100644
index 0000000000000..4849b804835e0
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll
@@ -0,0 +1,68 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+; Test to check that static LDS accesses in a kernel are lowered correctly.
+ at lds_1 = internal addrspace(3) global [1 x i8] poison, align 4
+ at lds_2 = internal addrspace(3) global [1 x i32] poison, align 8
+
+;.
+; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 8, !absolute_symbol [[META0:![0-9]+]]
+; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 1, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 16, i32 4, i32 8 } }, no_sanitize_address
+;.
+define amdgpu_kernel void @k0() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 4
+; CHECK-NEXT:    [[TMP16:%.*]] = add i32 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP16]] to i64
+; CHECK-NEXT:    [[TMP23:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP11:%.*]] = ptrtoint ptr [[TMP23]] to i64
+; CHECK-NEXT:    [[TMP12:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP15]], i64 [[TMP11]])
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP12]] to ptr addrspace(1)
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    br label [[TMP7]]
+; CHECK:       14:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP10]]
+; CHECK-NEXT:    [[TMP17:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP17]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP18]], align 4
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP24]], align 2
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP19:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP20:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr addrspace(1) [[TMP19]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP22]], i64 [[TMP21]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  store i8 7, ptr addrspace(3) @lds_1, align 4
+  store i32 8, ptr addrspace(3) @lds_2, align 2
+  ret void
+}
+;.
+; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-lds-size"="24" }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
+; CHECK: [[META0]] = !{i32 0, i32 1}
+;.

>From bf0447e63c140bbe6f3190066f20183916409040 Mon Sep 17 00:00:00 2001
From: skc7 <Krishna.Sankisa at amd.com>
Date: Thu, 11 Jul 2024 21:50:09 +0530
Subject: [PATCH 2/2] [AMDGPU] Implement final LDS lowering to global memory

---
 llvm/lib/Target/AMDGPU/AMDGPU.h               |   6 +-
 llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def |   2 +-
 llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp   | 275 ++++++++++++++++--
 ...pu-sw-lower-lds-dynamic-indirect-access.ll |  53 +++-
 .../amdgpu-sw-lower-lds-dynamic-lds-test.ll   |  16 +-
 ...ds-multi-static-dynamic-indirect-access.ll |  71 ++++-
 ...gpu-sw-lower-lds-multiple-blocks-return.ll |  17 +-
 ...ower-lds-static-dynamic-indirect-access.ll |  53 +++-
 ...pu-sw-lower-lds-static-dynamic-lds-test.ll |  41 ++-
 ...s-static-indirect-access-function-param.ll |  14 +-
 ...lower-lds-static-indirect-access-nested.ll | 132 +++++----
 ...gpu-sw-lower-lds-static-indirect-access.ll |  47 ++-
 .../amdgpu-sw-lower-lds-static-lds-test.ll    |  25 +-
 13 files changed, 606 insertions(+), 146 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index 4391aa2e861f8..2bd2622eef713 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -265,10 +265,12 @@ struct AMDGPUAlwaysInlinePass : PassInfoMixin<AMDGPUAlwaysInlinePass> {
 
 void initializeAMDGPUSwLowerLDSLegacyPass(PassRegistry &);
 extern char &AMDGPUSwLowerLDSLegacyPassID;
-ModulePass *createAMDGPUSwLowerLDSLegacyPass();
+ModulePass *
+createAMDGPUSwLowerLDSLegacyPass(const AMDGPUTargetMachine *TM = nullptr);
 
 struct AMDGPUSwLowerLDSPass : PassInfoMixin<AMDGPUSwLowerLDSPass> {
-  AMDGPUSwLowerLDSPass() {}
+  const AMDGPUTargetMachine &TM;
+  AMDGPUSwLowerLDSPass(const AMDGPUTargetMachine &TM_) : TM(TM_) {}
   PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
 };
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
index 80f9249f25368..a3167a7ec5e65 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
@@ -22,7 +22,7 @@ MODULE_PASS("amdgpu-lower-buffer-fat-pointers",
             AMDGPULowerBufferFatPointersPass(*this))
 MODULE_PASS("amdgpu-lower-ctor-dtor", AMDGPUCtorDtorLoweringPass())
 MODULE_PASS("amdgpu-lower-module-lds", AMDGPULowerModuleLDSPass(*this))
-MODULE_PASS("amdgpu-sw-lower-lds", AMDGPUSwLowerLDSPass())
+MODULE_PASS("amdgpu-sw-lower-lds", AMDGPUSwLowerLDSPass(*this))
 MODULE_PASS("amdgpu-printf-runtime-binding", AMDGPUPrintfRuntimeBindingPass())
 MODULE_PASS("amdgpu-unify-metadata", AMDGPUUnifyMetadataPass())
 #undef MODULE_PASS
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
index b7f27c40faf12..d61faa039e4ca 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
@@ -7,9 +7,8 @@
 //===----------------------------------------------------------------------===//
 //
 // This pass lowers the local data store, LDS, uses in kernel and non-kernel
-// functions in module and packs them together as single allocation.
-// Packed LDS Layout is emulated in the dynamically allocated device global
-// memory.
+// functions in the module to use dynamically allocated global memory.
+// A packed LDS layout is emulated in the global memory.
 //
 // Replacement of Kernel LDS accesses:
 //    For a kernel, LDS access can be static or dynamic which are direct
@@ -80,6 +79,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "AMDGPU.h"
+#include "AMDGPUTargetMachine.h"
 #include "Utils/AMDGPUMemoryUtils.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseSet.h"
@@ -89,7 +89,11 @@
 #include "llvm/ADT/StringRef.h"
 #include "llvm/Analysis/CallGraph.h"
 #include "llvm/Analysis/DomTreeUpdater.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
 #include "llvm/IR/Constants.h"
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
@@ -98,6 +102,7 @@
 #include "llvm/InitializePasses.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
 #include "llvm/Transforms/Utils/ModuleUtils.h"
 
 #include <algorithm>
@@ -129,21 +134,24 @@ struct KernelLDSParameters {
   DenseMap<GlobalVariable *, SmallVector<uint32_t, 3>>
       LDSToReplacementIndicesMap;
   uint32_t MallocSize = 0;
+  uint32_t LDSSize = 0;
+  SmallVector<std::pair<uint32_t, uint32_t>, 64> RedzoneOffsetAndSizeVector;
 };
 
 // Struct to store info for creation of offset table
 // for all the non-kernel LDS accesses.
 struct NonKernelLDSParameters {
-  GlobalVariable *LDSBaseTable{nullptr};
-  GlobalVariable *LDSOffsetTable{nullptr};
+  GlobalVariable *LDSBaseTable = nullptr;
+  GlobalVariable *LDSOffsetTable = nullptr;
   SetVector<Function *> OrderedKernels;
   SetVector<GlobalVariable *> OrdereLDSGlobals;
 };
 
 class AMDGPUSwLowerLDS {
 public:
-  AMDGPUSwLowerLDS(Module &Mod, DomTreeCallback Callback)
-      : M(Mod), IRB(M.getContext()), DTCallback(Callback) {}
+  AMDGPUSwLowerLDS(Module &Mod, const AMDGPUTargetMachine &TM,
+                   DomTreeCallback Callback)
+      : M(Mod), AMDGPUTM(TM), IRB(M.getContext()), DTCallback(Callback) {}
   bool run();
   void
   getUsesOfLDSByNonKernels(const CallGraph &CG, FunctionVariableMap &Functions,
@@ -157,7 +165,15 @@ class AMDGPUSwLowerLDS {
   void populateSwMetadataGlobal(Function *Func);
   void populateSwLDSAttributeAndMetadata(Function *Func);
   void populateLDSToReplacementIndicesMap(Function *Func);
+  void getLDSMemoryInstructions(Function *Func,
+                                SetVector<Instruction *> &LDSInstructions);
   void replaceKernelLDSAccesses(Function *Func);
+  Value *getTranslatedGlobalMemoryGEPOfLDSPointer(Value *LoadMallocPtr,
+                                                  Value *LDSPtr);
+  void translateLDSMemoryOperationsToGlobalMemory(
+      Function *Func, Value *LoadMallocPtr,
+      SetVector<Instruction *> &LDSInstructions);
+  void poisonRedzones(Function *Func, Value *MallocPtr);
   void lowerKernelLDSAccesses(Function *Func, DomTreeUpdater &DTU);
   void buildNonKernelLDSOffsetTable(NonKernelLDSParameters &NKLDSParams);
   void buildNonKernelLDSBaseTable(NonKernelLDSParameters &NKLDSParams);
@@ -174,6 +190,7 @@ class AMDGPUSwLowerLDS {
 
 private:
   Module &M;
+  const AMDGPUTargetMachine &AMDGPUTM;
   IRBuilder<> IRB;
   DomTreeCallback DTCallback;
   DenseMap<Function *, KernelLDSParameters> KernelToLDSParametersMap;
@@ -223,11 +240,6 @@ void AMDGPUSwLowerLDS::getUsesOfLDSByNonKernels(
     if (!AMDGPU::isLDSVariableToLower(*GV))
       continue;
 
-    if (GV->isAbsoluteSymbolRef()) {
-      report_fatal_error(
-          "LDS variables with absolute addresses are unimplemented.");
-    }
-
     for (User *V : GV->users()) {
       if (auto *I = dyn_cast<Instruction>(V)) {
         Function *F = I->getFunction();
@@ -313,19 +325,60 @@ void AMDGPUSwLowerLDS::buildSwDynLDSGlobal(Function *Func) {
 void AMDGPUSwLowerLDS::populateSwLDSAttributeAndMetadata(Function *Func) {
   auto &LDSParams = KernelToLDSParametersMap[Func];
   bool IsDynLDSUsed = LDSParams.SwDynLDS ? true : false;
-  uint32_t Offset = LDSParams.MallocSize;
+  uint32_t Offset = LDSParams.LDSSize;
   recordLDSAbsoluteAddress(M, LDSParams.SwLDS, 0);
   addLDSSizeAttribute(Func, Offset, IsDynLDSUsed);
   if (LDSParams.SwDynLDS)
     recordLDSAbsoluteAddress(M, LDSParams.SwDynLDS, Offset);
 }
 
+static uint64_t getRedzoneSizeForScale(int AsanScale) {
+  return std::max(32U, 1U << AsanScale);
+}
+
+static uint64_t getMinRedzoneSizeForGlobal(int AsanScale) {
+  return getRedzoneSizeForScale(AsanScale);
+}
+
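+// Compute the right redzone size for an LDS item, following the heuristic
+// ASan uses for globals: roughly 1/4 of the object size, clamped to
+// [MinRZ, 256KiB] and rounded so size plus redzone is a multiple of MinRZ.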
+static uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes) {
+  constexpr uint64_t kMaxRZ = 1 << 18;
+  const uint64_t MinRZ = getMinRedzoneSizeForGlobal(AsanScale);
+
+  uint64_t RZ = 0;
+  if (SizeInBytes <= MinRZ / 2) {
+    // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is
+    // at least 32 bytes, optimize when SizeInBytes is less than or equal to
+    // half of MinRZ.
+    RZ = MinRZ - SizeInBytes;
+  } else {
+    // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
+    RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
+
+    // Round up to multiple of MinRZ.
+    if (SizeInBytes % MinRZ)
+      RZ += MinRZ - (SizeInBytes % MinRZ);
+  }
+
+  assert((RZ + SizeInBytes) % MinRZ == 0);
+
+  return RZ;
+}
+
 void AMDGPUSwLowerLDS::populateSwMetadataGlobal(Function *Func) {
   // Create new metadata global for every kernel and initialize the
   // start offsets and sizes corresponding to each LDS accesses.
   auto &LDSParams = KernelToLDSParametersMap[Func];
   auto &Ctx = M.getContext();
   auto &DL = M.getDataLayout();
+
+  // Get Shadow mapping scale.
+  uint64_t ShadowBase;
+  int AsanScale;
+  bool OrShadowOffset;
+  unsigned LongSize = M.getDataLayout().getPointerSizeInBits();
+  getAddressSanitizerParams(Triple(AMDGPUTM.getTargetTriple()), LongSize, false,
+                            &ShadowBase, &AsanScale, &OrShadowOffset);
+
   std::vector<Type *> Items;
   Type *Int32Ty = IRB.getInt32Ty();
   std::vector<Constant *> Initializers;
@@ -369,10 +422,21 @@ void AMDGPUSwLowerLDS::populateSwMetadataGlobal(Function *Func) {
           Items.push_back(LDSItemTy);
           Constant *ItemStartOffset = ConstantInt::get(Int32Ty, MallocSize);
           Constant *SizeInBytesConst = ConstantInt::get(Int32Ty, SizeInBytes);
-          uint64_t AlignedSize = alignTo(SizeInBytes, MaxAlignment);
+          // Get the redzone size corresponding to this item's size.
+          const uint64_t RightRedzoneSize =
+              getRedzoneSizeForGlobal(AsanScale, SizeInBytes);
+          // Update MallocSize with current size and redzone size.
+          MallocSize += SizeInBytes;
+          LDSParams.RedzoneOffsetAndSizeVector.emplace_back(MallocSize,
+                                                            RightRedzoneSize);
+          MallocSize += RightRedzoneSize;
+          // Align current size plus redzone.
+          uint64_t AlignedSize =
+              alignTo(SizeInBytes + RightRedzoneSize, MaxAlignment);
           Constant *AlignedSizeInBytesConst =
               ConstantInt::get(Int32Ty, AlignedSize);
-          MallocSize += AlignedSize;
+          // Align MallocSize to MaxAlignment.
+          MallocSize = alignTo(MallocSize, MaxAlignment);
           Constant *InitItem =
               ConstantStruct::get(LDSItemTy, {ItemStartOffset, SizeInBytesConst,
                                               AlignedSizeInBytesConst});
@@ -387,10 +451,14 @@ void AMDGPUSwLowerLDS::populateSwMetadataGlobal(Function *Func) {
   buildInitializerForSwLDSMD(LDSParams.DirectAccess.DynamicLDSGlobals);
   buildInitializerForSwLDSMD(LDSParams.IndirectAccess.DynamicLDSGlobals);
 
+  // Update the LDS size used by the kernel.
+  Type *Ty = LDSParams.SwLDS->getValueType();
+  const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
+  uint64_t AlignedSize = alignTo(SizeInBytes, MaxAlignment);
+  LDSParams.LDSSize = AlignedSize;
   SmallString<128> MDTypeStr;
   raw_svector_ostream MDTypeOS(MDTypeStr);
   MDTypeOS << "llvm.amdgcn.sw.lds." << Func->getName() << ".md.type";
-
   StructType *MetadataStructType =
       StructType::create(Ctx, Items, MDTypeOS.str());
   SmallString<128> MDStr;
@@ -555,12 +623,129 @@ void AMDGPUSwLowerLDS::updateMallocSizeForDynamicLDS(
   }
 }
 
+static DebugLoc getOrCreateDebugLoc(const Instruction *InsertBefore,
+                                    DISubprogram *SP) {
+  assert(InsertBefore);
+  if (InsertBefore->getDebugLoc())
+    return InsertBefore->getDebugLoc();
+  if (SP)
+    return DILocation::get(SP->getContext(), SP->getLine(), 1, SP);
+  return DebugLoc();
+}
+
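+// Collect the load, store, atomicrmw and cmpxchg instructions in Func whose
+// pointer operand is in the LDS (local) address space.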
+void AMDGPUSwLowerLDS::getLDSMemoryInstructions(
+    Function *Func, SetVector<Instruction *> &LDSInstructions) {
+  for (BasicBlock &BB : *Func) {
+    for (Instruction &Inst : BB) {
+      if (LoadInst *LI = dyn_cast<LoadInst>(&Inst)) {
+        if (LI->getPointerAddressSpace() == AMDGPUAS::LOCAL_ADDRESS)
+          LDSInstructions.insert(&Inst);
+      } else if (StoreInst *SI = dyn_cast<StoreInst>(&Inst)) {
+        if (SI->getPointerAddressSpace() == AMDGPUAS::LOCAL_ADDRESS)
+          LDSInstructions.insert(&Inst);
+      } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&Inst)) {
+        if (RMW->getPointerAddressSpace() == AMDGPUAS::LOCAL_ADDRESS)
+          LDSInstructions.insert(&Inst);
+      } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(&Inst)) {
+        if (XCHG->getPointerAddressSpace() == AMDGPUAS::LOCAL_ADDRESS)
+          LDSInstructions.insert(&Inst);
+      } else
+        continue;
+    }
+  }
+}
+
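+// Return the global-memory address equivalent of an LDS pointer: the LDS
+// pointer value is reinterpreted as a byte offset into the malloc'ed buffer.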
+Value *
+AMDGPUSwLowerLDS::getTranslatedGlobalMemoryGEPOfLDSPointer(Value *LoadMallocPtr,
+                                                           Value *LDSPtr) {
+  assert(LDSPtr && "Invalid LDS pointer operand");
+  Value *PtrToInt = IRB.CreatePtrToInt(LDSPtr, IRB.getInt32Ty());
+  Value *GEP =
+      IRB.CreateInBoundsGEP(IRB.getInt8Ty(), LoadMallocPtr, {PtrToInt});
+  return GEP;
+}
+
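+// Rewrite the collected LDS loads, stores and atomics so that they operate on
+// the corresponding addresses in the malloc'ed global-memory buffer.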
+void AMDGPUSwLowerLDS::translateLDSMemoryOperationsToGlobalMemory(
+    Function *Func, Value *LoadMallocPtr,
+    SetVector<Instruction *> &LDSInstructions) {
+  LLVM_DEBUG(dbgs() << "Translating LDS memory operations to global memory: "
+                    << Func->getName());
+  for (Instruction *Inst : LDSInstructions) {
+    IRB.SetInsertPoint(Inst);
+    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+      Value *LIOperand = LI->getPointerOperand();
+      Value *Replacement =
+          getTranslatedGlobalMemoryGEPOfLDSPointer(LoadMallocPtr, LIOperand);
+      Value *NewLI = IRB.CreateAlignedLoad(LI->getType(), Replacement,
+                                           LI->getAlign(), LI->isVolatile());
+      LI->replaceAllUsesWith(NewLI);
+      LI->eraseFromParent();
+    } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+      Value *SIOperand = SI->getPointerOperand();
+      Value *Replacement =
+          getTranslatedGlobalMemoryGEPOfLDSPointer(LoadMallocPtr, SIOperand);
+      Value *NewSI = IRB.CreateAlignedStore(SI->getValueOperand(), Replacement,
+                                            SI->getAlign(), SI->isVolatile());
+      SI->replaceAllUsesWith(NewSI);
+      SI->eraseFromParent();
+    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {
+      Value *RMWPtrOperand = RMW->getPointerOperand();
+      Value *RMWValOperand = RMW->getValOperand();
+      Value *Replacement = getTranslatedGlobalMemoryGEPOfLDSPointer(
+          LoadMallocPtr, RMWPtrOperand);
+      Value *NewRMW = IRB.CreateAtomicRMW(
+          RMW->getOperation(), Replacement, RMWValOperand, RMW->getAlign(),
+          RMW->getOrdering(), RMW->getSyncScopeID());
+      cast<AtomicRMWInst>(NewRMW)->setVolatile(RMW->isVolatile());
+      RMW->replaceAllUsesWith(NewRMW);
+      RMW->eraseFromParent();
+    } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(Inst)) {
+      Value *XCHGPtrOperand = XCHG->getPointerOperand();
+      Value *Replacement = getTranslatedGlobalMemoryGEPOfLDSPointer(
+          LoadMallocPtr, XCHGPtrOperand);
+      Value *NewXCHG = IRB.CreateAtomicCmpXchg(
+          Replacement, XCHG->getCompareOperand(), XCHG->getNewValOperand(),
+          XCHG->getAlign(), XCHG->getSuccessOrdering(),
+          XCHG->getFailureOrdering(), XCHG->getSyncScopeID());
+      cast<AtomicRMWInst>(NewXCHG)->setVolatile(XCHG->isVolatile());
+      XCHG->replaceAllUsesWith(NewXCHG);
+      XCHG->eraseFromParent();
+    } else
+      report_fatal_error("Unimplemented LDS lowering instruction");
+  }
+  return;
+}
+
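+// Poison the redzones between LDS items in the malloc'ed buffer by calling
+// __asan_poison_region for each recorded (offset, size) pair.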
+void AMDGPUSwLowerLDS::poisonRedzones(Function *Func, Value *MallocPtr) {
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  Type *Int64Ty = IRB.getInt64Ty();
+  Type *VoidTy = IRB.getVoidTy();
+  FunctionCallee AsanPoisonRegion = M.getOrInsertFunction(
+      StringRef("__asan_poison_region"),
+      FunctionType::get(VoidTy, {Int64Ty, Int64Ty}, false));
+
+  auto RedzonesVec = LDSParams.RedzoneOffsetAndSizeVector;
+  size_t VecSize = RedzonesVec.size();
+  for (unsigned i = 0; i < VecSize; i++) {
+    auto &RedzonePair = RedzonesVec[i];
+    uint64_t RedzoneOffset = RedzonePair.first;
+    uint64_t RedzoneSize = RedzonePair.second;
+    Value *RedzoneAddrOffset = IRB.CreateInBoundsGEP(
+        IRB.getInt8Ty(), MallocPtr, {IRB.getInt64(RedzoneOffset)});
+    Value *RedzoneAddress = IRB.CreatePtrToInt(RedzoneAddrOffset, Int64Ty);
+    IRB.CreateCall(AsanPoisonRegion,
+                   {RedzoneAddress, IRB.getInt64(RedzoneSize)});
+  }
+}
+
 void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
                                               DomTreeUpdater &DTU) {
   LLVM_DEBUG(dbgs() << "Sw Lowering Kernel LDS for : " << Func->getName());
   auto &LDSParams = KernelToLDSParametersMap[Func];
   auto &Ctx = M.getContext();
   auto *PrevEntryBlock = &Func->getEntryBlock();
+  SetVector<Instruction *> LDSInstructions;
+  getLDSMemoryInstructions(Func, LDSInstructions);
 
   // Create malloc block.
   auto *MallocBlock = BasicBlock::Create(Ctx, "Malloc", Func, PrevEntryBlock);
@@ -569,6 +754,9 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
   // {0,0,0} index work item in the work group.
   auto *WIdBlock = BasicBlock::Create(Ctx, "WId", Func, MallocBlock);
   IRB.SetInsertPoint(WIdBlock, WIdBlock->begin());
+  DebugLoc FirstDL =
+      getOrCreateDebugLoc(&*PrevEntryBlock->begin(), Func->getSubprogram());
+  IRB.SetCurrentDebugLocation(FirstDL);
   auto *const WIdx =
       IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {}, {});
   auto *const WIdy =
@@ -652,7 +840,7 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
   CurrMallocSize = IRB.CreateZExt(CurrMallocSize, Int64Ty);
 
   // Create a call to malloc function which does device global memory allocation
-  // with size equals to all LDS global accesses size  in this kernel.
+  // with size equal to the total size of all LDS global accesses in this kernel.
   Value *ReturnAddress = IRB.CreateCall(
       Intrinsic::getDeclaration(&M, Intrinsic::returnaddress), IRB.getInt32(0));
   FunctionCallee AsanMallocFunc = M.getOrInsertFunction(
@@ -667,6 +855,9 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
   // create store of malloc to new global
   IRB.CreateStore(MallocPtr, SwLDS);
 
+  // Create calls to __asan_poison_region to poison redzones.
+  poisonRedzones(Func, MallocPtr);
+
   // Create branch to PrevEntryBlock
   IRB.CreateBr(PrevEntryBlock);
 
@@ -679,8 +870,18 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
 
   IRB.CreateIntrinsic(Intrinsic::amdgcn_s_barrier, {}, {});
 
+  // Load malloc pointer from Sw LDS.
+  Value *LoadMallocPtr =
+      IRB.CreateLoad(IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS), SwLDS);
+
+  // Replace all uses of LDS globals with the new LDS pointers.
   replaceKernelLDSAccesses(Func);
 
+  // Replace memory operations on LDS with operations on the corresponding
+  // global memory pointers.
+  translateLDSMemoryOperationsToGlobalMemory(Func, LoadMallocPtr,
+                                             LDSInstructions);
+
   auto *CondFreeBlock = BasicBlock::Create(Ctx, "CondFree", Func);
   auto *FreeBlock = BasicBlock::Create(Ctx, "Free", Func);
   auto *EndBlock = BasicBlock::Create(Ctx, "End", Func);
@@ -703,9 +904,6 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
   IRB.SetInsertPoint(FreeBlock, FreeBlock->begin());
 
   // Free the previously allocated device global memory.
-  Value *LoadMallocPtr =
-      IRB.CreateLoad(IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS), SwLDS);
-
   FunctionCallee AsanFreeFunc = M.getOrInsertFunction(
       StringRef("__asan_free_impl"),
       FunctionType::get(IRB.getVoidTy(), {Int64Ty, Int64Ty}, false));
@@ -842,6 +1040,11 @@ void AMDGPUSwLowerLDS::lowerNonKernelLDSAccesses(
                     << Func->getName());
   auto InsertAt = Func->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
   IRB.SetInsertPoint(InsertAt);
+
+  // Get LDS memory instructions.
+  SetVector<Instruction *> LDSInstructions;
+  getLDSMemoryInstructions(Func, LDSInstructions);
+
   Function *Decl =
       Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_lds_kernel_id, {});
   auto *KernelId = IRB.CreateCall(Decl, {});
@@ -853,6 +1056,8 @@ void AMDGPUSwLowerLDS::lowerNonKernelLDSAccesses(
       LDSBaseTable->getValueType(), LDSBaseTable, {IRB.getInt32(0), KernelId});
   Value *BaseLoad =
       IRB.CreateLoad(IRB.getPtrTy(AMDGPUAS::LOCAL_ADDRESS), BaseGEP);
+  Value *LoadMallocPtr =
+      IRB.CreateLoad(IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS), BaseLoad);
 
   for (GlobalVariable *GV : LDSGlobals) {
     auto GVIt = std::find(OrdereLDSGlobals.begin(), OrdereLDSGlobals.end(), GV);
@@ -871,6 +1076,8 @@ void AMDGPUSwLowerLDS::lowerNonKernelLDSAccesses(
                       << GV->getName());
     replacesUsesOfGlobalInFunction(Func, GV, BasePlusOffset);
   }
+  translateLDSMemoryOperationsToGlobalMemory(Func, LoadMallocPtr,
+                                             LDSInstructions);
 }
 
 static void reorderStaticDynamicIndirectLDSSet(KernelLDSParameters &LDSParams) {
@@ -913,7 +1120,6 @@ bool AMDGPUSwLowerLDS::run() {
         continue;
 
       assert(isKernelLDS(F));
-
       if (!F->hasFnAttribute(Attribute::SanitizeAddress))
         continue;
 
@@ -999,7 +1205,6 @@ bool AMDGPUSwLowerLDS::run() {
 
   for (auto &GV : make_early_inc_range(M.globals())) {
     if (AMDGPU::isLDSVariableToLower(GV)) {
-      // probably want to remove from used lists
       GV.removeDeadConstantUsers();
       if (GV.use_empty())
         GV.eraseFromParent();
@@ -1010,8 +1215,12 @@ bool AMDGPUSwLowerLDS::run() {
 
 class AMDGPUSwLowerLDSLegacy : public ModulePass {
 public:
+  const AMDGPUTargetMachine *AMDGPUTM;
   static char ID;
-  AMDGPUSwLowerLDSLegacy() : ModulePass(ID) {}
+  AMDGPUSwLowerLDSLegacy(const AMDGPUTargetMachine *TM)
+      : ModulePass(ID), AMDGPUTM(TM) {
+    initializeAMDGPUSwLowerLDSLegacyPass(*PassRegistry::getPassRegistry());
+  }
   bool runOnModule(Module &M) override;
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.addPreserved<DominatorTreeWrapperPass>();
@@ -1022,8 +1231,11 @@ class AMDGPUSwLowerLDSLegacy : public ModulePass {
 char AMDGPUSwLowerLDSLegacy::ID = 0;
 char &llvm::AMDGPUSwLowerLDSLegacyPassID = AMDGPUSwLowerLDSLegacy::ID;
 
-INITIALIZE_PASS(AMDGPUSwLowerLDSLegacy, "amdgpu-sw-lower-lds",
-                "AMDGPU Software lowering of LDS", false, false)
+INITIALIZE_PASS_BEGIN(AMDGPUSwLowerLDSLegacy, "amdgpu-sw-lower-lds",
+                      "AMDGPU Software lowering of LDS", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_END(AMDGPUSwLowerLDSLegacy, "amdgpu-sw-lower-lds",
+                    "AMDGPU Software lowering of LDS", false, false)
 
 bool AMDGPUSwLowerLDSLegacy::runOnModule(Module &M) {
   DominatorTreeWrapperPass *const DTW =
@@ -1031,13 +1243,18 @@ bool AMDGPUSwLowerLDSLegacy::runOnModule(Module &M) {
   auto DTCallback = [&DTW](Function &F) -> DominatorTree * {
     return DTW ? &DTW->getDomTree() : nullptr;
   };
-  AMDGPUSwLowerLDS SwLowerLDSImpl(M, DTCallback);
+  if (!AMDGPUTM) {
+    auto &TPC = getAnalysis<TargetPassConfig>();
+    AMDGPUTM = &TPC.getTM<AMDGPUTargetMachine>();
+  }
+  AMDGPUSwLowerLDS SwLowerLDSImpl(M, *AMDGPUTM, DTCallback);
   bool IsChanged = SwLowerLDSImpl.run();
   return IsChanged;
 }
 
-ModulePass *llvm::createAMDGPUSwLowerLDSLegacyPass() {
-  return new AMDGPUSwLowerLDSLegacy();
+ModulePass *
+llvm::createAMDGPUSwLowerLDSLegacyPass(const AMDGPUTargetMachine *TM) {
+  return new AMDGPUSwLowerLDSLegacy(TM);
 }
 
 PreservedAnalyses AMDGPUSwLowerLDSPass::run(Module &M,
@@ -1046,7 +1263,7 @@ PreservedAnalyses AMDGPUSwLowerLDSPass::run(Module &M,
   auto DTCallback = [&FAM](Function &F) -> DominatorTree * {
     return &FAM.getResult<DominatorTreeAnalysis>(F);
   };
-  AMDGPUSwLowerLDS SwLowerLDSImpl(M, DTCallback);
+  AMDGPUSwLowerLDS SwLowerLDSImpl(M, TM, DTCallback);
   bool IsChanged = SwLowerLDSImpl.run();
   if (!IsChanged)
     return PreservedAnalyses::all();
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll
index 7fb2366f9843c..000b19697eab8 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
 ; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
 
 ; Test to check that indirect dynamic LDS access through a non-kernel called from a kernel is lowered correctly.
@@ -7,12 +7,20 @@
 @lds_3 = external addrspace(3) global [0 x i8], align 4
 @lds_4 = external addrspace(3) global [0 x i8], align 8
 
+;.
+; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 8, !absolute_symbol [[META0:![0-9]+]]
+; CHECK: @llvm.amdgcn.k0.dynlds = external addrspace(3) global [0 x i8], no_sanitize_address, align 8, !absolute_symbol [[META1:![0-9]+]]
+; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 32, i32 1, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 64, i32 4, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 96, i32 0, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 128, i32 0, i32 32 } }, no_sanitize_address
+; @llvm.amdgcn.sw.lds.base.table = internal addrspace(1) constant [1 x ptr addrspace(3)] [ptr addrspace(3) @llvm.amdgcn.sw.lds.k0], no_sanitize_address
+; @llvm.amdgcn.sw.lds.offset.table = internal addrspace(1) constant [1 x [2 x ptr addrspace(1)]] [[2 x ptr addrspace(1)] [ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 0)]], no_sanitize_address
+;.
 define void @use_variables() sanitize_address {
 ; CHECK-LABEL: define void @use_variables(
 ; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load ptr addrspace(1), ptr addrspace(3) [[TMP4]], align 8
 ; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
@@ -21,8 +29,12 @@ define void @use_variables() sanitize_address {
 ; CHECK-NEXT:    [[TMP12:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP11]], align 8
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
 ; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP10]]
-; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP9]], align 4
-; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP15]], align 8
+; CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr addrspace(3) [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP7]], i32 [[TMP13]]
+; CHECK-NEXT:    store i8 3, ptr addrspace(1) [[TMP14]], align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr addrspace(3) [[TMP15]] to i32
+; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP7]], i32 [[TMP17]]
+; CHECK-NEXT:    store i8 3, ptr addrspace(1) [[TMP16]], align 8
 ; CHECK-NEXT:    ret void
 ;
   store i8 3, ptr addrspace(3) @lds_3, align 4
@@ -69,24 +81,43 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    [[TMP35:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP26]], i64 [[TMP23]])
 ; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP35]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP36:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 8
+; CHECK-NEXT:    [[TMP37:%.*]] = ptrtoint ptr addrspace(1) [[TMP36]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP37]], i64 24)
+; CHECK-NEXT:    [[TMP38:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 33
+; CHECK-NEXT:    [[TMP39:%.*]] = ptrtoint ptr addrspace(1) [[TMP38]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP39]], i64 31)
+; CHECK-NEXT:    [[TMP40:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 68
+; CHECK-NEXT:    [[TMP41:%.*]] = ptrtoint ptr addrspace(1) [[TMP40]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP41]], i64 28)
+; CHECK-NEXT:    [[TMP46:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 96
+; CHECK-NEXT:    [[TMP47:%.*]] = ptrtoint ptr addrspace(1) [[TMP46]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP47]], i64 32)
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 128
+; CHECK-NEXT:    [[TMP49:%.*]] = ptrtoint ptr addrspace(1) [[TMP48]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP49]], i64 32)
 ; CHECK-NEXT:    br label [[TMP21]]
-; CHECK:       26:
+; CHECK:       36:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP31:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    [[TMP24:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP24]]
 ; CHECK-NEXT:    [[TMP29:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
 ; CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP29]]
 ; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]
 ; CHECK-NEXT:    call void @use_variables()
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP25]], align 1
-; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP30]], align 2
+; CHECK-NEXT:    [[TMP42:%.*]] = ptrtoint ptr addrspace(3) [[TMP25]] to i32
+; CHECK-NEXT:    [[TMP43:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP31]], i32 [[TMP42]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(1) [[TMP43]], align 1
+; CHECK-NEXT:    [[TMP44:%.*]] = ptrtoint ptr addrspace(3) [[TMP30]] to i32
+; CHECK-NEXT:    [[TMP45:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP31]], i32 [[TMP44]]
+; CHECK-NEXT:    store i32 8, ptr addrspace(1) [[TMP45]], align 2
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP31:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    [[TMP32:%.*]] = call ptr @llvm.returnaddress(i32 0)
 ; CHECK-NEXT:    [[TMP33:%.*]] = ptrtoint ptr [[TMP32]] to i64
 ; CHECK-NEXT:    [[TMP34:%.*]] = ptrtoint ptr addrspace(1) [[TMP31]] to i64
@@ -101,5 +132,13 @@ define amdgpu_kernel void @k0() sanitize_address {
   ret void
 }
 ;.
+; CHECK: attributes #[[ATTR0]] = { sanitize_address }
+; CHECK: attributes #[[ATTR1]] = { sanitize_address "amdgpu-lds-size"="8,8" }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR4:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
+; CHECK: [[META0]] = !{i32 0, i32 1}
+; CHECK: [[META1]] = !{i32 8, i32 9}
 ; CHECK: [[META2]] = !{i32 0}
 ;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll
index be8044d8dae07..acac6da2dbb11 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll
@@ -8,7 +8,7 @@
 ;.
 ; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 1, !absolute_symbol [[META0:![0-9]+]]
 ; CHECK: @llvm.amdgcn.k0.dynlds = external addrspace(3) global [0 x i8], no_sanitize_address, align 1, !absolute_symbol [[META1:![0-9]+]]
-; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 0, i32 0 } }, no_sanitize_address
+; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 32, i32 0, i32 32 } }, no_sanitize_address
 ;.
 define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k0(
@@ -41,20 +41,28 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP21]], i64 [[TMP23]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP19]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP31:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 8
+; CHECK-NEXT:    [[TMP32:%.*]] = ptrtoint ptr addrspace(1) [[TMP31]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP32]], i64 24)
+; CHECK-NEXT:    [[TMP33:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 32
+; CHECK-NEXT:    [[TMP34:%.*]] = ptrtoint ptr addrspace(1) [[TMP33]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP34]], i64 32)
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       21:
+; CHECK:       25:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP28:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP10]]
 ; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP11]], align 4
+; CHECK-NEXT:    [[TMP29:%.*]] = ptrtoint ptr addrspace(3) [[TMP11]] to i32
+; CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP28]], i32 [[TMP29]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(1) [[TMP30]], align 4
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP28:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    [[TMP25:%.*]] = call ptr @llvm.returnaddress(i32 0)
 ; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[TMP25]] to i64
 ; CHECK-NEXT:    [[TMP27:%.*]] = ptrtoint ptr addrspace(1) [[TMP28]] to i64
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll
index 3deba8ce138a8..4f143af698c81 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll
@@ -7,12 +7,15 @@
 @lds_3 = external addrspace(3) global [0 x i8], align 4
 @lds_4 = external addrspace(3) global [0 x i8], align 8
 
+; @llvm.amdgcn.sw.lds.base.table = internal addrspace(1) constant [2 x ptr addrspace(3)] [ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, ptr addrspace(3) @llvm.amdgcn.sw.lds.k1], no_sanitize_address
+; @llvm.amdgcn.sw.lds.offset.table = internal addrspace(1) constant [2 x [4 x ptr addrspace(1)]] [[4 x ptr addrspace(1)] [ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), ptr addrspace(1) poison, ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0)], [4 x ptr addrspace(1)] [ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k1.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 1, i32 0), ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k1.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 2, i32 0), ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k1.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k1.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 0)]], no_sanitize_address
 define void @use_variables_1() sanitize_address {
 ; CHECK-LABEL: define void @use_variables_1(
 ; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load ptr addrspace(1), ptr addrspace(3) [[TMP4]], align 8
 ; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x [4 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 2
 ; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
@@ -21,8 +24,12 @@ define void @use_variables_1() sanitize_address {
 ; CHECK-NEXT:    [[TMP12:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP11]], align 8
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
 ; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP10]]
-; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP9]], align 4
-; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP15]], align 8
+; CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr addrspace(3) [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP7]], i32 [[TMP13]]
+; CHECK-NEXT:    store i8 3, ptr addrspace(1) [[TMP14]], align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr addrspace(3) [[TMP15]] to i32
+; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP7]], i32 [[TMP17]]
+; CHECK-NEXT:    store i8 3, ptr addrspace(1) [[TMP16]], align 8
 ; CHECK-NEXT:    ret void
 ;
   store i8 3, ptr addrspace(3) @lds_3, align 4
@@ -36,6 +43,7 @@ define void @use_variables_2() sanitize_address {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load ptr addrspace(1), ptr addrspace(3) [[TMP4]], align 8
 ; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x [4 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
@@ -44,8 +52,12 @@ define void @use_variables_2() sanitize_address {
 ; CHECK-NEXT:    [[TMP12:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP11]], align 8
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
 ; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP10]]
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP9]], align 1
-; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP15]], align 2
+; CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr addrspace(3) [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP7]], i32 [[TMP13]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(1) [[TMP14]], align 1
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr addrspace(3) [[TMP15]] to i32
+; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP7]], i32 [[TMP17]]
+; CHECK-NEXT:    store i32 8, ptr addrspace(1) [[TMP16]], align 2
 ; CHECK-NEXT:    ret void
 ;
   store i8 7, ptr addrspace(3) @lds_1, align 1
@@ -55,7 +67,7 @@ define void @use_variables_2() sanitize_address {
 
 define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k0(
-; CHECK-SAME: ) #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META3:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META2:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -92,21 +104,35 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    [[TMP24:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP27]], i64 [[TMP33]])
 ; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP24]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP34:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 8
+; CHECK-NEXT:    [[TMP35:%.*]] = ptrtoint ptr addrspace(1) [[TMP34]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP35]], i64 24)
+; CHECK-NEXT:    [[TMP36:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 33
+; CHECK-NEXT:    [[TMP37:%.*]] = ptrtoint ptr addrspace(1) [[TMP36]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP37]], i64 31)
+; CHECK-NEXT:    [[TMP40:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 64
+; CHECK-NEXT:    [[TMP41:%.*]] = ptrtoint ptr addrspace(1) [[TMP40]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP41]], i64 32)
+; CHECK-NEXT:    [[TMP42:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 96
+; CHECK-NEXT:    [[TMP43:%.*]] = ptrtoint ptr addrspace(1) [[TMP42]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP43]], i64 32)
 ; CHECK-NEXT:    br label [[TMP21]]
-; CHECK:       26:
+; CHECK:       34:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP29:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP22]]
 ; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]
 ; CHECK-NEXT:    call void @use_variables_1()
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP23]], align 1
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr addrspace(3) [[TMP23]] to i32
+; CHECK-NEXT:    [[TMP39:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP29]], i32 [[TMP38]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(1) [[TMP39]], align 1
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP29:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    [[TMP30:%.*]] = call ptr @llvm.returnaddress(i32 0)
 ; CHECK-NEXT:    [[TMP31:%.*]] = ptrtoint ptr [[TMP30]] to i64
 ; CHECK-NEXT:    [[TMP32:%.*]] = ptrtoint ptr addrspace(1) [[TMP29]] to i64
@@ -122,7 +148,7 @@ define amdgpu_kernel void @k0() sanitize_address {
 
 define amdgpu_kernel void @k1() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k1(
-; CHECK-SAME: ) #[[ATTR2:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META4:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR1]] !llvm.amdgcn.lds.kernel.id [[META3:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -159,22 +185,39 @@ define amdgpu_kernel void @k1() sanitize_address {
 ; CHECK-NEXT:    [[TMP34:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP26]], i64 [[TMP28]])
 ; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP34]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, align 8
+; CHECK-NEXT:    [[TMP38:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP13]], i64 8
+; CHECK-NEXT:    [[TMP39:%.*]] = ptrtoint ptr addrspace(1) [[TMP38]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP39]], i64 24)
+; CHECK-NEXT:    [[TMP42:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP13]], i64 33
+; CHECK-NEXT:    [[TMP43:%.*]] = ptrtoint ptr addrspace(1) [[TMP42]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP43]], i64 31)
+; CHECK-NEXT:    [[TMP44:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP13]], i64 68
+; CHECK-NEXT:    [[TMP45:%.*]] = ptrtoint ptr addrspace(1) [[TMP44]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP45]], i64 28)
+; CHECK-NEXT:    [[TMP46:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP13]], i64 96
+; CHECK-NEXT:    [[TMP47:%.*]] = ptrtoint ptr addrspace(1) [[TMP46]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP47]], i64 32)
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP13]], i64 128
+; CHECK-NEXT:    [[TMP49:%.*]] = ptrtoint ptr addrspace(1) [[TMP48]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP49]], i64 32)
 ; CHECK-NEXT:    br label [[TMP14]]
-; CHECK:       26:
+; CHECK:       36:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP29:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, align 8
 ; CHECK-NEXT:    [[TMP31:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 4
 ; CHECK-NEXT:    [[TMP32:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, i32 [[TMP31]]
 ; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k1.dynlds) ]
 ; CHECK-NEXT:    call void @use_variables_1()
 ; CHECK-NEXT:    call void @use_variables_2()
-; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP32]], align 4
+; CHECK-NEXT:    [[TMP40:%.*]] = ptrtoint ptr addrspace(3) [[TMP32]] to i32
+; CHECK-NEXT:    [[TMP41:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP29]], i32 [[TMP40]]
+; CHECK-NEXT:    store i8 3, ptr addrspace(1) [[TMP41]], align 4
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP29:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, align 8
 ; CHECK-NEXT:    [[TMP35:%.*]] = call ptr @llvm.returnaddress(i32 0)
 ; CHECK-NEXT:    [[TMP36:%.*]] = ptrtoint ptr [[TMP35]] to i64
 ; CHECK-NEXT:    [[TMP37:%.*]] = ptrtoint ptr addrspace(1) [[TMP29]] to i64
@@ -189,6 +232,6 @@ define amdgpu_kernel void @k1() sanitize_address {
   ret void
 }
 ;.
-; CHECK: [[META3]] = !{i32 0}
-; CHECK: [[META4]] = !{i32 1}
+; CHECK: [[META2]] = !{i32 0}
+; CHECK: [[META3]] = !{i32 1}
 ;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll
index 4384287797f35..fa0e688f61dad 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll
@@ -9,7 +9,7 @@
 
 ;.
 ; CHECK: @llvm.amdgcn.sw.lds.test_kernel = internal addrspace(3) global ptr poison, no_sanitize_address, align 4, !absolute_symbol [[META0:![0-9]+]]
-; CHECK: @llvm.amdgcn.sw.lds.test_kernel.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.test_kernel.md.type { %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 0, i32 8, i32 8 }, %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 8, i32 4, i32 4 }, %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 12, i32 4, i32 4 } }, no_sanitize_address
+; CHECK: @llvm.amdgcn.sw.lds.test_kernel.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.test_kernel.md.type { %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 0, i32 8, i32 32 }, %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 32, i32 4, i32 32 }, %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 64, i32 4, i32 32 } }, no_sanitize_address
 ;.
 define amdgpu_kernel void @test_kernel() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @test_kernel(
@@ -32,10 +32,20 @@ define amdgpu_kernel void @test_kernel() sanitize_address {
 ; CHECK-NEXT:    [[TMP20:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP17]], i64 [[TMP19]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP20]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, align 8
+; CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 8
+; CHECK-NEXT:    [[TMP28:%.*]] = ptrtoint ptr addrspace(1) [[TMP27]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP28]], i64 24)
+; CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 36
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr addrspace(1) [[TMP29]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP30]], i64 28)
+; CHECK-NEXT:    [[TMP31:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 68
+; CHECK-NEXT:    [[TMP32:%.*]] = ptrtoint ptr addrspace(1) [[TMP31]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP32]], i64 28)
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       14:
+; CHECK:       20:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP21:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, align 8
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, i32 [[TMP10]]
 ; CHECK-NEXT:    [[TMP25:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 2, i32 0), align 4
@@ -60,7 +70,6 @@ define amdgpu_kernel void @test_kernel() sanitize_address {
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP21:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, align 8
 ; CHECK-NEXT:    [[TMP22:%.*]] = call ptr @llvm.returnaddress(i32 0)
 ; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr [[TMP22]] to i64
 ; CHECK-NEXT:    [[TMP24:%.*]] = ptrtoint ptr addrspace(1) [[TMP21]] to i64
@@ -90,7 +99,7 @@ val1_negative:
 ret void
 }
 ;.
-; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-lds-size"="16" }
+; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-lds-size"="8" }
 ; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
 ; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
 ; CHECK: attributes #[[ATTR3:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll
index 2548c872347b8..31deb499dd18a 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
 ; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
 
 ; Test to check if static and dynamic LDS accesses are lowered correctly when a non-kernel
@@ -8,12 +8,20 @@
 @lds_3 = external addrspace(3) global [0 x i8], align 4
 @lds_4 = external addrspace(3) global [0 x i8], align 8
 
+;.
+; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 8, !absolute_symbol [[META0:![0-9]+]]
+; CHECK: @llvm.amdgcn.k0.dynlds = external addrspace(3) global [0 x i8], no_sanitize_address, align 8, !absolute_symbol [[META1:![0-9]+]]
+; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 32, i32 1, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 64, i32 4, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 96, i32 0, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 128, i32 0, i32 32 } }, no_sanitize_address
+; @llvm.amdgcn.sw.lds.base.table = internal addrspace(1) constant [1 x ptr addrspace(3)] [ptr addrspace(3) @llvm.amdgcn.sw.lds.k0], no_sanitize_address
+; @llvm.amdgcn.sw.lds.offset.table = internal addrspace(1) constant [1 x [2 x ptr addrspace(1)]] [[2 x ptr addrspace(1)] [ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 0)]], no_sanitize_address
+;.
 define void @use_variables() sanitize_address {
 ; CHECK-LABEL: define void @use_variables(
 ; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load ptr addrspace(1), ptr addrspace(3) [[TMP4]], align 8
 ; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
@@ -22,8 +30,12 @@ define void @use_variables() sanitize_address {
 ; CHECK-NEXT:    [[TMP12:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP11]], align 8
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
 ; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP10]]
-; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP9]], align 4
-; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP15]], align 8
+; CHECK-NEXT:    [[TMP13:%.*]] = ptrtoint ptr addrspace(3) [[TMP9]] to i32
+; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP7]], i32 [[TMP13]]
+; CHECK-NEXT:    store i8 3, ptr addrspace(1) [[TMP14]], align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr addrspace(3) [[TMP15]] to i32
+; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP7]], i32 [[TMP17]]
+; CHECK-NEXT:    store i8 3, ptr addrspace(1) [[TMP16]], align 8
 ; CHECK-NEXT:    ret void
 ;
   store i8 3, ptr addrspace(3) @lds_3, align 4
@@ -70,24 +82,43 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    [[TMP35:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP26]], i64 [[TMP23]])
 ; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP35]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP36:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 8
+; CHECK-NEXT:    [[TMP37:%.*]] = ptrtoint ptr addrspace(1) [[TMP36]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP37]], i64 24)
+; CHECK-NEXT:    [[TMP38:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 33
+; CHECK-NEXT:    [[TMP39:%.*]] = ptrtoint ptr addrspace(1) [[TMP38]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP39]], i64 31)
+; CHECK-NEXT:    [[TMP40:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 68
+; CHECK-NEXT:    [[TMP41:%.*]] = ptrtoint ptr addrspace(1) [[TMP40]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP41]], i64 28)
+; CHECK-NEXT:    [[TMP46:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 96
+; CHECK-NEXT:    [[TMP47:%.*]] = ptrtoint ptr addrspace(1) [[TMP46]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP47]], i64 32)
+; CHECK-NEXT:    [[TMP48:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 128
+; CHECK-NEXT:    [[TMP49:%.*]] = ptrtoint ptr addrspace(1) [[TMP48]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP49]], i64 32)
 ; CHECK-NEXT:    br label [[TMP21]]
-; CHECK:       26:
+; CHECK:       36:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP31:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    [[TMP24:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP24]]
 ; CHECK-NEXT:    [[TMP29:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
 ; CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP29]]
 ; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]
 ; CHECK-NEXT:    call void @use_variables()
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP25]], align 1
-; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP30]], align 2
+; CHECK-NEXT:    [[TMP42:%.*]] = ptrtoint ptr addrspace(3) [[TMP25]] to i32
+; CHECK-NEXT:    [[TMP43:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP31]], i32 [[TMP42]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(1) [[TMP43]], align 1
+; CHECK-NEXT:    [[TMP44:%.*]] = ptrtoint ptr addrspace(3) [[TMP30]] to i32
+; CHECK-NEXT:    [[TMP45:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP31]], i32 [[TMP44]]
+; CHECK-NEXT:    store i32 8, ptr addrspace(1) [[TMP45]], align 2
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP31:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    [[TMP32:%.*]] = call ptr @llvm.returnaddress(i32 0)
 ; CHECK-NEXT:    [[TMP33:%.*]] = ptrtoint ptr [[TMP32]] to i64
 ; CHECK-NEXT:    [[TMP34:%.*]] = ptrtoint ptr addrspace(1) [[TMP31]] to i64
@@ -102,5 +133,13 @@ define amdgpu_kernel void @k0() sanitize_address {
   ret void
 }
 ;.
+; CHECK: attributes #[[ATTR0]] = { sanitize_address }
+; CHECK: attributes #[[ATTR1]] = { sanitize_address "amdgpu-lds-size"="8,8" }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR4:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
+; CHECK: [[META0]] = !{i32 0, i32 1}
+; CHECK: [[META1]] = !{i32 8, i32 9}
 ; CHECK: [[META2]] = !{i32 0}
 ;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll
index 35f96fd21ee55..b82c22e840ab0 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll
@@ -10,7 +10,7 @@
 ;.
 ; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 8, !absolute_symbol [[META0:![0-9]+]]
 ; CHECK: @llvm.amdgcn.k0.dynlds = external addrspace(3) global [0 x i8], no_sanitize_address, align 8, !absolute_symbol [[META1:![0-9]+]]
-; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 1, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 16, i32 4, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 24, i32 0, i32 0 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 24, i32 0, i32 0 } }, no_sanitize_address
+; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 32, i32 1, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 64, i32 4, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 96, i32 0, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 128, i32 0, i32 32 } }, no_sanitize_address
 ;.
 define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k0(
@@ -51,10 +51,26 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    [[TMP39:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP30]], i64 [[TMP23]])
 ; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP39]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP40:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 8
+; CHECK-NEXT:    [[TMP41:%.*]] = ptrtoint ptr addrspace(1) [[TMP40]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP41]], i64 24)
+; CHECK-NEXT:    [[TMP42:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 33
+; CHECK-NEXT:    [[TMP43:%.*]] = ptrtoint ptr addrspace(1) [[TMP42]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP43]], i64 31)
+; CHECK-NEXT:    [[TMP44:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 68
+; CHECK-NEXT:    [[TMP45:%.*]] = ptrtoint ptr addrspace(1) [[TMP44]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP45]], i64 28)
+; CHECK-NEXT:    [[TMP54:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 96
+; CHECK-NEXT:    [[TMP55:%.*]] = ptrtoint ptr addrspace(1) [[TMP54]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP55]], i64 32)
+; CHECK-NEXT:    [[TMP56:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP20]], i64 128
+; CHECK-NEXT:    [[TMP57:%.*]] = ptrtoint ptr addrspace(1) [[TMP56]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP57]], i64 32)
 ; CHECK-NEXT:    br label [[TMP21]]
-; CHECK:       26:
+; CHECK:       36:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP35:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    [[TMP24:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP24]]
 ; CHECK-NEXT:    [[TMP26:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
@@ -64,16 +80,23 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    [[TMP33:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 0), align 4
 ; CHECK-NEXT:    [[TMP34:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP33]]
 ; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP25]], align 4
-; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP27]], align 8
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP29]], align 4
-; CHECK-NEXT:    store i8 8, ptr addrspace(3) [[TMP34]], align 8
+; CHECK-NEXT:    [[TMP46:%.*]] = ptrtoint ptr addrspace(3) [[TMP25]] to i32
+; CHECK-NEXT:    [[TMP47:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP35]], i32 [[TMP46]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(1) [[TMP47]], align 4
+; CHECK-NEXT:    [[TMP48:%.*]] = ptrtoint ptr addrspace(3) [[TMP27]] to i32
+; CHECK-NEXT:    [[TMP49:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP35]], i32 [[TMP48]]
+; CHECK-NEXT:    store i32 8, ptr addrspace(1) [[TMP49]], align 8
+; CHECK-NEXT:    [[TMP50:%.*]] = ptrtoint ptr addrspace(3) [[TMP29]] to i32
+; CHECK-NEXT:    [[TMP51:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP35]], i32 [[TMP50]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(1) [[TMP51]], align 4
+; CHECK-NEXT:    [[TMP52:%.*]] = ptrtoint ptr addrspace(3) [[TMP34]] to i32
+; CHECK-NEXT:    [[TMP53:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP35]], i32 [[TMP52]]
+; CHECK-NEXT:    store i8 8, ptr addrspace(1) [[TMP53]], align 8
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP35:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    [[TMP36:%.*]] = call ptr @llvm.returnaddress(i32 0)
 ; CHECK-NEXT:    [[TMP37:%.*]] = ptrtoint ptr [[TMP36]] to i64
 ; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr addrspace(1) [[TMP35]] to i64
@@ -89,11 +112,11 @@ define amdgpu_kernel void @k0() sanitize_address {
   ret void
 }
 ;.
-; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-lds-size"="24,24" }
+; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-lds-size"="8,8" }
 ; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
 ; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
 ; CHECK: attributes #[[ATTR3:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
 ;.
 ; CHECK: [[META0]] = !{i32 0, i32 1}
-; CHECK: [[META1]] = !{i32 24, i32 25}
+; CHECK: [[META1]] = !{i32 8, i32 9}
 ;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll
index 5b285f467c5fa..27c04e39991c4 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll
@@ -8,7 +8,7 @@
 
 ;.
 ; CHECK: @llvm.amdgcn.sw.lds.my_kernel = internal addrspace(3) global ptr poison, no_sanitize_address, align 4, !absolute_symbol [[META0:![0-9]+]]
-; CHECK: @llvm.amdgcn.sw.lds.my_kernel.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.my_kernel.md.type { %llvm.amdgcn.sw.lds.my_kernel.md.item { i32 0, i32 8, i32 8 }, %llvm.amdgcn.sw.lds.my_kernel.md.item { i32 8, i32 4096, i32 4096 } }, no_sanitize_address
+; CHECK: @llvm.amdgcn.sw.lds.my_kernel.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.my_kernel.md.type { %llvm.amdgcn.sw.lds.my_kernel.md.item { i32 0, i32 8, i32 32 }, %llvm.amdgcn.sw.lds.my_kernel.md.item { i32 32, i32 4096, i32 5120 } }, no_sanitize_address
 ;.
 define void @my_function(ptr addrspace(3) %lds_arg) sanitize_address {
 ; CHECK-LABEL: define void @my_function(
@@ -45,10 +45,17 @@ define amdgpu_kernel void @my_kernel() sanitize_address {
 ; CHECK-NEXT:    [[TMP16:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP13]], i64 [[TMP15]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP16]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, align 8
+; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 8
+; CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr addrspace(1) [[TMP21]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP22]], i64 24)
+; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 4128
+; CHECK-NEXT:    [[TMP24:%.*]] = ptrtoint ptr addrspace(1) [[TMP23]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP24]], i64 1024)
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       14:
+; CHECK:       18:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP17:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, align 8
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_MY_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, i32 [[TMP8]]
 ; CHECK-NEXT:    [[LDS_PTR:%.*]] = getelementptr [1024 x i32], ptr addrspace(3) [[TMP9]], i32 0, i32 0
@@ -58,7 +65,6 @@ define amdgpu_kernel void @my_kernel() sanitize_address {
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP17:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, align 8
 ; CHECK-NEXT:    [[TMP18:%.*]] = call ptr @llvm.returnaddress(i32 0)
 ; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[TMP18]] to i64
 ; CHECK-NEXT:    [[TMP20:%.*]] = ptrtoint ptr addrspace(1) [[TMP17]] to i64
@@ -73,7 +79,7 @@ define amdgpu_kernel void @my_kernel() sanitize_address {
 }
 ;.
 ; CHECK: attributes #[[ATTR0]] = { sanitize_address }
-; CHECK: attributes #[[ATTR1]] = { sanitize_address "amdgpu-lds-size"="4104" }
+; CHECK: attributes #[[ATTR1]] = { sanitize_address "amdgpu-lds-size"="8" }
 ; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
 ; CHECK: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
 ; CHECK: attributes #[[ATTR4:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll
index 9570c41838eef..19502d15a1f6c 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 ; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
 
 ; Test to check if LDS accesses are lowered correctly when a call is made to a nested non-kernel function.
@@ -9,15 +9,15 @@
 define amdgpu_kernel void @kernel_0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @kernel_0(
 ; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META2:![0-9]+]] {
-; CHECK-NEXT:  WId:
+; CHECK-NEXT:  [[WID:.*]]:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
-; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
-; CHECK:       Malloc:
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[MALLOC:.*]], label %[[BB18:.*]]
+; CHECK:       [[MALLOC]]:
 ; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md, i32 0, i32 1, i32 2), align 4
 ; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP9]], [[TMP10]]
@@ -27,23 +27,29 @@ define amdgpu_kernel void @kernel_0() sanitize_address {
 ; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP11]], i64 [[TMP14]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP19]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_0, align 8
-; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       14:
-; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 8
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr addrspace(1) [[TMP20]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP21]], i64 24)
+; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 96
+; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr addrspace(1) [[TMP22]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP23]], i64 32)
+; CHECK-NEXT:    br label %[[BB18]]
+; CHECK:       [[BB18]]:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, %[[WID]] ], [ true, %[[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP15:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_0, align 8
 ; CHECK-NEXT:    call void @call_store_A()
-; CHECK-NEXT:    br label [[CONDFREE:%.*]]
-; CHECK:       CondFree:
+; CHECK-NEXT:    br label %[[CONDFREE:.*]]
+; CHECK:       [[CONDFREE]]:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
-; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
-; CHECK:       Free:
-; CHECK-NEXT:    [[TMP15:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_0, align 8
+; CHECK-NEXT:    br i1 [[XYZCOND]], label %[[FREE:.*]], label %[[END:.*]]
+; CHECK:       [[FREE]]:
 ; CHECK-NEXT:    [[TMP16:%.*]] = call ptr @llvm.returnaddress(i32 0)
 ; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[TMP16]] to i64
 ; CHECK-NEXT:    [[TMP18:%.*]] = ptrtoint ptr addrspace(1) [[TMP15]] to i64
 ; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP18]], i64 [[TMP17]])
-; CHECK-NEXT:    br label [[END]]
-; CHECK:       End:
+; CHECK-NEXT:    br label %[[END]]
+; CHECK:       [[END]]:
 ; CHECK-NEXT:    ret void
 ;
   call void @call_store_A()
@@ -53,15 +59,15 @@ define amdgpu_kernel void @kernel_0() sanitize_address {
 define amdgpu_kernel void @kernel_1() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @kernel_1(
 ; CHECK-SAME: ) #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META3:![0-9]+]] {
-; CHECK-NEXT:  WId:
+; CHECK-NEXT:  [[WID:.*]]:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
-; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP14:%.*]]
-; CHECK:       Malloc:
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[MALLOC:.*]], label %[[BB25:.*]]
+; CHECK:       [[MALLOC]]:
 ; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, align 4
 ; CHECK-NEXT:    [[TMP20:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 0, i32 2), align 4
 ; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[TMP12]], [[TMP20]]
@@ -81,24 +87,30 @@ define amdgpu_kernel void @kernel_1() sanitize_address {
 ; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP16]], i64 [[TMP18]])
 ; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP19]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_1, align 8
-; CHECK-NEXT:    br label [[TMP14]]
-; CHECK:       21:
-; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    [[TMP26:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP13]], i64 8
+; CHECK-NEXT:    [[TMP27:%.*]] = ptrtoint ptr addrspace(1) [[TMP26]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP27]], i64 24)
+; CHECK-NEXT:    [[TMP28:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP13]], i64 32
+; CHECK-NEXT:    [[TMP29:%.*]] = ptrtoint ptr addrspace(1) [[TMP28]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP29]], i64 32)
+; CHECK-NEXT:    br label %[[BB25]]
+; CHECK:       [[BB25]]:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, %[[WID]] ], [ true, %[[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP22:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_1, align 8
 ; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel_1.dynlds) ]
 ; CHECK-NEXT:    [[PTR:%.*]] = call ptr @get_B_ptr()
-; CHECK-NEXT:    br label [[CONDFREE:%.*]]
-; CHECK:       CondFree:
+; CHECK-NEXT:    br label %[[CONDFREE:.*]]
+; CHECK:       [[CONDFREE]]:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
-; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
-; CHECK:       Free:
-; CHECK-NEXT:    [[TMP22:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_1, align 8
+; CHECK-NEXT:    br i1 [[XYZCOND]], label %[[FREE:.*]], label %[[END:.*]]
+; CHECK:       [[FREE]]:
 ; CHECK-NEXT:    [[TMP23:%.*]] = call ptr @llvm.returnaddress(i32 0)
 ; CHECK-NEXT:    [[TMP24:%.*]] = ptrtoint ptr [[TMP23]] to i64
 ; CHECK-NEXT:    [[TMP25:%.*]] = ptrtoint ptr addrspace(1) [[TMP22]] to i64
 ; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP25]], i64 [[TMP24]])
-; CHECK-NEXT:    br label [[END]]
-; CHECK:       End:
+; CHECK-NEXT:    br label %[[END]]
+; CHECK:       [[END]]:
 ; CHECK-NEXT:    ret void
 ;
   %ptr = call ptr @get_B_ptr()
@@ -108,15 +120,15 @@ define amdgpu_kernel void @kernel_1() sanitize_address {
 define amdgpu_kernel void @kernel_2() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @kernel_2(
 ; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META4:![0-9]+]] {
-; CHECK-NEXT:  WId:
+; CHECK-NEXT:  [[WID:.*]]:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
-; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
-; CHECK:       Malloc:
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[MALLOC:.*]], label %[[BB18:.*]]
+; CHECK:       [[MALLOC]]:
 ; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_2_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_2_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md, i32 0, i32 1, i32 2), align 4
 ; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP9]], [[TMP10]]
@@ -126,23 +138,29 @@ define amdgpu_kernel void @kernel_2() sanitize_address {
 ; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP11]], i64 [[TMP14]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP19]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_2, align 8
-; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       14:
-; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 8
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr addrspace(1) [[TMP20]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP21]], i64 24)
+; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 96
+; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr addrspace(1) [[TMP22]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP23]], i64 32)
+; CHECK-NEXT:    br label %[[BB18]]
+; CHECK:       [[BB18]]:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, %[[WID]] ], [ true, %[[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP15:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_2, align 8
 ; CHECK-NEXT:    call void @store_A()
-; CHECK-NEXT:    br label [[CONDFREE:%.*]]
-; CHECK:       CondFree:
+; CHECK-NEXT:    br label %[[CONDFREE:.*]]
+; CHECK:       [[CONDFREE]]:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
-; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
-; CHECK:       Free:
-; CHECK-NEXT:    [[TMP15:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_2, align 8
+; CHECK-NEXT:    br i1 [[XYZCOND]], label %[[FREE:.*]], label %[[END:.*]]
+; CHECK:       [[FREE]]:
 ; CHECK-NEXT:    [[TMP16:%.*]] = call ptr @llvm.returnaddress(i32 0)
 ; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[TMP16]] to i64
 ; CHECK-NEXT:    [[TMP18:%.*]] = ptrtoint ptr addrspace(1) [[TMP15]] to i64
 ; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP18]], i64 [[TMP17]])
-; CHECK-NEXT:    br label [[END]]
-; CHECK:       End:
+; CHECK-NEXT:    br label %[[END]]
+; CHECK:       [[END]]:
 ; CHECK-NEXT:    ret void
 ;
   call void @store_A()
@@ -152,15 +170,15 @@ define amdgpu_kernel void @kernel_2() sanitize_address {
 define amdgpu_kernel void @kernel_3() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @kernel_3(
 ; CHECK-SAME: ) #[[ATTR1]] !llvm.amdgcn.lds.kernel.id [[META5:![0-9]+]] {
-; CHECK-NEXT:  WId:
+; CHECK-NEXT:  [[WID:.*]]:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
 ; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
-; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP14:%.*]]
-; CHECK:       Malloc:
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[MALLOC:.*]], label %[[BB25:.*]]
+; CHECK:       [[MALLOC]]:
 ; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, align 4
 ; CHECK-NEXT:    [[TMP20:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 0, i32 2), align 4
 ; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[TMP12]], [[TMP20]]
@@ -180,24 +198,30 @@ define amdgpu_kernel void @kernel_3() sanitize_address {
 ; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP16]], i64 [[TMP18]])
 ; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP19]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_3, align 8
-; CHECK-NEXT:    br label [[TMP14]]
-; CHECK:       21:
-; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    [[TMP26:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP13]], i64 8
+; CHECK-NEXT:    [[TMP27:%.*]] = ptrtoint ptr addrspace(1) [[TMP26]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP27]], i64 24)
+; CHECK-NEXT:    [[TMP28:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP13]], i64 32
+; CHECK-NEXT:    [[TMP29:%.*]] = ptrtoint ptr addrspace(1) [[TMP28]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP29]], i64 32)
+; CHECK-NEXT:    br label %[[BB25]]
+; CHECK:       [[BB25]]:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, %[[WID]] ], [ true, %[[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP22:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_3, align 8
 ; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel_3.dynlds) ]
 ; CHECK-NEXT:    [[PTR:%.*]] = call ptr @get_B_ptr()
-; CHECK-NEXT:    br label [[CONDFREE:%.*]]
-; CHECK:       CondFree:
+; CHECK-NEXT:    br label %[[CONDFREE:.*]]
+; CHECK:       [[CONDFREE]]:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
-; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
-; CHECK:       Free:
-; CHECK-NEXT:    [[TMP22:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_3, align 8
+; CHECK-NEXT:    br i1 [[XYZCOND]], label %[[FREE:.*]], label %[[END:.*]]
+; CHECK:       [[FREE]]:
 ; CHECK-NEXT:    [[TMP23:%.*]] = call ptr @llvm.returnaddress(i32 0)
 ; CHECK-NEXT:    [[TMP24:%.*]] = ptrtoint ptr [[TMP23]] to i64
 ; CHECK-NEXT:    [[TMP25:%.*]] = ptrtoint ptr addrspace(1) [[TMP22]] to i64
 ; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP25]], i64 [[TMP24]])
-; CHECK-NEXT:    br label [[END]]
-; CHECK:       End:
+; CHECK-NEXT:    br label %[[END]]
+; CHECK:       [[END]]:
 ; CHECK-NEXT:    ret void
 ;
   %ptr = call ptr @get_B_ptr()
@@ -220,6 +244,7 @@ define private void @store_A() sanitize_address {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [4 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load ptr addrspace(1), ptr addrspace(3) [[TMP4]], align 8
 ; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [4 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
@@ -238,6 +263,7 @@ define private ptr @get_B_ptr() sanitize_address {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [4 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load ptr addrspace(1), ptr addrspace(3) [[TMP4]], align 8
 ; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [4 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll
index deb4475878356..840887cbc854f 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
 ; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
 
 ; Test to check if static LDS is lowered correctly when a non-kernel function with LDS accesses is called from a kernel.
@@ -7,12 +7,19 @@
 @lds_3 = external addrspace(3) global [3 x i8], align 4
 @lds_4 = external addrspace(3) global [4 x i8], align 8
 
+;.
+; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 8, !absolute_symbol [[META0:![0-9]+]]
+; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 32, i32 1, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 64, i32 4, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 96, i32 3, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 128, i32 4, i32 32 } }, no_sanitize_address
+; @llvm.amdgcn.sw.lds.base.table = internal addrspace(1) constant [1 x ptr addrspace(3)] [ptr addrspace(3) @llvm.amdgcn.sw.lds.k0], no_sanitize_address
+; @llvm.amdgcn.sw.lds.offset.table = internal addrspace(1) constant [1 x [2 x ptr addrspace(1)]] [[2 x ptr addrspace(1)] [ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 0)]], no_sanitize_address
+;.
 define void @use_variables() sanitize_address {
 ; CHECK-LABEL: define void @use_variables(
 ; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load ptr addrspace(1), ptr addrspace(3) [[TMP4]], align 8
 ; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
 ; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
@@ -24,7 +31,9 @@ define void @use_variables() sanitize_address {
 ; CHECK-NEXT:    [[X:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr
 ; CHECK-NEXT:    [[TMP16:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr
 ; CHECK-NEXT:    store i8 3, ptr [[TMP16]], align 4
-; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP15]], align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr addrspace(3) [[TMP15]] to i32
+; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP7]], i32 [[TMP14]]
+; CHECK-NEXT:    store i8 3, ptr addrspace(1) [[TMP17]], align 8
 ; CHECK-NEXT:    ret void
 ;
   %X = addrspacecast ptr addrspace(3) @lds_3 to ptr
@@ -54,23 +63,42 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    [[TMP12:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP15]], i64 [[TMP24]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP12]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 8
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr addrspace(1) [[TMP25]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP26]], i64 24)
+; CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 33
+; CHECK-NEXT:    [[TMP28:%.*]] = ptrtoint ptr addrspace(1) [[TMP27]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP28]], i64 31)
+; CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 68
+; CHECK-NEXT:    [[TMP34:%.*]] = ptrtoint ptr addrspace(1) [[TMP29]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP34]], i64 28)
+; CHECK-NEXT:    [[TMP35:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 99
+; CHECK-NEXT:    [[TMP36:%.*]] = ptrtoint ptr addrspace(1) [[TMP35]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP36]], i64 29)
+; CHECK-NEXT:    [[TMP37:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 132
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr addrspace(1) [[TMP37]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP38]], i64 28)
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       14:
+; CHECK:       24:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP19:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP10]]
 ; CHECK-NEXT:    [[TMP17:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
 ; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP17]]
 ; CHECK-NEXT:    call void @use_variables()
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP11]], align 1
-; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP18]], align 2
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr addrspace(3) [[TMP11]] to i32
+; CHECK-NEXT:    [[TMP31:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP19]], i32 [[TMP30]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(1) [[TMP31]], align 1
+; CHECK-NEXT:    [[TMP32:%.*]] = ptrtoint ptr addrspace(3) [[TMP18]] to i32
+; CHECK-NEXT:    [[TMP33:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP19]], i32 [[TMP32]]
+; CHECK-NEXT:    store i32 8, ptr addrspace(1) [[TMP33]], align 2
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP19:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    [[TMP20:%.*]] = call ptr @llvm.returnaddress(i32 0)
 ; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
 ; CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr addrspace(1) [[TMP19]] to i64
@@ -85,5 +113,12 @@ define amdgpu_kernel void @k0() sanitize_address {
   ret void
 }
 ;.
+; CHECK: attributes #[[ATTR0]] = { sanitize_address }
+; CHECK: attributes #[[ATTR1]] = { sanitize_address "amdgpu-lds-size"="8" }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR4:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
+; CHECK: [[META0]] = !{i32 0, i32 1}
 ; CHECK: [[META1]] = !{i32 0}
 ;.
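
Stripping away the %TMP numbering, the new checks in this file boil down to one rewrite: every direct LDS access in an instrumented function is redirected through the kernel's malloc'ed global buffer. A minimal sketch with illustrative value names (the test itself uses autogenerated names):

  ; before the pass:
  ;   store i8 7, ptr addrspace(3) %lds.ptr, align 1
  ; after the pass:
  %base = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
  %off  = ptrtoint ptr addrspace(3) %lds.ptr to i32
  %gptr = getelementptr inbounds i8, ptr addrspace(1) %base, i32 %off
  store i8 7, ptr addrspace(1) %gptr, align 1

This is also why the load of the buffer base ([[TMP19]]) moves from the Free block up to just after the barrier: it is now needed for the stores in the kernel body, not only for freeing the allocation.
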
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll
index 4849b804835e0..bc7a842d2844b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll
@@ -7,7 +7,7 @@
 
 ;.
 ; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 8, !absolute_symbol [[META0:![0-9]+]]
-; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 1, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 16, i32 4, i32 8 } }, no_sanitize_address
+; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 32, i32 1, i32 32 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 64, i32 4, i32 32 } }, no_sanitize_address
 ;.
 define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k0(
@@ -30,22 +30,35 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    [[TMP12:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP15]], i64 [[TMP11]])
 ; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP12]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 8
+; CHECK-NEXT:    [[TMP30:%.*]] = ptrtoint ptr addrspace(1) [[TMP25]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP30]], i64 24)
+; CHECK-NEXT:    [[TMP31:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 33
+; CHECK-NEXT:    [[TMP32:%.*]] = ptrtoint ptr addrspace(1) [[TMP31]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP32]], i64 31)
+; CHECK-NEXT:    [[TMP33:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP6]], i64 68
+; CHECK-NEXT:    [[TMP34:%.*]] = ptrtoint ptr addrspace(1) [[TMP33]] to i64
+; CHECK-NEXT:    call void @__asan_poison_region(i64 [[TMP34]], i64 28)
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       14:
+; CHECK:       20:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP19:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP10]]
 ; CHECK-NEXT:    [[TMP17:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
 ; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP17]]
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP18]], align 4
-; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP24]], align 2
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr addrspace(3) [[TMP18]] to i32
+; CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP19]], i32 [[TMP26]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(1) [[TMP27]], align 4
+; CHECK-NEXT:    [[TMP28:%.*]] = ptrtoint ptr addrspace(3) [[TMP24]] to i32
+; CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP19]], i32 [[TMP28]]
+; CHECK-NEXT:    store i32 8, ptr addrspace(1) [[TMP29]], align 2
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP19:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    [[TMP20:%.*]] = call ptr @llvm.returnaddress(i32 0)
 ; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
 ; CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr addrspace(1) [[TMP19]] to i64
@@ -59,7 +72,7 @@ define amdgpu_kernel void @k0() sanitize_address {
   ret void
 }
 ;.
-; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-lds-size"="24" }
+; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-lds-size"="8" }
 ; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
 ; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
 ; CHECK: attributes #[[ATTR3:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
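
As a cross-check of the __asan_poison_region calls added in these tests: each %llvm.amdgcn.sw.lds.k0.md.item reads as { offset, size, size including red-zone padding }, and the bytes from offset+size up to offset plus the padded size form the red zone that is poisoned right after the malloc'ed pointer is stored to @llvm.amdgcn.sw.lds.k0. For { i32 0, i32 8, i32 32 } that is base+8 for 24 bytes, for { i32 32, i32 1, i32 32 } it is base+33 for 31 bytes, and for { i32 64, i32 4, i32 32 } it is base+68 for 28 bytes, which matches the three calls checked in this test; the five-member layout in the previous file follows the same arithmetic.
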


