[llvm] [AMDGPU] Introduce "amdgpu-sw-lower-lds" pass to lower LDS accesses to use device global memory. (PR #87265)

via llvm-commits llvm-commits at lists.llvm.org
Sun May 19 03:53:03 PDT 2024


https://github.com/skc7 updated https://github.com/llvm/llvm-project/pull/87265

From 8c0acc91c40c75a65d0bbd1d5840ac9e9f81f597 Mon Sep 17 00:00:00 2001
From: skc7 <Krishna.Sankisa at amd.com>
Date: Thu, 7 Mar 2024 12:40:41 +0530
Subject: [PATCH 1/3] [AMDGPU] Enable amdgpu-sw-lower-lds pass to lower LDS
 accesses to use device global memory. (#87265)

---
 llvm/lib/Target/AMDGPU/AMDGPU.h               |   9 +
 .../AMDGPU/AMDGPULowerModuleLDSPass.cpp       |  44 +-
 llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def |   1 +
 llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp   | 876 ++++++++++++++++++
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp |   1 +
 llvm/lib/Target/AMDGPU/CMakeLists.txt         |   1 +
 .../Target/AMDGPU/Utils/AMDGPUMemoryUtils.h   |   2 +-
 ...pu-sw-lower-lds-dynamic-indirect-access.ll | 100 ++
 .../amdgpu-sw-lower-lds-dynamic-lds-test.ll   |  63 ++
 ...ds-multi-static-dynamic-indirect-access.ll | 194 ++++
 ...gpu-sw-lower-lds-multiple-blocks-return.ll |  79 ++
 ...ower-lds-static-dynamic-indirect-access.ll | 101 ++
 ...pu-sw-lower-lds-static-dynamic-lds-test.ll |  88 ++
 ...s-static-indirect-access-function-param.ll |  61 ++
 ...lower-lds-static-indirect-access-nested.ll | 220 +++++
 ...gpu-sw-lower-lds-static-indirect-access.ll |  85 ++
 .../amdgpu-sw-lower-lds-static-lds-test.ll    |  58 ++
 17 files changed, 1939 insertions(+), 44 deletions(-)
 create mode 100644 llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index 6016bd5187d88..15ff74f7c53af 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -263,6 +263,15 @@ struct AMDGPUAlwaysInlinePass : PassInfoMixin<AMDGPUAlwaysInlinePass> {
   bool GlobalOpt;
 };
 
+void initializeAMDGPUSwLowerLDSLegacyPass(PassRegistry &);
+extern char &AMDGPUSwLowerLDSLegacyPassID;
+ModulePass *createAMDGPUSwLowerLDSLegacyPass();
+
+struct AMDGPUSwLowerLDSPass : PassInfoMixin<AMDGPUSwLowerLDSPass> {
+  AMDGPUSwLowerLDSPass() {}
+  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
 class AMDGPUCodeGenPreparePass
     : public PassInfoMixin<AMDGPUCodeGenPreparePass> {
 private:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
index 2c7163a775372..625ac0230f160 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -862,48 +862,6 @@ class AMDGPULowerModuleLDS {
     return N;
   }
 
-  /// Strip "amdgpu-no-lds-kernel-id" from any functions where we may have
-  /// introduced its use. If AMDGPUAttributor ran prior to the pass, we inferred
-  /// the lack of llvm.amdgcn.lds.kernel.id calls.
-  void removeNoLdsKernelIdFromReachable(CallGraph &CG, Function *KernelRoot) {
-    KernelRoot->removeFnAttr("amdgpu-no-lds-kernel-id");
-
-    SmallVector<Function *> WorkList({CG[KernelRoot]->getFunction()});
-    SmallPtrSet<Function *, 8> Visited;
-    bool SeenUnknownCall = false;
-
-    while (!WorkList.empty()) {
-      Function *F = WorkList.pop_back_val();
-
-      for (auto &CallRecord : *CG[F]) {
-        if (!CallRecord.second)
-          continue;
-
-        Function *Callee = CallRecord.second->getFunction();
-        if (!Callee) {
-          if (!SeenUnknownCall) {
-            SeenUnknownCall = true;
-
-            // If we see any indirect calls, assume nothing about potential
-            // targets.
-            // TODO: This could be refined to possible LDS global users.
-            for (auto &ExternalCallRecord : *CG.getExternalCallingNode()) {
-              Function *PotentialCallee =
-                  ExternalCallRecord.second->getFunction();
-              assert(PotentialCallee);
-              if (!isKernelLDS(PotentialCallee))
-                PotentialCallee->removeFnAttr("amdgpu-no-lds-kernel-id");
-            }
-          }
-        } else {
-          Callee->removeFnAttr("amdgpu-no-lds-kernel-id");
-          if (Visited.insert(Callee).second)
-            WorkList.push_back(Callee);
-        }
-      }
-    }
-  }
-
   DenseMap<Function *, GlobalVariable *> lowerDynamicLDSVariables(
       Module &M, LDSUsesInfoTy &LDSUsesInfo,
       DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS,
@@ -1059,7 +1017,7 @@ class AMDGPULowerModuleLDS {
       //
       // TODO: We could filter out subgraphs that do not access LDS globals.
       for (Function *F : KernelsThatAllocateTableLDS)
-        removeNoLdsKernelIdFromReachable(CG, F);
+        removeFnAttrFromReachable(CG, F, "amdgpu-no-lds-kernel-id");
     }
 
     DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS =
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
index 90f36fadf3590..eda4949d0296d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
@@ -22,6 +22,7 @@ MODULE_PASS("amdgpu-lower-buffer-fat-pointers",
             AMDGPULowerBufferFatPointersPass(*this))
 MODULE_PASS("amdgpu-lower-ctor-dtor", AMDGPUCtorDtorLoweringPass())
 MODULE_PASS("amdgpu-lower-module-lds", AMDGPULowerModuleLDSPass(*this))
+MODULE_PASS("amdgpu-sw-lower-lds", AMDGPUSwLowerLDSPass())
 MODULE_PASS("amdgpu-printf-runtime-binding", AMDGPUPrintfRuntimeBindingPass())
 MODULE_PASS("amdgpu-unify-metadata", AMDGPUUnifyMetadataPass())
 #undef MODULE_PASS
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
new file mode 100644
index 0000000000000..3b951e72aabed
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
@@ -0,0 +1,876 @@
+//===-- AMDGPUSwLowerLDS.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass lowers LDS (local data store) uses in kernel and non-kernel
+// functions in the module to dynamically allocated device global memory.
+//
+// Replacement of kernel LDS accesses:
+//    For a kernel, LDS accesses can be static or dynamic, and either direct
+//    (accessed within the kernel) or indirect (accessed through non-kernels).
+//    A device global memory allocation equal to the size of all these LDS
+//    globals is made. At the prologue of the kernel, a single work-item of
+//    the work-group does a "malloc" and stores the pointer of the allocation
+//    in a new LDS global created for the kernel. This global is called
+//    "SW LDS" in this pass.
+//    Each LDS access corresponds to an offset into the allocated memory.
+//    All static LDS accesses are allocated first; dynamic LDS then occupies
+//    the rest of the device global memory.
+//    To store the offsets corresponding to all LDS accesses, another global
+//    variable is created, called "SW LDS metadata" in this pass.
+//    - SW LDS Global:
+//        An LDS global of ptr type with name
+//        "llvm.amdgcn.sw.lds.<kernel-name>".
+//    - Metadata Global:
+//        A global of struct type with n members, where n is the number of
+//        LDS globals accessed by the kernel (directly and indirectly). Each
+//        member is itself a struct of type {i32, i32, i32}: the first member
+//        is the offset, the second the size of the LDS global being
+//        replaced, and the third the total aligned size. It is named
+//        "llvm.amdgcn.sw.lds.<kernel-name>.md". Its initializer has the
+//        static LDS offsets and sizes filled in; for dynamic LDS entries,
+//        the offset is initialized to the end offset of the last static LDS
+//        allocation and the size to zero. The dynamic offset and size values
+//        are updated within the kernel, since the kernel can read the
+//        dynamic LDS size allocated at runtime by querying the
+//        "hidden_dynamic_lds_size" hidden kernel argument.
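+//
+//        As an illustration, a hypothetical kernel "k0" with one 4-byte
+//        static LDS global and one dynamic LDS global (max alignment 4)
+//        would get globals roughly like:
+//          @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison
+//          %llvm.amdgcn.sw.lds.k0.md.type =
+//              type { %llvm.amdgcn.sw.lds.k0.md.item,
+//                     %llvm.amdgcn.sw.lds.k0.md.item }
+//          @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global
+//              %llvm.amdgcn.sw.lds.k0.md.type
+//              { { i32 0, i32 4, i32 4 },  ; static: offset 0, size 4
+//                { i32 4, i32 0, i32 0 } } ; dynamic: patched at runtime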
+//
+//    LDS accesses within the kernel are replaced by a "gep" ptr to the
+//    corresponding offset into the allocated device global memory for the
+//    kernel. At the epilogue of the kernel, the allocated memory is freed by
+//    the same single work-item. A sketch of the resulting control flow
+//    follows.
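+//
+//    The resulting kernel control flow is, in sketch form:
+//      WId:      compute (workitem.id.x | id.y | id.z); branch to Malloc
+//                if the result is 0, else to the original entry block
+//      Malloc:   compute the total malloc size, call malloc and store the
+//                returned pointer to the "SW LDS" global
+//      entry:    phi of the WId condition; s.barrier; original kernel body
+//      CondFree: s.barrier; branch to Free only for the {0,0,0} work-item
+//      Free:     free the pointer loaded from the "SW LDS" global
+//      End:      return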
+//
+// Replacement of non-kernel LDS accesses:
+//    Multiple kernels can reach the same non-kernel function.
+//    All the kernels accessing LDS through non-kernels are sorted and
+//    assigned a kernel-id. All the LDS globals accessed by non-kernels
+//    are sorted. This information is used to build two tables:
+//    - Base table:
+//        The base table has a single row, with one element per kernel ID.
+//        Each element in the row is the address of the "SW LDS" global
+//        created for that kernel.
+//    - Offset table:
+//        The offset table has multiple rows and columns. Rows are numbered
+//        0 to (n-1), where n is the total number of kernels accessing LDS
+//        through non-kernels. Each row has m elements, where m is the total
+//        number of unique LDS globals accessed by all non-kernels. Each
+//        element is the address of the metadata entry holding the offset of
+//        that LDS global's replacement in that particular kernel.
+//    An LDS variable in a non-kernel is replaced using the information from
+//    the base and offset tables: the kernel-id query selects the address of
+//    the "SW LDS" global of the executing kernel from the base table, and
+//    the offset into it is read through the corresponding element of the
+//    offset table, as in the sketch below.
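+//
+//    For example, a non-kernel access to an LDS global @lds_x is rewritten
+//    roughly to (hypothetical value names; the tests in this patch show the
+//    exact output):
+//      %id   = call i32 @llvm.amdgcn.lds.kernel.id()
+//      %base = load i32 from the base table element at index %id
+//      %slot = load i32 from the offset table element (%id, column of
+//              @lds_x); %off = load i32 via inttoptr of %slot
+//      %ptr  = getelementptr i8, (inttoptr %base to ptr addrspace(3)), %off
+//    All uses of @lds_x in that function are then replaced with %ptr.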
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "Utils/AMDGPUMemoryUtils.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Analysis/DomTreeUpdater.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicsAMDGPU.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/ReplaceConstant.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+
+#include <algorithm>
+
+#define DEBUG_TYPE "amdgpu-sw-lower-lds"
+
+using namespace llvm;
+using namespace AMDGPU;
+
+namespace {
+
+using DomTreeCallback = function_ref<DominatorTree *(Function &F)>;
+
+struct LDSAccessTypeInfo {
+  SetVector<GlobalVariable *> StaticLDSGlobals;
+  SetVector<GlobalVariable *> DynamicLDSGlobals;
+};
+
+// Struct to hold all the metadata required for a kernel to replace its LDS
+// global uses with the corresponding offsets into device global memory.
+struct KernelLDSParameters {
+  GlobalVariable *SwLDS = nullptr;
+  GlobalVariable *SwLDSMetadata = nullptr;
+  LDSAccessTypeInfo DirectAccess;
+  LDSAccessTypeInfo IndirectAccess;
+  DenseMap<GlobalVariable *, SmallVector<uint32_t, 3>>
+      LDSToReplacementIndicesMap;
+};
+
+// Struct to store the info needed to create the base and offset tables for
+// all the non-kernel LDS accesses.
+struct NonKernelLDSParameters {
+  GlobalVariable *LDSBaseTable{nullptr};
+  GlobalVariable *LDSOffsetTable{nullptr};
+  SetVector<Function *> OrderedKernels;
+  SetVector<GlobalVariable *> OrderedLDSGlobals;
+};
+
+class AMDGPUSwLowerLDS {
+public:
+  AMDGPUSwLowerLDS(Module &Mod, DomTreeCallback Callback)
+      : M(Mod), IRB(M.getContext()), DTCallback(Callback) {}
+  bool run();
+  void getUsesOfLDSByNonKernels(const CallGraph &CG,
+                                FunctionVariableMap &Functions);
+  SetVector<Function *>
+  getOrderedIndirectLDSAccessingKernels(SetVector<Function *> &&Kernels);
+  SetVector<GlobalVariable *>
+  getOrderedNonKernelAllLDSGlobals(SetVector<GlobalVariable *> &&Variables);
+  void populateSwLDSGlobal(Function *Func);
+  void populateSwMetadataGlobal(Function *Func);
+  void populateLDSToReplacementIndicesMap(Function *Func);
+  void replaceKernelLDSAccesses(Function *Func);
+  void lowerKernelLDSAccesses(Function *Func, DomTreeUpdater &DTU);
+  void buildNonKernelLDSOffsetTable(NonKernelLDSParameters &NKLDSParams);
+  void buildNonKernelLDSBaseTable(NonKernelLDSParameters &NKLDSParams);
+  Constant *
+  getAddressesOfVariablesInKernel(Function *Func,
+                                  SetVector<GlobalVariable *> &Variables);
+  void lowerNonKernelLDSAccesses(Function *Func,
+                                 SetVector<GlobalVariable *> &LDSGlobals,
+                                 NonKernelLDSParameters &NKLDSParams);
+
+private:
+  Module &M;
+  IRBuilder<> IRB;
+  DomTreeCallback DTCallback;
+  DenseMap<Function *, KernelLDSParameters> KernelToLDSParametersMap;
+};
+
+template <typename T> SetVector<T> sortByName(std::vector<T> &&V) {
+  // Sort the vector of globals or Functions based on their name.
+  // Returns a SetVector of globals/Functions.
+  sort(V, [](const auto *L, const auto *R) {
+    return L->getName() < R->getName();
+  });
+  return {SetVector<T>(V.begin(), V.end())};
+}
+
+SetVector<GlobalVariable *> AMDGPUSwLowerLDS::getOrderedNonKernelAllLDSGlobals(
+    SetVector<GlobalVariable *> &&Variables) {
+  // Sort all the non-kernel LDS accesses based on their name.
+  return sortByName(
+      std::vector<GlobalVariable *>(Variables.begin(), Variables.end()));
+}
+
+SetVector<Function *> AMDGPUSwLowerLDS::getOrderedIndirectLDSAccessingKernels(
+    SetVector<Function *> &&Kernels) {
+  // Sort the kernels that access LDS through non-kernels by name.
+  // Also assign kernel ID metadata based on the sorted order.
+  LLVMContext &Ctx = M.getContext();
+  if (Kernels.size() > UINT32_MAX) {
+    // 32 bit keeps it in one SGPR. > 2**32 kernels won't fit on the GPU
+    report_fatal_error("Unimplemented SW LDS lowering for > 2**32 kernels");
+  }
+  SetVector<Function *> OrderedKernels =
+      sortByName(std::vector<Function *>(Kernels.begin(), Kernels.end()));
+  for (size_t i = 0; i < OrderedKernels.size(); i++) {
+    Metadata *AttrMDArgs[1] = {
+        ConstantAsMetadata::get(IRB.getInt32(i)),
+    };
+    Function *Func = OrderedKernels[i];
+    Func->setMetadata("llvm.amdgcn.lds.kernel.id",
+                      MDNode::get(Ctx, AttrMDArgs));
+  }
+  return OrderedKernels;
+}
+
+void AMDGPUSwLowerLDS::getUsesOfLDSByNonKernels(
+    const CallGraph &CG, FunctionVariableMap &Functions) {
+  // Get uses of LDS from non-kernel functions, excluding uses by called
+  // functions.
+  for (auto &GV : M.globals()) {
+    if (!AMDGPU::isLDSVariableToLower(GV))
+      continue;
+
+    if (GV.isAbsoluteSymbolRef()) {
+      report_fatal_error(
+          "LDS variables with absolute addresses are unimplemented.");
+    }
+
+    for (User *V : GV.users()) {
+      if (auto *I = dyn_cast<Instruction>(V)) {
+        Function *F = I->getFunction();
+        if (!isKernelLDS(F))
+          Functions[F].insert(&GV);
+      }
+    }
+  }
+}
+
+void AMDGPUSwLowerLDS::populateSwLDSGlobal(Function *Func) {
+  // Create new LDS global required for each kernel to store
+  // device global memory pointer.
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  // create new global pointer variable
+  LDSParams.SwLDS = new GlobalVariable(
+      M, IRB.getPtrTy(), false, GlobalValue::InternalLinkage,
+      PoisonValue::get(IRB.getPtrTy()), "llvm.amdgcn.sw.lds." + Func->getName(),
+      nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS, false);
+}
+
+void AMDGPUSwLowerLDS::populateSwMetadataGlobal(Function *Func) {
+  // Create a new metadata global for every kernel and initialize the
+  // start offsets and sizes corresponding to each LDS access.
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  auto &Ctx = M.getContext();
+  auto &DL = M.getDataLayout();
+  std::vector<Type *> Items;
+  Type *Int32Ty = IRB.getInt32Ty();
+  std::vector<Constant *> Initializers;
+  Align MaxAlignment(1);
+  auto UpdateMaxAlignment = [&MaxAlignment, &DL](GlobalVariable *GV) {
+    Align GVAlign = AMDGPU::getAlign(DL, GV);
+    MaxAlignment = std::max(MaxAlignment, GVAlign);
+  };
+
+  for (GlobalVariable *GV : LDSParams.DirectAccess.StaticLDSGlobals)
+    UpdateMaxAlignment(GV);
+
+  for (GlobalVariable *GV : LDSParams.DirectAccess.DynamicLDSGlobals)
+    UpdateMaxAlignment(GV);
+
+  for (GlobalVariable *GV : LDSParams.IndirectAccess.StaticLDSGlobals)
+    UpdateMaxAlignment(GV);
+
+  for (GlobalVariable *GV : LDSParams.IndirectAccess.DynamicLDSGlobals)
+    UpdateMaxAlignment(GV);
+
+  // Each metadata item is {StartOffset, SizeInBytes, AlignedSizeInBytes}.
+  SmallString<128> MDItemStr;
+  raw_svector_ostream MDItemOS(MDItemStr);
+  MDItemOS << "llvm.amdgcn.sw.lds." << Func->getName().str() << ".md.item";
+
+  StructType *LDSItemTy =
+      StructType::create(Ctx, {Int32Ty, Int32Ty, Int32Ty}, MDItemOS.str());
+  uint32_t MallocSize = 0;
+  auto buildInitializerForSwLDSMD =
+      [&](SetVector<GlobalVariable *> &LDSGlobals) {
+        for (auto &GV : LDSGlobals) {
+          Type *Ty = GV->getValueType();
+          const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
+          Items.push_back(LDSItemTy);
+          Constant *ItemStartOffset = ConstantInt::get(Int32Ty, MallocSize);
+          Constant *SizeInBytesConst = ConstantInt::get(Int32Ty, SizeInBytes);
+          uint64_t AlignedSize = alignTo(SizeInBytes, MaxAlignment);
+          Constant *AlignedSizeInBytesConst =
+              ConstantInt::get(Int32Ty, AlignedSize);
+          MallocSize += AlignedSize;
+          Constant *InitItem =
+              ConstantStruct::get(LDSItemTy, {ItemStartOffset, SizeInBytesConst,
+                                              AlignedSizeInBytesConst});
+          Initializers.push_back(InitItem);
+        }
+      };
+
+  buildInitializerForSwLDSMD(LDSParams.DirectAccess.StaticLDSGlobals);
+  buildInitializerForSwLDSMD(LDSParams.IndirectAccess.StaticLDSGlobals);
+  buildInitializerForSwLDSMD(LDSParams.DirectAccess.DynamicLDSGlobals);
+  buildInitializerForSwLDSMD(LDSParams.IndirectAccess.DynamicLDSGlobals);
+
+  SmallString<128> MDTypeStr;
+  raw_svector_ostream MDTypeOS(MDTypeStr);
+  MDTypeOS << "llvm.amdgcn.sw.lds." << Func->getName().str() << ".md.type";
+
+  StructType *MetadataStructType =
+      StructType::create(Ctx, Items, MDTypeOS.str());
+  SmallString<128> MDStr;
+  raw_svector_ostream MDOS(MDStr);
+  MDOS << "llvm.amdgcn.sw.lds." << Func->getName().str() << ".md";
+  LDSParams.SwLDSMetadata = new GlobalVariable(
+      M, MetadataStructType, false, GlobalValue::InternalLinkage,
+      PoisonValue::get(MetadataStructType), MDOS.str(), nullptr,
+      GlobalValue::NotThreadLocal, AMDGPUAS::GLOBAL_ADDRESS, false);
+  Constant *Data = ConstantStruct::get(MetadataStructType, Initializers);
+  LDSParams.SwLDSMetadata->setInitializer(Data);
+  assert(LDSParams.SwLDS);
+  // Set the alignment to MaxAlignment for SwLDS.
+  LDSParams.SwLDS->setAlignment(MaxAlignment);
+  GlobalValue::SanitizerMetadata MD;
+  MD.NoAddress = true;
+  LDSParams.SwLDSMetadata->setSanitizerMetadata(MD);
+}
+
+void AMDGPUSwLowerLDS::populateLDSToReplacementIndicesMap(Function *Func) {
+  // Fill the corresponding LDS replacement indices for each LDS access
+  // related to this kernel.
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  auto PopulateIndices = [&](SetVector<GlobalVariable *> &LDSGlobals,
+                             uint32_t &Idx) {
+    for (auto &GV : LDSGlobals) {
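+      // This triple is later used as GEP indices into the metadata struct:
+      // element 0 of the global, member Idx, field 0 (the start offset).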
+      LDSParams.LDSToReplacementIndicesMap[GV] = {0, Idx, 0};
+      ++Idx;
+    }
+  };
+  uint32_t Idx = 0;
+  PopulateIndices(LDSParams.DirectAccess.StaticLDSGlobals, Idx);
+  PopulateIndices(LDSParams.IndirectAccess.StaticLDSGlobals, Idx);
+  PopulateIndices(LDSParams.DirectAccess.DynamicLDSGlobals, Idx);
+  PopulateIndices(LDSParams.IndirectAccess.DynamicLDSGlobals, Idx);
+}
+
+static void replaceUsesOfGlobalInFunction(Function *Func, GlobalVariable *GV,
+                                          Value *Replacement) {
+  // Replace all uses of LDS global in this Function with a Replacement.
+  auto ReplaceUsesLambda = [Func](const Use &U) -> bool {
+    auto *V = U.getUser();
+    if (auto *Inst = dyn_cast<Instruction>(V)) {
+      auto *InstFunc = Inst->getFunction();
+      if (Func == InstFunc)
+        return true;
+    }
+    return false;
+  };
+  GV->replaceUsesWithIf(Replacement, ReplaceUsesLambda);
+}
+
+void AMDGPUSwLowerLDS::replaceKernelLDSAccesses(Function *Func) {
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  GlobalVariable *SwLDS = LDSParams.SwLDS;
+  assert(SwLDS);
+  GlobalVariable *SwLDSMetadata = LDSParams.SwLDSMetadata;
+  assert(SwLDSMetadata);
+  StructType *SwLDSMetadataStructType =
+      cast<StructType>(SwLDSMetadata->getValueType());
+  Type *Int32Ty = IRB.getInt32Ty();
+
+  auto &IndirectAccess = LDSParams.IndirectAccess;
+  auto &DirectAccess = LDSParams.DirectAccess;
+  // Replace all uses of LDS global in this Function with a Replacement.
+  auto ReplaceLDSGlobalUses = [&](SetVector<GlobalVariable *> &LDSGlobals) {
+    for (auto &GV : LDSGlobals) {
+      // Do not generate instructions if the LDS global is accessed only
+      // indirectly (i.e., only through non-kernels).
+      if ((IndirectAccess.StaticLDSGlobals.contains(GV) ||
+           IndirectAccess.DynamicLDSGlobals.contains(GV)) &&
+          (!DirectAccess.StaticLDSGlobals.contains(GV) &&
+           !DirectAccess.DynamicLDSGlobals.contains(GV)))
+        continue;
+      auto &Indices = LDSParams.LDSToReplacementIndicesMap[GV];
+      assert(Indices.size() == 3);
+      uint32_t Idx0 = Indices[0];
+      uint32_t Idx1 = Indices[1];
+      uint32_t Idx2 = Indices[2];
+      Constant *GEPIdx[] = {ConstantInt::get(Int32Ty, Idx0),
+                            ConstantInt::get(Int32Ty, Idx1),
+                            ConstantInt::get(Int32Ty, Idx2)};
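+      // A constant GEP selects this global's offset field in the metadata
+      // struct; the loaded offset is then applied to the "SW LDS" base to
+      // form the replacement pointer.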
+      Constant *GEP = ConstantExpr::getGetElementPtr(
+          SwLDSMetadataStructType, SwLDSMetadata, GEPIdx, true);
+      Value *Load = IRB.CreateLoad(Int32Ty, GEP);
+      Value *BasePlusOffset =
+          IRB.CreateInBoundsGEP(IRB.getInt8Ty(), SwLDS, {Load});
+      LLVM_DEBUG(dbgs() << "Sw LDS Lowering, Replacing LDS "
+                        << GV->getName().str());
+      replacesUsesOfGlobalInFunction(Func, GV, BasePlusOffset);
+    }
+  };
+  ReplaceLDSGlobalUses(DirectAccess.StaticLDSGlobals);
+  ReplaceLDSGlobalUses(IndirectAccess.StaticLDSGlobals);
+  ReplaceLDSGlobalUses(DirectAccess.DynamicLDSGlobals);
+  ReplaceLDSGlobalUses(IndirectAccess.DynamicLDSGlobals);
+}
+
+void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
+                                              DomTreeUpdater &DTU) {
+  LLVM_DEBUG(dbgs() << "Sw Lowering Kernel LDS for : "
+                    << Func->getName().str());
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  auto &Ctx = M.getContext();
+  auto *PrevEntryBlock = &Func->getEntryBlock();
+
+  // Create malloc block.
+  auto *MallocBlock = BasicBlock::Create(Ctx, "Malloc", Func, PrevEntryBlock);
+
+  // Create the WId block, which holds the instructions selecting the
+  // {0,0,0}-index work-item in the work-group: (id.x | id.y | id.z) == 0.
+  auto *WIdBlock = BasicBlock::Create(Ctx, "WId", Func, MallocBlock);
+  IRB.SetInsertPoint(WIdBlock, WIdBlock->begin());
+  auto *const WIdx =
+      IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {}, {});
+  auto *const WIdy =
+      IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_y, {}, {});
+  auto *const WIdz =
+      IRB.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_z, {}, {});
+  auto *const XYOr = IRB.CreateOr(WIdx, WIdy);
+  auto *const XYZOr = IRB.CreateOr(XYOr, WIdz);
+  auto *const WIdzCond = IRB.CreateICmpEQ(XYZOr, IRB.getInt32(0));
+
+  GlobalVariable *SwLDS = LDSParams.SwLDS;
+  GlobalVariable *SwLDSMetadata = LDSParams.SwLDSMetadata;
+  assert(SwLDS && SwLDSMetadata);
+  StructType *MetadataStructType =
+      cast<StructType>(SwLDSMetadata->getValueType());
+
+  // All work-items branch to PrevEntryBlock except the {0,0,0}-index
+  // work-item, which branches to the malloc block.
+  IRB.CreateCondBr(WIdzCond, MallocBlock, PrevEntryBlock);
+
+  // Malloc block
+  IRB.SetInsertPoint(MallocBlock, MallocBlock->begin());
+
+  // If dynamic LDS globals are accessed by the kernel, get the dynamic LDS
+  // size from the hidden dyn_lds_size kernel argument and update the
+  // corresponding metadata global entries.
+  uint32_t MallocSize = 0;
+  Value *CurrMallocSize;
+
+  unsigned NumStaticLDS = LDSParams.DirectAccess.StaticLDSGlobals.size() +
+                          LDSParams.IndirectAccess.StaticLDSGlobals.size();
+  unsigned NumDynLDS = LDSParams.DirectAccess.DynamicLDSGlobals.size() +
+                       LDSParams.IndirectAccess.DynamicLDSGlobals.size();
+
+  if (NumStaticLDS) {
+    auto *GEPForEndStaticLDSOffset = IRB.CreateInBoundsGEP(
+        MetadataStructType, SwLDSMetadata,
+        {IRB.getInt32(0), IRB.getInt32(NumStaticLDS - 1), IRB.getInt32(0)});
+
+    auto *GEPForEndStaticLDSSize = IRB.CreateInBoundsGEP(
+        MetadataStructType, SwLDSMetadata,
+        {IRB.getInt32(0), IRB.getInt32(NumStaticLDS - 1), IRB.getInt32(2)});
+
+    Value *EndStaticLDSOffset =
+        IRB.CreateLoad(IRB.getInt64Ty(), GEPForEndStaticLDSOffset);
+    Value *EndStaticLDSSize =
+        IRB.CreateLoad(IRB.getInt64Ty(), GEPForEndStaticLDSSize);
+    CurrMallocSize = IRB.CreateAdd(EndStaticLDSOffset, EndStaticLDSSize);
+  } else {
+    CurrMallocSize = IRB.getInt64(MallocSize);
+  }
+
+  if (NumDynLDS) {
+    unsigned MaxAlignment = SwLDS->getAlignment();
+    Value *MaxAlignValue = IRB.getInt64(MaxAlignment);
+    Value *MaxAlignValueMinusOne = IRB.getInt64(MaxAlignment - 1);
+
+    Value *ImplicitArg =
+        IRB.CreateIntrinsic(Intrinsic::amdgcn_implicitarg_ptr, {}, {});
+    Value *HiddenDynLDSSize = IRB.CreateInBoundsGEP(
+        ImplicitArg->getType(), ImplicitArg, {IRB.getInt32(15)});
+
+    auto MallocSizeCalcLambda =
+        [&](SetVector<GlobalVariable *> &DynamicLDSGlobals) {
+          for (GlobalVariable *DynGV : DynamicLDSGlobals) {
+            auto &Indices = LDSParams.LDSToReplacementIndicesMap[DynGV];
+
+            // Update the Offset metadata.
+            auto *GEPForOffset = IRB.CreateInBoundsGEP(
+                MetadataStructType, SwLDSMetadata,
+                {IRB.getInt32(0), IRB.getInt32(Indices[1]), IRB.getInt32(0)});
+            IRB.CreateStore(CurrMallocSize, GEPForOffset);
+
+            // Get size from hidden dyn_lds_size argument of kernel
+            // Update the size and Aligned Size metadata.
+            auto *GEPForSize = IRB.CreateInBoundsGEP(
+                MetadataStructType, SwLDSMetadata,
+                {IRB.getInt32(0), IRB.getInt32(Indices[1]), IRB.getInt32(1)});
+            Value *CurrDynLDSSize =
+                IRB.CreateLoad(IRB.getInt64Ty(), HiddenDynLDSSize);
+            IRB.CreateStore(CurrDynLDSSize, GEPForSize);
+
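+            // Round the dynamic size up to MaxAlignment:
+            //   aligned = ((size + align - 1) / align) * align.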
+            auto *GEPForAlignedSize = IRB.CreateInBoundsGEP(
+                MetadataStructType, SwLDSMetadata,
+                {IRB.getInt32(0), IRB.getInt32(Indices[1]), IRB.getInt32(2)});
+            Value *AlignedDynLDSSize =
+                IRB.CreateAdd(CurrDynLDSSize, MaxAlignValueMinusOne);
+            AlignedDynLDSSize =
+                IRB.CreateUDiv(AlignedDynLDSSize, MaxAlignValue);
+            AlignedDynLDSSize = IRB.CreateMul(AlignedDynLDSSize, MaxAlignValue);
+            IRB.CreateStore(AlignedDynLDSSize, GEPForAlignedSize);
+
+            // Update the Current Malloc Size
+            CurrMallocSize = IRB.CreateAdd(CurrMallocSize, AlignedDynLDSSize);
+          }
+        };
+    MallocSizeCalcLambda(LDSParams.DirectAccess.DynamicLDSGlobals);
+    MallocSizeCalcLambda(LDSParams.IndirectAccess.DynamicLDSGlobals);
+  }
+
+  // Create a call to the malloc function, which allocates device global
+  // memory equal to the total size of all LDS globals accessed by this
+  // kernel.
+  FunctionCallee AMDGPUMallocFunc = M.getOrInsertFunction(
+      StringRef("malloc"),
+      FunctionType::get(IRB.getPtrTy(1), {IRB.getInt64Ty()}, false));
+  Value *MCI = IRB.CreateCall(AMDGPUMallocFunc, {CurrMallocSize});
+
+  // Store the malloc'ed pointer to the SW LDS global.
+  IRB.CreateStore(MCI, SwLDS);
+
+  // Create branch to PrevEntryBlock
+  IRB.CreateBr(PrevEntryBlock);
+
+  // Create a work-group barrier at the start of the previous entry block.
+  Type *Int1Ty = IRB.getInt1Ty();
+  IRB.SetInsertPoint(PrevEntryBlock, PrevEntryBlock->begin());
+  auto *XYZCondPhi = IRB.CreatePHI(Int1Ty, 2, "xyzCond");
+  XYZCondPhi->addIncoming(IRB.getInt1(0), WIdBlock);
+  XYZCondPhi->addIncoming(IRB.getInt1(1), MallocBlock);
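+  // The phi re-materializes the "is this the {0,0,0} work-item" condition
+  // so that the same single work-item can later free the allocation.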
+
+  IRB.CreateIntrinsic(Intrinsic::amdgcn_s_barrier, {}, {});
+
+  replaceKernelLDSAccesses(Func);
+
+  auto *CondFreeBlock = BasicBlock::Create(Ctx, "CondFree", Func);
+  auto *FreeBlock = BasicBlock::Create(Ctx, "Free", Func);
+  auto *EndBlock = BasicBlock::Create(Ctx, "End", Func);
+  for (BasicBlock &BB : *Func) {
+    if (!BB.empty()) {
+      if (ReturnInst *RI = dyn_cast<ReturnInst>(&BB.back())) {
+        RI->eraseFromParent();
+        IRB.SetInsertPoint(&BB, BB.end());
+        IRB.CreateBr(CondFreeBlock);
+      }
+    }
+  }
+
+  // Cond Free Block
+  IRB.SetInsertPoint(CondFreeBlock, CondFreeBlock->begin());
+  IRB.CreateIntrinsic(Intrinsic::amdgcn_s_barrier, {}, {});
+  IRB.CreateCondBr(XYZCondPhi, FreeBlock, EndBlock);
+
+  // Free Block
+  IRB.SetInsertPoint(FreeBlock, FreeBlock->begin());
+
+  // Free the previously allocated device global memory.
+  FunctionCallee AMDGPUFreeFunc = M.getOrInsertFunction(
+      StringRef("free"),
+      FunctionType::get(IRB.getVoidTy(), {IRB.getPtrTy()}, false));
+
+  Value *MallocPtr = IRB.CreateLoad(IRB.getPtrTy(), SwLDS);
+  IRB.CreateCall(AMDGPUFreeFunc, {MallocPtr});
+  IRB.CreateBr(EndBlock);
+
+  // End Block
+  IRB.SetInsertPoint(EndBlock, EndBlock->begin());
+  IRB.CreateRetVoid();
+  // Update the DomTree with corresponding links to basic blocks.
+  DTU.applyUpdates({{DominatorTree::Insert, WIdBlock, MallocBlock},
+                    {DominatorTree::Insert, MallocBlock, PrevEntryBlock},
+                    {DominatorTree::Insert, CondFreeBlock, FreeBlock},
+                    {DominatorTree::Insert, FreeBlock, EndBlock}});
+}
+
+Constant *AMDGPUSwLowerLDS::getAddressesOfVariablesInKernel(
+    Function *Func, SetVector<GlobalVariable *> &Variables) {
+  Type *Int32Ty = IRB.getInt32Ty();
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+
+  GlobalVariable *SwLDSMetadata = LDSParams.SwLDSMetadata;
+  assert(SwLDSMetadata);
+  auto *SwLDSMetadataStructType =
+      cast<StructType>(SwLDSMetadata->getValueType());
+  ArrayType *KernelOffsetsType = ArrayType::get(Int32Ty, Variables.size());
+
+  SmallVector<Constant *> Elements;
+  for (size_t i = 0; i < Variables.size(); i++) {
+    GlobalVariable *GV = Variables[i];
+    if (!LDSParams.LDSToReplacementIndicesMap.contains(GV)) {
+      Elements.push_back(PoisonValue::get(Int32Ty));
+      continue;
+    }
+    auto &Indices = LDSParams.LDSToReplacementIndicesMap[GV];
+    uint32_t Idx0 = Indices[0];
+    uint32_t Idx1 = Indices[1];
+    uint32_t Idx2 = Indices[2];
+    Constant *GEPIdx[] = {ConstantInt::get(Int32Ty, Idx0),
+                          ConstantInt::get(Int32Ty, Idx1),
+                          ConstantInt::get(Int32Ty, Idx2)};
+    Constant *GEP = ConstantExpr::getGetElementPtr(SwLDSMetadataStructType,
+                                                   SwLDSMetadata, GEPIdx, true);
+    auto *Elt = ConstantExpr::getPtrToInt(GEP, Int32Ty);
+    Elements.push_back(Elt);
+  }
+  return ConstantArray::get(KernelOffsetsType, Elements);
+}
+
+void AMDGPUSwLowerLDS::buildNonKernelLDSBaseTable(
+    NonKernelLDSParameters &NKLDSParams) {
+  // The base table has a single row, with one element per kernel ID. Each
+  // element in the row is the address of the "SW LDS" global created for
+  // that kernel.
+  auto &Kernels = NKLDSParams.OrderedKernels;
+  Type *Int32Ty = IRB.getInt32Ty();
+  const size_t NumberKernels = Kernels.size();
+  ArrayType *AllKernelsOffsetsType = ArrayType::get(Int32Ty, NumberKernels);
+  std::vector<Constant *> OverallConstantExprElts(NumberKernels);
+  for (size_t i = 0; i < NumberKernels; i++) {
+    Function *Func = Kernels[i];
+    auto &LDSParams = KernelToLDSParametersMap[Func];
+    GlobalVariable *SwLDS = LDSParams.SwLDS;
+    assert(SwLDS);
+    Constant *GEPIdx[] = {ConstantInt::get(Int32Ty, 0)};
+    Constant *GEP =
+        ConstantExpr::getGetElementPtr(SwLDS->getType(), SwLDS, GEPIdx, true);
+    auto Elt = ConstantExpr::getPtrToInt(GEP, Int32Ty);
+    OverallConstantExprElts[i] = Elt;
+  }
+  Constant *Init =
+      ConstantArray::get(AllKernelsOffsetsType, OverallConstantExprElts);
+  NKLDSParams.LDSBaseTable = new GlobalVariable(
+      M, AllKernelsOffsetsType, true, GlobalValue::InternalLinkage, Init,
+      "llvm.amdgcn.sw.lds.base.table", nullptr, GlobalValue::NotThreadLocal,
+      AMDGPUAS::CONSTANT_ADDRESS);
+}
+
+void AMDGPUSwLowerLDS::buildNonKernelLDSOffsetTable(
+    NonKernelLDSParameters &NKLDSParams) {
+  // The offset table has multiple rows and columns. Rows are numbered 0 to
+  // (n-1), where n is the total number of kernels accessing LDS through
+  // non-kernels. Each row has m elements, where m is the total number of
+  // unique LDS globals accessed by non-kernels. Each element is the address
+  // of the metadata entry holding the offset of that LDS global's
+  // replacement in that particular kernel.
+  auto &Variables = NKLDSParams.OrderedLDSGlobals;
+  auto &Kernels = NKLDSParams.OrderedKernels;
+  assert(!Variables.empty());
+  assert(!Kernels.empty());
+  const size_t NumberVariables = Variables.size();
+  const size_t NumberKernels = Kernels.size();
+
+  ArrayType *KernelOffsetsType =
+      ArrayType::get(IRB.getInt32Ty(), NumberVariables);
+
+  ArrayType *AllKernelsOffsetsType =
+      ArrayType::get(KernelOffsetsType, NumberKernels);
+  std::vector<Constant *> OverallConstantExprElts(NumberKernels);
+  for (size_t i = 0; i < NumberKernels; i++) {
+    Function *Func = Kernels[i];
+    OverallConstantExprElts[i] =
+        getAddressesOfVariablesInKernel(Func, Variables);
+  }
+  Constant *Init =
+      ConstantArray::get(AllKernelsOffsetsType, OverallConstantExprElts);
+  NKLDSParams.LDSOffsetTable = new GlobalVariable(
+      M, AllKernelsOffsetsType, true, GlobalValue::InternalLinkage, Init,
+      "llvm.amdgcn.sw.lds.offset.table", nullptr, GlobalValue::NotThreadLocal,
+      AMDGPUAS::CONSTANT_ADDRESS);
+}
+
+void AMDGPUSwLowerLDS::lowerNonKernelLDSAccesses(
+    Function *Func, SetVector<GlobalVariable *> &LDSGlobals,
+    NonKernelLDSParameters &NKLDSParams) {
+  // Replace LDS access in non-kernel with replacement queried from
+  // Base table and offset from offset table.
+  LLVM_DEBUG(dbgs() << "Sw LDS lowering, lower non-kernel access for : "
+                    << Func->getName().str());
+  auto *EntryBlock = &Func->getEntryBlock();
+  IRB.SetInsertPoint(EntryBlock, EntryBlock->begin());
+  Function *Decl =
+      Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_lds_kernel_id, {});
+  auto *KernelId = IRB.CreateCall(Decl, {});
+  GlobalVariable *LDSBaseTable = NKLDSParams.LDSBaseTable;
+  GlobalVariable *LDSOffsetTable = NKLDSParams.LDSOffsetTable;
+  auto &OrderedLDSGlobals = NKLDSParams.OrderedLDSGlobals;
+  assert(LDSBaseTable && LDSOffsetTable);
+  Value *BaseGEP = IRB.CreateInBoundsGEP(
+      LDSBaseTable->getValueType(), LDSBaseTable, {IRB.getInt32(0), KernelId});
+  Value *BaseLoad = IRB.CreateLoad(IRB.getInt32Ty(), BaseGEP);
+
+  for (GlobalVariable *GV : LDSGlobals) {
+    Value *BasePtr = IRB.CreateIntToPtr(BaseLoad, GV->getType());
+    auto GVIt =
+        std::find(OrderedLDSGlobals.begin(), OrderedLDSGlobals.end(), GV);
+    assert(GVIt != OrderedLDSGlobals.end());
+    uint32_t GVOffset = std::distance(OrderedLDSGlobals.begin(), GVIt);
+    Value *OffsetGEP = IRB.CreateInBoundsGEP(
+        LDSOffsetTable->getValueType(), LDSOffsetTable,
+        {IRB.getInt32(0), KernelId, IRB.getInt32(GVOffset)});
+    Value *OffsetLoad = IRB.CreateLoad(IRB.getInt32Ty(), OffsetGEP);
+    OffsetLoad = IRB.CreateIntToPtr(OffsetLoad, GV->getType());
+    OffsetLoad = IRB.CreateLoad(IRB.getInt32Ty(), OffsetLoad);
+    Value *BasePlusOffset =
+        IRB.CreateInBoundsGEP(IRB.getInt8Ty(), BasePtr, {OffsetLoad});
+    LLVM_DEBUG(dbgs() << "Sw LDS Lowering, Replace non-kernel LDS for "
+                      << GV->getName().str());
+    replacesUsesOfGlobalInFunction(Func, GV, BasePlusOffset);
+  }
+}
+
+static void reorderStaticDynamicIndirectLDSSet(KernelLDSParameters &LDSParams) {
+  // Sort the static and dynamic LDS globals, both directly and indirectly
+  // accessed, by name.
+  auto &DirectAccess = LDSParams.DirectAccess;
+  auto &IndirectAccess = LDSParams.IndirectAccess;
+  LDSParams.DirectAccess.StaticLDSGlobals = sortByName(
+      std::vector<GlobalVariable *>(DirectAccess.StaticLDSGlobals.begin(),
+                                    DirectAccess.StaticLDSGlobals.end()));
+  LDSParams.DirectAccess.DynamicLDSGlobals = sortByName(
+      std::vector<GlobalVariable *>(DirectAccess.DynamicLDSGlobals.begin(),
+                                    DirectAccess.DynamicLDSGlobals.end()));
+  LDSParams.IndirectAccess.StaticLDSGlobals = sortByName(
+      std::vector<GlobalVariable *>(IndirectAccess.StaticLDSGlobals.begin(),
+                                    IndirectAccess.StaticLDSGlobals.end()));
+  LDSParams.IndirectAccess.DynamicLDSGlobals = sortByName(
+      std::vector<GlobalVariable *>(IndirectAccess.DynamicLDSGlobals.begin(),
+                                    IndirectAccess.DynamicLDSGlobals.end()));
+}
+
+bool AMDGPUSwLowerLDS::run() {
+  bool Changed = false;
+  CallGraph CG = CallGraph(M);
+  SetVector<Function *> KernelsWithIndirectLDSAccess;
+  FunctionVariableMap NonKernelToLDSAccessMap;
+  SetVector<GlobalVariable *> AllNonKernelLDSAccess;
+
+  Changed |= eliminateConstantExprUsesOfLDSFromAllInstructions(M);
+
+  // Get all the direct and indirect access of LDS for all the kernels.
+  LDSUsesInfoTy LDSUsesInfo = getTransitiveUsesOfLDS(CG, M);
+
+  // Get the Uses of LDS from non-kernels.
+  getUsesOfLDSByNonKernels(CG, NonKernelToLDSAccessMap);
+
+  // Utility to group LDS access into direct, indirect, static and dynamic.
+  auto PopulateKernelStaticDynamicLDS = [&](FunctionVariableMap &LDSAccesses,
+                                            bool DirectAccess) {
+    for (auto &K : LDSAccesses) {
+      Function *F = K.first;
+      assert(isKernelLDS(F));
+
+
+      auto &LDSParams = KernelToLDSParametersMap[F];
+      if (!DirectAccess)
+        KernelsWithIndirectLDSAccess.insert(F);
+      for (GlobalVariable *GV : K.second) {
+        if (!DirectAccess) {
+          if (AMDGPU::isDynamicLDS(*GV))
+            LDSParams.IndirectAccess.DynamicLDSGlobals.insert(GV);
+          else
+            LDSParams.IndirectAccess.StaticLDSGlobals.insert(GV);
+          AllNonKernelLDSAccess.insert(GV);
+        } else {
+          if (AMDGPU::isDynamicLDS(*GV))
+            LDSParams.DirectAccess.DynamicLDSGlobals.insert(GV);
+          else
+            LDSParams.DirectAccess.StaticLDSGlobals.insert(GV);
+        }
+      }
+    }
+  };
+
+  PopulateKernelStaticDynamicLDS(LDSUsesInfo.direct_access, true);
+  PopulateKernelStaticDynamicLDS(LDSUsesInfo.indirect_access, false);
+
+  for (auto &K : KernelToLDSParametersMap) {
+    Function *Func = K.first;
+    auto &LDSParams = K.second;
+    // Skip kernels that do not access any LDS.
+    if (LDSParams.DirectAccess.StaticLDSGlobals.empty() &&
+        LDSParams.DirectAccess.DynamicLDSGlobals.empty() &&
+        LDSParams.IndirectAccess.StaticLDSGlobals.empty() &&
+        LDSParams.IndirectAccess.DynamicLDSGlobals.empty())
+      continue;
+    // The generated prologue reads all three work-item ids.
+    removeFnAttrFromReachable(CG, Func, "amdgpu-no-workitem-id-x");
+    removeFnAttrFromReachable(CG, Func, "amdgpu-no-workitem-id-y");
+    removeFnAttrFromReachable(CG, Func, "amdgpu-no-workitem-id-z");
+    reorderStaticDynamicIndirectLDSSet(LDSParams);
+    populateSwLDSGlobal(Func);
+    populateSwMetadataGlobal(Func);
+    populateLDSToReplacementIndicesMap(Func);
+    DomTreeUpdater DTU(DTCallback(*Func),
+                       DomTreeUpdater::UpdateStrategy::Lazy);
+    lowerKernelLDSAccesses(Func, DTU);
+    Changed = true;
+  }
+
+  NonKernelLDSParameters NKLDSParams;
+  if (!NonKernelToLDSAccessMap.empty()) {
+    NKLDSParams.OrderedKernels = getOrderedIndirectLDSAccessingKernels(
+        std::move(KernelsWithIndirectLDSAccess));
+    NKLDSParams.OrderedLDSGlobals =
+        getOrderedNonKernelAllLDSGlobals(std::move(AllNonKernelLDSAccess));
+    assert(!NKLDSParams.OrderedKernels.empty());
+    assert(!NKLDSParams.OrderedLDSGlobals.empty());
+    buildNonKernelLDSBaseTable(NKLDSParams);
+    buildNonKernelLDSOffsetTable(NKLDSParams);
+    for (auto &K : NonKernelToLDSAccessMap) {
+      Function *Func = K.first;
+      DenseSet<GlobalVariable *> &LDSGlobals = K.second;
+      SetVector<GlobalVariable *> OrderedLDSGlobals = sortByName(
+          std::vector<GlobalVariable *>(LDSGlobals.begin(), LDSGlobals.end()));
+      lowerNonKernelLDSAccesses(Func, OrderedLDSGlobals, NKLDSParams);
+    }
+  }
+  return Changed;
+}
+
+class AMDGPUSwLowerLDSLegacy : public ModulePass {
+public:
+  static char ID;
+  AMDGPUSwLowerLDSLegacy() : ModulePass(ID) {}
+  bool runOnModule(Module &M) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addPreserved<DominatorTreeWrapperPass>();
+  }
+};
+} // namespace
+
+char AMDGPUSwLowerLDSLegacy::ID = 0;
+char &llvm::AMDGPUSwLowerLDSLegacyPassID = AMDGPUSwLowerLDSLegacy::ID;
+
+INITIALIZE_PASS(AMDGPUSwLowerLDSLegacy, "amdgpu-sw-lower-lds",
+                "AMDGPU Software lowering of LDS", false, false)
+
+bool AMDGPUSwLowerLDSLegacy::runOnModule(Module &M) {
+  DominatorTreeWrapperPass *const DTW =
+      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+  auto DTCallback = [&DTW](Function &F) -> DominatorTree * {
+    return DTW ? &DTW->getDomTree() : nullptr;
+  };
+  bool IsChanged = false;
+  AMDGPUSwLowerLDS SwLowerLDSImpl(M, DTCallback);
+  IsChanged |= SwLowerLDSImpl.run();
+  return IsChanged;
+}
+
+ModulePass *llvm::createAMDGPUSwLowerLDSLegacyPass() {
+  return new AMDGPUSwLowerLDSLegacy();
+}
+
+PreservedAnalyses AMDGPUSwLowerLDSPass::run(Module &M,
+                                            ModuleAnalysisManager &AM) {
+  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
+  auto DTCallback = [&FAM](Function &F) -> DominatorTree * {
+    return &FAM.getResult<DominatorTreeAnalysis>(F);
+  };
+  bool IsChanged = false;
+  AMDGPUSwLowerLDS SwLowerLDSImpl(M, DTCallback);
+  IsChanged |= SwLowerLDSImpl.run();
+  if (!IsChanged)
+    return PreservedAnalyses::all();
+
+  PreservedAnalyses PA;
+  PA.preserve<DominatorTreeAnalysis>();
+  return PA;
+}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 305a6c8c3b926..6d75a634c82f0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -402,6 +402,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
   initializeSILoadStoreOptimizerPass(*PR);
   initializeAMDGPUCtorDtorLoweringLegacyPass(*PR);
   initializeAMDGPUAlwaysInlinePass(*PR);
+  initializeAMDGPUSwLowerLDSLegacyPass(*PR);
   initializeAMDGPUAttributorLegacyPass(*PR);
   initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
   initializeAMDGPUAnnotateUniformValuesPass(*PR);
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
index 48325a0928f93..139a416d50f29 100644
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -73,6 +73,7 @@ add_llvm_target(AMDGPUCodeGen
   AMDGPULowerKernelArguments.cpp
   AMDGPULowerKernelAttributes.cpp
   AMDGPULowerModuleLDSPass.cpp
+  AMDGPUSwLowerLDS.cpp
   AMDGPUMachineCFGStructurizer.cpp
   AMDGPUMachineFunction.cpp
   AMDGPUMachineModuleInfo.cpp
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.h
index 4d3ad328e1310..c95a153417b6f 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.h
@@ -31,7 +31,7 @@ namespace AMDGPU {
 using FunctionVariableMap = DenseMap<Function *, DenseSet<GlobalVariable *>>;
 using VariableFunctionMap = DenseMap<GlobalVariable *, DenseSet<Function *>>;
 
-Align getAlign(const DataLayout &DL, const GlobalVariable *GV);
+Align getAlign(DataLayout const &DL, const GlobalVariable *GV);
 
 bool isDynamicLDS(const GlobalVariable &GV);
 bool isLDSVariableToLower(const GlobalVariable &GV);
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll
new file mode 100644
index 0000000000000..3ac43caebd918
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll
@@ -0,0 +1,100 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+
+; Test to check that indirect dynamic LDS access through a non-kernel called from a kernel is lowered correctly.
+ at lds_1 = internal addrspace(3) global [1 x i8] poison, align 1
+ at lds_2 = internal addrspace(3) global [1 x i32] poison, align 2
+ at lds_3 = external addrspace(3) global [0 x i8], align 4
+ at lds_4 = external addrspace(3) global [0 x i8], align 8
+
+define void @use_variables() {
+; CHECK-LABEL: define void @use_variables() {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(4) [[TMP11]], align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i32 [[TMP12]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(3) [[TMP13]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP10]], i32 [[TMP14]]
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP9]], align 4
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP15]], align 8
+; CHECK-NEXT:    ret void
+;
+  store i8 3, ptr addrspace(3) @lds_3, align 4
+  store i8 3, ptr addrspace(3) @lds_4, align 8
+  ret void
+}
+
+define amdgpu_kernel void @k0() {
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 8
+; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i32 15
+; CHECK-NEXT:    store i64 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = add i64 [[TMP11]], 7
+; CHECK-NEXT:    [[TMP28:%.*]] = udiv i64 [[TMP19]], 8
+; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP28]], 8
+; CHECK-NEXT:    store i64 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP27]], [[TMP8]]
+; CHECK-NEXT:    store i64 [[TMP12]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 8
+; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[TMP29]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 8
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP29]], 7
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP15:%.*]] = mul i64 [[TMP18]], 8
+; CHECK-NEXT:    store i64 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 8
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP12]], [[TMP15]]
+; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP16]])
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    br label [[TMP21]]
+; CHECK:       22:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP22]]
+; CHECK-NEXT:    [[TMP24:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP24]]
+; CHECK-NEXT:    call void @use_variables()
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP23]], align 1
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP25]], align 2
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP26:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    call void @free(ptr [[TMP26]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  call void @use_variables()
+  store i8 7, ptr addrspace(3) @lds_1, align 1
+  store i32 8, ptr addrspace(3) @lds_2, align 2
+  ret void
+}
+;.
+; CHECK: [[META0]] = !{i32 0}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll
new file mode 100644
index 0000000000000..ecb02d5cb5ebc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll
@@ -0,0 +1,63 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+
+; Test to check that direct access of dynamic LDS in a kernel is lowered correctly.
+ at lds_1 = external addrspace(3) global [0 x i8]
+ at lds_2 = external addrspace(3) global [0 x i8]
+
+define amdgpu_kernel void @k0() {
+; CHECK-LABEL: define amdgpu_kernel void @k0() {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP20]], i32 15
+; CHECK-NEXT:    store i64 0, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 8
+; CHECK-NEXT:    [[TMP16:%.*]] = load i64, ptr addrspace(4) [[TMP21]], align 8
+; CHECK-NEXT:    store i64 [[TMP16]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 0, i32 1), align 8
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP16]], 0
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i64 [[TMP17]], 1
+; CHECK-NEXT:    [[TMP22:%.*]] = mul i64 [[TMP18]], 1
+; CHECK-NEXT:    store i64 [[TMP22]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 0, i32 2), align 8
+; CHECK-NEXT:    [[TMP23:%.*]] = add i64 0, [[TMP22]]
+; CHECK-NEXT:    store i64 [[TMP23]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 8
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr addrspace(4) [[TMP21]], align 8
+; CHECK-NEXT:    store i64 [[TMP13]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 1), align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = add i64 [[TMP13]], 0
+; CHECK-NEXT:    [[TMP24:%.*]] = udiv i64 [[TMP14]], 1
+; CHECK-NEXT:    [[TMP15:%.*]] = mul i64 [[TMP24]], 1
+; CHECK-NEXT:    store i64 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = add i64 [[TMP23]], [[TMP15]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP19]])
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    br label [[TMP7]]
+; CHECK:       19:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP10]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP9]], align 4
+; CHECK-NEXT:    store i8 8, ptr addrspace(3) [[TMP11]], align 8
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP12:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    call void @free(ptr [[TMP12]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  store i8 7, ptr addrspace(3) @lds_1, align 4
+  store i8 8, ptr addrspace(3) @lds_2, align 8
+  ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll
new file mode 100644
index 0000000000000..a31553a98138b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+
+; Test to check that when multiple kernels access the same non-kernel function, LDS accesses are lowered correctly.
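+; As the CHECK lines below suggest, indirect accesses from a non-kernel are
+; resolved at run time: @llvm.amdgcn.lds.kernel.id selects a row in
+; @llvm.amdgcn.sw.lds.base.table and @llvm.amdgcn.sw.lds.offset.table to
+; recover the calling kernel's base pointer and the per-variable offset.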
+@lds_1 = internal addrspace(3) global [1 x i8] poison, align 1
+@lds_2 = internal addrspace(3) global [1 x i32] poison, align 2
+@lds_3 = external addrspace(3) global [0 x i8], align 4
+@lds_4 = external addrspace(3) global [0 x i8], align 8
+
+define void @use_variables_1() {
+; CHECK-LABEL: define void @use_variables_1() {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x [4 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 2
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x [4 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 3
+; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(4) [[TMP11]], align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i32 [[TMP12]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(3) [[TMP13]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP10]], i32 [[TMP14]]
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP9]], align 4
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP15]], align 8
+; CHECK-NEXT:    ret void
+;
+  store i8 3, ptr addrspace(3) @lds_3, align 4
+  store i8 3, ptr addrspace(3) @lds_4, align 8
+  ret void
+}
+
+define void @use_variables_2() {
+; CHECK-LABEL: define void @use_variables_2() {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x [4 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x [4 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(4) [[TMP11]], align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i32 [[TMP12]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(3) [[TMP13]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP10]], i32 [[TMP14]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP9]], align 1
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP15]], align 2
+; CHECK-NEXT:    ret void
+;
+  store i8 7, ptr addrspace(3) @lds_1, align 1
+  store i32 8, ptr addrspace(3) @lds_2, align 2
+  ret void
+}
+
+define amdgpu_kernel void @k0() {
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 0, i32 2), align 8
+; CHECK-NEXT:    [[TMP25:%.*]] = add i64 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i32 15
+; CHECK-NEXT:    store i64 [[TMP25]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 1), align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = add i64 [[TMP11]], 7
+; CHECK-NEXT:    [[TMP26:%.*]] = udiv i64 [[TMP19]], 8
+; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP26]], 8
+; CHECK-NEXT:    store i64 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP25]], [[TMP8]]
+; CHECK-NEXT:    store i64 [[TMP12]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 8
+; CHECK-NEXT:    [[TMP27:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 8
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP27]], 7
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP15:%.*]] = mul i64 [[TMP18]], 8
+; CHECK-NEXT:    store i64 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 8
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP12]], [[TMP15]]
+; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP16]])
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    br label [[TMP21]]
+; CHECK:       22:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP22]]
+; CHECK-NEXT:    call void @use_variables_1()
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP23]], align 1
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP24:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    call void @free(ptr [[TMP24]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  call void @use_variables_1()
+  store i8 7, ptr addrspace(3) @lds_1, align 1
+  ret void
+}
+
+define amdgpu_kernel void @k1() {
+; CHECK-LABEL: define amdgpu_kernel void @k1(
+; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META1:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP14:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP20:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 1, i32 0), align 8
+; CHECK-NEXT:    [[TMP21:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 1, i32 2), align 8
+; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP20]], [[TMP21]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i32 15
+; CHECK-NEXT:    store i64 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 1), align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = add i64 [[TMP11]], 7
+; CHECK-NEXT:    [[TMP24:%.*]] = udiv i64 [[TMP19]], 8
+; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP24]], 8
+; CHECK-NEXT:    store i64 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 2), align 8
+; CHECK-NEXT:    [[TMP26:%.*]] = add i64 [[TMP27]], [[TMP8]]
+; CHECK-NEXT:    store i64 [[TMP26]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 8
+; CHECK-NEXT:    [[TMP25:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[TMP25]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 1), align 8
+; CHECK-NEXT:    [[TMP28:%.*]] = add i64 [[TMP25]], 7
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i64 [[TMP28]], 8
+; CHECK-NEXT:    [[TMP29:%.*]] = mul i64 [[TMP18]], 8
+; CHECK-NEXT:    store i64 [[TMP29]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 2), align 8
+; CHECK-NEXT:    [[TMP30:%.*]] = add i64 [[TMP26]], [[TMP29]]
+; CHECK-NEXT:    store i64 [[TMP30]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 0), align 8
+; CHECK-NEXT:    [[TMP33:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[TMP33]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 1), align 8
+; CHECK-NEXT:    [[TMP34:%.*]] = add i64 [[TMP33]], 7
+; CHECK-NEXT:    [[TMP23:%.*]] = udiv i64 [[TMP34]], 8
+; CHECK-NEXT:    [[TMP22:%.*]] = mul i64 [[TMP23]], 8
+; CHECK-NEXT:    store i64 [[TMP22]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 2), align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP30]], [[TMP22]]
+; CHECK-NEXT:    [[TMP13:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP12]])
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, align 8
+; CHECK-NEXT:    br label [[TMP14]]
+; CHECK:       27:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP15:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, i32 [[TMP15]]
+; CHECK-NEXT:    [[TMP31:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP32:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, i32 [[TMP31]]
+; CHECK-NEXT:    call void @use_variables_1()
+; CHECK-NEXT:    call void @use_variables_2()
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP16]], align 4
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP17:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, align 8
+; CHECK-NEXT:    call void @free(ptr [[TMP17]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  call void @use_variables_1()
+  call void @use_variables_2()
+  store i8 3, ptr addrspace(3) @lds_3, align 4
+  ret void
+}
+;.
+; CHECK: [[META0]] = !{i32 0}
+; CHECK: [[META1]] = !{i32 1}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll
new file mode 100644
index 0000000000000..5222c3ae528cc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll
@@ -0,0 +1,79 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+
+; Test to check that the malloc and free blocks are placed correctly when the
+; function contains multiple basic blocks and branching, and that LDS accesses
+; are lowered correctly.
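+; Judging by the CHECK lines, every original `ret` is redirected to the common
+; CondFree block, so the conditional free of the malloc'd buffer runs exactly
+; once on each path out of the kernel.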
+
+@lds_1 = internal addrspace(3) global i32 poison
+@lds_2 = internal addrspace(3) global i32 poison
+
+define amdgpu_kernel void @test_kernel() {
+; CHECK-LABEL: define amdgpu_kernel void @test_kernel() {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP15:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 1, i32 0), align 8
+; CHECK-NEXT:    [[TMP16:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 1, i32 2), align 8
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], [[TMP16]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP17]])
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, align 8
+; CHECK-NEXT:    br label [[TMP7]]
+; CHECK:       10:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, i32 [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr addrspace(1)
+; CHECK-NEXT:    [[VAL1:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = addrspacecast ptr addrspace(3) [[TMP11]] to ptr addrspace(1)
+; CHECK-NEXT:    [[VAL2:%.*]] = load i32, ptr addrspace(1) [[TMP13]], align 4
+; CHECK-NEXT:    [[RESULT:%.*]] = add i32 [[VAL1]], [[VAL2]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[RESULT]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[POSITIVE:%.*]], label [[NEGATIVE:%.*]]
+; CHECK:       positive:
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       negative:
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sgt i32 [[VAL1]], 0
+; CHECK-NEXT:    br i1 [[CMP2]], label [[VAL1_POSITIVE:%.*]], label [[VAL1_NEGATIVE:%.*]]
+; CHECK:       val1_positive:
+; CHECK-NEXT:    br label [[CONDFREE]]
+; CHECK:       val1_negative:
+; CHECK-NEXT:    br label [[CONDFREE]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP14:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, align 8
+; CHECK-NEXT:    call void @free(ptr [[TMP14]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  %val1 = load i32, ptr addrspace(1) addrspacecast (ptr addrspace(3) @lds_1 to ptr addrspace(1))
+  %val2 = load i32, ptr addrspace(1) addrspacecast (ptr addrspace(3) @lds_2 to ptr addrspace(1))
+
+  %result = add i32 %val1, %val2
+  %cmp = icmp sgt i32 %result, 0
+  br i1 %cmp, label %positive, label %negative
+
+positive:
+  ret void
+
+negative:
+  %cmp2 = icmp sgt i32 %val1, 0
+  br i1 %cmp2, label %val1_positive, label %val1_negative
+
+val1_positive:
+  ret void
+
+val1_negative:
+  ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll
new file mode 100644
index 0000000000000..a275314d8f6b5
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll
@@ -0,0 +1,101 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+
+; Test to check if static and dynamic LDS accesses are lowered correctly when
+; a non-kernel function is called from a kernel.
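+; The CHECK lines below indicate that the static variables get their metadata
+; entries at compile time, while the sizes of the dynamic ([0 x i8]) variables
+; are read from the implicit kernarg area and folded into the malloc size in
+; the Malloc block.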
+@lds_1 = internal addrspace(3) global [1 x i8] poison, align 1
+@lds_2 = internal addrspace(3) global [1 x i32] poison, align 2
+@lds_3 = external addrspace(3) global [0 x i8], align 4
+@lds_4 = external addrspace(3) global [0 x i8], align 8
+
+define void @use_variables() {
+; CHECK-LABEL: define void @use_variables() {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(4) [[TMP11]], align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i32 [[TMP12]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(3) [[TMP13]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP10]], i32 [[TMP14]]
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP9]], align 4
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP15]], align 8
+; CHECK-NEXT:    ret void
+;
+  store i8 3, ptr addrspace(3) @lds_3, align 4
+  store i8 3, ptr addrspace(3) @lds_4, align 8
+  ret void
+}
+
+define amdgpu_kernel void @k0() {
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 8
+; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i32 15
+; CHECK-NEXT:    store i64 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = add i64 [[TMP11]], 7
+; CHECK-NEXT:    [[TMP28:%.*]] = udiv i64 [[TMP19]], 8
+; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP28]], 8
+; CHECK-NEXT:    store i64 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP27]], [[TMP8]]
+; CHECK-NEXT:    store i64 [[TMP12]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 8
+; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[TMP29]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 8
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP29]], 7
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP15:%.*]] = mul i64 [[TMP18]], 8
+; CHECK-NEXT:    store i64 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 8
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP12]], [[TMP15]]
+; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP16]])
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    br label [[TMP21]]
+; CHECK:       22:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP22]]
+; CHECK-NEXT:    [[TMP24:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP24]]
+; CHECK-NEXT:    call void @use_variables()
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP23]], align 1
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP25]], align 2
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP26:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    call void @free(ptr [[TMP26]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  call void @use_variables()
+  store i8 7, ptr addrspace(3) @lds_1, align 1
+  store i32 8, ptr addrspace(3) @lds_2, align 2
+  ret void
+}
+;.
+; CHECK: [[META0]] = !{i32 0}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll
new file mode 100644
index 0000000000000..188e494da119e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll
@@ -0,0 +1,88 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+
+; Test to check if static and dynamic LDS accesses are lowered correctly in a kernel.
+@lds_1 = internal addrspace(3) global [1 x i8] poison, align 4
+@lds_2 = internal addrspace(3) global [1 x i32] poison, align 8
+@lds_3 = external addrspace(3) global [0 x i8], align 4
+@lds_4 = external addrspace(3) global [0 x i8], align 8
+
+;.
+; CHECK: @lds_1 = internal addrspace(3) global [1 x i8] poison, align 4
+; CHECK: @lds_2 = internal addrspace(3) global [1 x i32] poison, align 8
+; CHECK: @lds_3 = external addrspace(3) global [0 x i8], align 4
+; CHECK: @lds_4 = external addrspace(3) global [0 x i8], align 8
+; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, align 8
+; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 1, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 4, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 16, i32 0, i32 0 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 16, i32 0, i32 0 } }, no_sanitize_address
+;.
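+; Note on the metadata layout above: each %llvm.amdgcn.sw.lds.k0.md.item
+; appears to be an {offset, size, aligned-size} triple, e.g. { i32 0, i32 1,
+; i32 8 } describes lds_1 ([1 x i8]) placed at offset 0 and padded out to 8
+; bytes.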
+define amdgpu_kernel void @k0() {
+; CHECK-LABEL: define amdgpu_kernel void @k0() {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 8
+; CHECK-NEXT:    [[TMP31:%.*]] = add i64 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i32 15
+; CHECK-NEXT:    store i64 [[TMP31]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 8
+; CHECK-NEXT:    [[TMP19:%.*]] = add i64 [[TMP11]], 7
+; CHECK-NEXT:    [[TMP32:%.*]] = udiv i64 [[TMP19]], 8
+; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP32]], 8
+; CHECK-NEXT:    store i64 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP31]], [[TMP8]]
+; CHECK-NEXT:    store i64 [[TMP12]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 8
+; CHECK-NEXT:    [[TMP33:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[TMP33]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 8
+; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP33]], 7
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i64 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP15:%.*]] = mul i64 [[TMP18]], 8
+; CHECK-NEXT:    store i64 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 8
+; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP12]], [[TMP15]]
+; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP16]])
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    br label [[TMP21]]
+; CHECK:       22:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
+; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP22]]
+; CHECK-NEXT:    [[TMP24:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP24]]
+; CHECK-NEXT:    [[TMP26:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP26]]
+; CHECK-NEXT:    [[TMP28:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP28]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP23]], align 4
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP25]], align 8
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP27]], align 4
+; CHECK-NEXT:    store i8 8, ptr addrspace(3) [[TMP29]], align 8
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP30:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    call void @free(ptr [[TMP30]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  store i8 7, ptr addrspace(3) @lds_1, align 4
+  store i32 8, ptr addrspace(3) @lds_2, align 8
+  store i8 7, ptr addrspace(3) @lds_3, align 4
+  store i8 8, ptr addrspace(3) @lds_4, align 8
+  ret void
+}
+;.
+; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll
new file mode 100644
index 0000000000000..3a3950c007cd3
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+
+; Test to check if LDS accesses are lowered correctly when an LDS pointer is
+; passed as a function argument to a non-kernel function.
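+; As the CHECK lines below show, the callee needs no rewriting; only the
+; kernel-side pointer that feeds the call is redirected into the malloc'd
+; buffer at the offset recorded in the metadata.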
+
+@lds_var = internal addrspace(3) global [1024 x i32] poison, align 4
+
+define void @my_function(ptr addrspace(3) %lds_arg) {
+; CHECK-LABEL: define void @my_function(
+; CHECK-SAME: ptr addrspace(3) [[LDS_ARG:%.*]]) {
+; CHECK-NEXT:    [[LDS_VAL:%.*]] = load i32, ptr addrspace(3) [[LDS_ARG]], align 4
+; CHECK-NEXT:    [[NEW_LDS_VAL:%.*]] = add i32 [[LDS_VAL]], 1
+; CHECK-NEXT:    store i32 [[NEW_LDS_VAL]], ptr addrspace(3) [[LDS_ARG]], align 4
+; CHECK-NEXT:    ret void
+;
+  %lds_val = load i32, ptr addrspace(3) %lds_arg, align 4
+  %new_lds_val = add i32 %lds_val, 1
+  store i32 %new_lds_val, ptr addrspace(3) %lds_arg, align 4
+  ret void
+}
+
+define amdgpu_kernel void @my_kernel() {
+; CHECK-LABEL: define amdgpu_kernel void @my_kernel() {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_MY_KERNEL_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, i32 0, i32 0, i32 2), align 8
+; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], [[TMP12]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP13]])
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, align 8
+; CHECK-NEXT:    br label [[TMP7]]
+; CHECK:       10:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, i32 [[TMP8]]
+; CHECK-NEXT:    [[LDS_PTR:%.*]] = getelementptr [1024 x i32], ptr addrspace(3) [[TMP9]], i32 0, i32 0
+; CHECK-NEXT:    call void @my_function(ptr addrspace(3) [[LDS_PTR]])
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP10:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, align 8
+; CHECK-NEXT:    call void @free(ptr [[TMP10]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  %lds_ptr = getelementptr [1024 x i32], ptr addrspace(3) @lds_var, i32 0, i32 0
+  call void @my_function(ptr addrspace(3) %lds_ptr)
+  ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll
new file mode 100644
index 0000000000000..f5114a8554248
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll
@@ -0,0 +1,220 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+
+; Test to check if LDS accesses are lowered correctly when a call is made to a nested non-kernel function.
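+; The CHECK lines suggest each kernel gets a distinct !llvm.amdgcn.lds.kernel.id
+; value, and the shared helpers @store_A and @get_B_ptr use that id to index
+; the four-entry base and offset tables at run time.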
+
+@A = external addrspace(3) global [8 x ptr]
+@B = external addrspace(3) global [0 x i32]
+
+define amdgpu_kernel void @kernel_0() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_0(
+; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md, align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md, i32 0, i32 0, i32 2), align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], [[TMP10]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP11]])
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_0, align 8
+; CHECK-NEXT:    br label [[TMP7]]
+; CHECK:       10:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    call void @call_store_A()
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP8:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_0, align 8
+; CHECK-NEXT:    call void @free(ptr [[TMP8]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  call void @call_store_A()
+  ret void
+}
+
+define amdgpu_kernel void @kernel_1() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_1(
+; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META1:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP14:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i32 15
+; CHECK-NEXT:    store i64 0, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 0, i32 1), align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP11]], 3
+; CHECK-NEXT:    [[TMP10:%.*]] = udiv i64 [[TMP12]], 4
+; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP10]], 4
+; CHECK-NEXT:    store i64 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 0, i32 2), align 8
+; CHECK-NEXT:    [[TMP9:%.*]] = add i64 0, [[TMP8]]
+; CHECK-NEXT:    [[TMP13:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP9]])
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_1, align 8
+; CHECK-NEXT:    br label [[TMP14]]
+; CHECK:       14:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[PTR:%.*]] = call ptr @get_B_ptr()
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP15:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_1, align 8
+; CHECK-NEXT:    call void @free(ptr [[TMP15]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  %ptr = call ptr @get_B_ptr()
+  ret void
+}
+
+define amdgpu_kernel void @kernel_2() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_2(
+; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META2:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md, align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_2_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md, i32 0, i32 0, i32 2), align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], [[TMP10]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP11]])
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_2, align 8
+; CHECK-NEXT:    br label [[TMP7]]
+; CHECK:       10:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    call void @store_A()
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP8:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_2, align 8
+; CHECK-NEXT:    call void @free(ptr [[TMP8]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  call void @store_A()
+  ret void
+}
+
+define amdgpu_kernel void @kernel_3() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_3(
+; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META3:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP14:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i32 15
+; CHECK-NEXT:    store i64 0, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
+; CHECK-NEXT:    store i64 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 0, i32 1), align 8
+; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP11]], 3
+; CHECK-NEXT:    [[TMP10:%.*]] = udiv i64 [[TMP12]], 4
+; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP10]], 4
+; CHECK-NEXT:    store i64 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 0, i32 2), align 8
+; CHECK-NEXT:    [[TMP9:%.*]] = add i64 0, [[TMP8]]
+; CHECK-NEXT:    [[TMP13:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP9]])
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_3, align 8
+; CHECK-NEXT:    br label [[TMP14]]
+; CHECK:       14:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[PTR:%.*]] = call ptr @get_B_ptr()
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP15:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_3, align 8
+; CHECK-NEXT:    call void @free(ptr [[TMP15]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  %ptr = call ptr @get_B_ptr()
+  ret void
+}
+
+define private void @call_store_A() {
+; CHECK-LABEL: define private void @call_store_A() {
+; CHECK-NEXT:    call void @store_A()
+; CHECK-NEXT:    ret void
+;
+  call void @store_A()
+  ret void
+}
+
+define private void @store_A() {
+; CHECK-LABEL: define private void @store_A() {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [4 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr
+; CHECK-NEXT:    store ptr [[TMP10]], ptr null, align 8
+; CHECK-NEXT:    ret void
+;
+  store ptr addrspacecast (ptr addrspace(3) @A to ptr), ptr null
+  ret void
+}
+
+define private ptr @get_B_ptr() {
+; CHECK-LABEL: define private ptr @get_B_ptr() {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [4 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr
+; CHECK-NEXT:    ret ptr [[TMP10]]
+;
+  ret ptr addrspacecast (ptr addrspace(3) @B to ptr)
+}
+;.
+; CHECK: [[META0]] = !{i32 0}
+; CHECK: [[META1]] = !{i32 1}
+; CHECK: [[META2]] = !{i32 2}
+; CHECK: [[META3]] = !{i32 3}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll
new file mode 100644
index 0000000000000..ac082dda0f10a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll
@@ -0,0 +1,85 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+
+; Test to check if static LDS is lowered correctly when a non-kernel function with LDS accesses is called from a kernel.
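+; Note that, per the CHECK lines, constant-expression addrspacecasts of LDS
+; globals are rebuilt as addrspacecast instructions on the looked-up pointer.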
+@lds_1 = internal addrspace(3) global [1 x i8] poison, align 1
+@lds_2 = internal addrspace(3) global [1 x i32] poison, align 2
+@lds_3 = external addrspace(3) global [3 x i8], align 4
+@lds_4 = external addrspace(3) global [4 x i8], align 8
+
+define void @use_variables() {
+; CHECK-LABEL: define void @use_variables() {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(4) [[TMP11]], align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i32 [[TMP12]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(3) [[TMP13]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP10]], i32 [[TMP14]]
+; CHECK-NEXT:    [[X:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr
+; CHECK-NEXT:    [[TMP16:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr
+; CHECK-NEXT:    store i8 3, ptr [[TMP16]], align 4
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP15]], align 8
+; CHECK-NEXT:    ret void
+;
+  %X = addrspacecast ptr addrspace(3) @lds_3 to ptr
+  store i8 3, ptr addrspacecast (ptr addrspace(3) @lds_3 to ptr), align 4
+  store i8 3, ptr addrspace(3) @lds_4, align 8
+  ret void
+}
+
+define amdgpu_kernel void @k0() {
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 8
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP15]])
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    br label [[TMP7]]
+; CHECK:       10:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP10]]
+; CHECK-NEXT:    call void @use_variables()
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP9]], align 1
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP11]], align 2
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP12:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    call void @free(ptr [[TMP12]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  call void @use_variables()
+  store i8 7, ptr addrspace(3) @lds_1, align 1
+  store i32 8, ptr addrspace(3) @lds_2, align 2
+  ret void
+}
+;.
+; CHECK: [[META0]] = !{i32 0}
+;.
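
For readers tracing the CHECK lines above: a minimal C++ analogue of the
two-level lookup the pass emits in non-kernel functions. The names
base_table, offset_table, and resolve_lds are illustrative only; in the IR
they correspond to @llvm.amdgcn.sw.lds.base.table,
@llvm.amdgcn.sw.lds.offset.table, and the GEP/load sequence shown above.

  #include <cstdint>

  extern const uint32_t base_table[];      // per-kernel LDS base, as i32
  extern const uint32_t offset_table[][2]; // per-kernel metadata slot addresses

  char *resolve_lds(uint32_t kernel_id, uint32_t var_idx) {
    // Base pointer of this kernel's software LDS block.
    char *base = reinterpret_cast<char *>(
        static_cast<uintptr_t>(base_table[kernel_id]));
    // The table entry is the address of a metadata slot; load the offset.
    const auto *slot = reinterpret_cast<const uint32_t *>(
        static_cast<uintptr_t>(offset_table[kernel_id][var_idx]));
    return base + *slot; // the base-plus-offset GEP in the lowered IR
  }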
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll
new file mode 100644
index 0000000000000..928e83a1f30b5
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll
@@ -0,0 +1,58 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+
+; Test to check if static LDS accesses in a kernel are lowered correctly.
+ at lds_1 = internal addrspace(3) global [1 x i8] poison, align 4
+ at lds_2 = internal addrspace(3) global [1 x i32] poison, align 8
+
+;.
+; CHECK: @lds_1 = internal addrspace(3) global [1 x i8] poison, align 4
+; CHECK: @lds_2 = internal addrspace(3) global [1 x i32] poison, align 8
+; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, align 8
+; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 1, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 4, i32 8 } }, no_sanitize_address
+;.
+define amdgpu_kernel void @k0() {
+; CHECK-LABEL: define amdgpu_kernel void @k0() {
+; CHECK-NEXT:  WId:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
+; CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
+; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 8
+; CHECK-NEXT:    [[TMP14:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 8
+; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP15]])
+; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    br label [[TMP7]]
+; CHECK:       10:
+; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP10]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP9]], align 4
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP11]], align 2
+; CHECK-NEXT:    br label [[CONDFREE:%.*]]
+; CHECK:       CondFree:
+; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
+; CHECK:       Free:
+; CHECK-NEXT:    [[TMP12:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    call void @free(ptr [[TMP12]])
+; CHECK-NEXT:    br label [[END]]
+; CHECK:       End:
+; CHECK-NEXT:    ret void
+;
+  store i8 7, ptr addrspace(3) @lds_1, align 4
+  store i32 8, ptr addrspace(3) @lds_2, align 2
+  ret void
+}
+;.
+; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
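
As a hand-worked check of the metadata above (assuming the
{offset, size, aligned-size} item layout visible in the CHECK lines, with a
maximum alignment of 8): @lds_1 occupies offset 0 with aligned size 8,
@lds_2 starts at offset 8 with aligned size 8, so the kernel mallocs
8 + 8 = 16 bytes. A compilable sketch of that arithmetic:

  #include <cassert>
  #include <cstdint>

  // Align-up, matching the add/udiv/mul sequence the pass emits.
  constexpr uint32_t alignUp(uint32_t Size, uint32_t Align) {
    return (Size + Align - 1) / Align * Align;
  }

  int main() {
    const uint32_t MaxAlign = 8;                        // max over @lds_1/@lds_2
    uint32_t Off1 = 0, Aligned1 = alignUp(1, MaxAlign); // @lds_1: 1 byte -> 8
    uint32_t Off2 = Off1 + Aligned1;                    // @lds_2 at offset 8
    uint32_t Aligned2 = alignUp(4, MaxAlign);           // 4 bytes -> 8
    // Malloc size = offset of the last static LDS + its aligned size.
    assert(Off2 + Aligned2 == 16);
    return 0;
  }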

>From f2f41388d1f1d39380c8ffefba610d482f981b38 Mon Sep 17 00:00:00 2001
From: skc7 <Krishna.Sankisa at amd.com>
Date: Fri, 10 May 2024 12:56:51 +0530
Subject: [PATCH 2/3] [AMDGPU] Update PR as per review comments:1

---
 llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp   | 164 ++++++++++--------
 .../Target/AMDGPU/Utils/AMDGPUMemoryUtils.h   |   2 +-
 ...pu-sw-lower-lds-dynamic-indirect-access.ll |  56 +++---
 .../amdgpu-sw-lower-lds-dynamic-lds-test.ll   |  56 +++---
 ...ds-multi-static-dynamic-indirect-access.ll | 124 ++++++-------
 ...gpu-sw-lower-lds-multiple-blocks-return.ll |  29 +++-
 ...ower-lds-static-dynamic-indirect-access.ll |  56 +++---
 ...pu-sw-lower-lds-static-dynamic-lds-test.ll |  57 +++---
 ...s-static-indirect-access-function-param.ll |  20 ++-
 ...lower-lds-static-indirect-access-nested.ll |  93 +++++-----
 ...gpu-sw-lower-lds-static-indirect-access.ll |  22 ++-
 .../amdgpu-sw-lower-lds-static-lds-test.ll    |  31 ++--
 12 files changed, 398 insertions(+), 312 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
index 3b951e72aabed..b24a0f3f00c00 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
@@ -95,6 +95,7 @@
 #include <algorithm>
 
 #define DEBUG_TYPE "amdgpu-sw-lower-lds"
+#define COV5_HIDDEN_DYN_LDS_SIZE_ARG 15
 
 using namespace llvm;
 using namespace AMDGPU;
@@ -153,6 +154,10 @@ class AMDGPUSwLowerLDS {
   void lowerNonKernelLDSAccesses(Function *Func,
                                  SetVector<GlobalVariable *> &LDSGlobals,
                                  NonKernelLDSParameters &NKLDSParams);
+  void
+  updateMallocSizeForDynamicLDS(Function *Func, Value *CurrMallocSize,
+                                Value *HiddenDynLDSSize,
+                                SetVector<GlobalVariable *> &DynamicLDSGlobals);
 
 private:
   Module &M;
@@ -195,7 +200,6 @@ SetVector<Function *> AMDGPUSwLowerLDS::getOrderedIndirectLDSAccessingKernels(
     Function *Func = OrderedKernels[i];
     Func->setMetadata("llvm.amdgcn.lds.kernel.id",
                       MDNode::get(Ctx, AttrMDArgs));
-    auto &LDSParams = KernelToLDSParametersMap[Func];
   }
   return std::move(OrderedKernels);
 }
@@ -232,6 +236,9 @@ void AMDGPUSwLowerLDS::populateSwLDSGlobal(Function *Func) {
       M, IRB.getPtrTy(), false, GlobalValue::InternalLinkage,
       PoisonValue::get(IRB.getPtrTy()), "llvm.amdgcn.sw.lds." + Func->getName(),
       nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS, false);
+  GlobalValue::SanitizerMetadata MD;
+  MD.NoAddress = true;
+  LDSParams.SwLDS->setSanitizerMetadata(MD);
   return;
 }
 
@@ -265,7 +272,7 @@ void AMDGPUSwLowerLDS::populateSwMetadataGlobal(Function *Func) {
   //{StartOffset, AlignedSizeInBytes}
   SmallString<128> MDItemStr;
   raw_svector_ostream MDItemOS(MDItemStr);
-  MDItemOS << "llvm.amdgcn.sw.lds." << Func->getName().str() << ".md.item";
+  MDItemOS << "llvm.amdgcn.sw.lds." << Func->getName() << ".md.item";
 
   StructType *LDSItemTy =
       StructType::create(Ctx, {Int32Ty, Int32Ty, Int32Ty}, MDItemOS.str());
@@ -296,13 +303,13 @@ void AMDGPUSwLowerLDS::populateSwMetadataGlobal(Function *Func) {
 
   SmallString<128> MDTypeStr;
   raw_svector_ostream MDTypeOS(MDTypeStr);
-  MDTypeOS << "llvm.amdgcn.sw.lds." << Func->getName().str() << ".md.type";
+  MDTypeOS << "llvm.amdgcn.sw.lds." << Func->getName() << ".md.type";
 
   StructType *MetadataStructType =
       StructType::create(Ctx, Items, MDTypeOS.str());
   SmallString<128> MDStr;
   raw_svector_ostream MDOS(MDStr);
-  MDOS << "llvm.amdgcn.sw.lds." << Func->getName().str() << ".md";
+  MDOS << "llvm.amdgcn.sw.lds." << Func->getName() << ".md";
   LDSParams.SwLDSMetadata = new GlobalVariable(
       M, MetadataStructType, false, GlobalValue::InternalLinkage,
       PoisonValue::get(MetadataStructType), MDOS.str(), nullptr,
@@ -387,8 +394,7 @@ void AMDGPUSwLowerLDS::replaceKernelLDSAccesses(Function *Func) {
       Value *Load = IRB.CreateLoad(Int32Ty, GEP);
       Value *BasePlusOffset =
           IRB.CreateInBoundsGEP(IRB.getInt8Ty(), SwLDS, {Load});
-      LLVM_DEBUG(dbgs() << "Sw LDS Lowering, Replacing LDS "
-                        << GV->getName().str());
+      LLVM_DEBUG(dbgs() << "Sw LDS Lowering, Replacing LDS " << GV->getName());
       replacesUsesOfGlobalInFunction(Func, GV, BasePlusOffset);
     }
   };
@@ -398,10 +404,57 @@ void AMDGPUSwLowerLDS::replaceKernelLDSAccesses(Function *Func) {
   ReplaceLDSGlobalUses(IndirectAccess.DynamicLDSGlobals);
 }
 
+void AMDGPUSwLowerLDS::updateMallocSizeForDynamicLDS(
+    Function *Func, Value *CurrMallocSize, Value *HiddenDynLDSSize,
+    SetVector<GlobalVariable *> &DynamicLDSGlobals) {
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  Type *Int32Ty = IRB.getInt32Ty();
+
+  GlobalVariable *SwLDS = LDSParams.SwLDS;
+  GlobalVariable *SwLDSMetadata = LDSParams.SwLDSMetadata;
+  assert(SwLDS && SwLDSMetadata);
+  StructType *MetadataStructType =
+      cast<StructType>(SwLDSMetadata->getValueType());
+  unsigned MaxAlignment = SwLDS->getAlignment();
+  Value *MaxAlignValue = IRB.getInt32(MaxAlignment);
+  Value *MaxAlignValueMinusOne = IRB.getInt32(MaxAlignment - 1);
+
+  for (GlobalVariable *DynGV : DynamicLDSGlobals) {
+    auto &Indices = LDSParams.LDSToReplacementIndicesMap[DynGV];
+    // Update the Offset metadata.
+    Constant *Index0 = ConstantInt::get(Int32Ty, 0);
+    Constant *Index1 = ConstantInt::get(Int32Ty, Indices[1]);
+
+    Constant *Index2Offset = ConstantInt::get(Int32Ty, 0);
+    auto *GEPForOffset = IRB.CreateInBoundsGEP(
+        MetadataStructType, SwLDSMetadata, {Index0, Index1, Index2Offset});
+
+    IRB.CreateStore(CurrMallocSize, GEPForOffset);
+    // Update the size and Aligned Size metadata.
+    Constant *Index2Size = ConstantInt::get(Int32Ty, 1);
+    auto *GEPForSize = IRB.CreateInBoundsGEP(MetadataStructType, SwLDSMetadata,
+                                             {Index0, Index1, Index2Size});
+
+    Value *CurrDynLDSSize = IRB.CreateLoad(Int32Ty, HiddenDynLDSSize);
+    IRB.CreateStore(CurrDynLDSSize, GEPForSize);
+    Constant *Index2AlignedSize = ConstantInt::get(Int32Ty, 1);
+    auto *GEPForAlignedSize = IRB.CreateInBoundsGEP(
+        MetadataStructType, SwLDSMetadata, {Index0, Index1, Index2AlignedSize});
+
+    Value *AlignedDynLDSSize =
+        IRB.CreateAdd(CurrDynLDSSize, MaxAlignValueMinusOne);
+    AlignedDynLDSSize = IRB.CreateUDiv(AlignedDynLDSSize, MaxAlignValue);
+    AlignedDynLDSSize = IRB.CreateMul(AlignedDynLDSSize, MaxAlignValue);
+    IRB.CreateStore(AlignedDynLDSSize, GEPForAlignedSize);
+
+    // Update the Current Malloc Size
+    CurrMallocSize = IRB.CreateAdd(CurrMallocSize, AlignedDynLDSSize);
+  }
+}
+
 void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
                                               DomTreeUpdater &DTU) {
-  LLVM_DEBUG(dbgs() << "Sw Lowering Kernel LDS for : "
-                    << Func->getName().str());
+  LLVM_DEBUG(dbgs() << "Sw Lowering Kernel LDS for : " << Func->getName());
   auto &LDSParams = KernelToLDSParametersMap[Func];
   auto &Ctx = M.getContext();
   auto *PrevEntryBlock = &Func->getEntryBlock();
@@ -423,12 +476,6 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
   auto *const XYZOr = IRB.CreateOr(XYOr, WIdz);
   auto *const WIdzCond = IRB.CreateICmpEQ(XYZOr, IRB.getInt32(0));
 
-  GlobalVariable *SwLDS = LDSParams.SwLDS;
-  GlobalVariable *SwLDSMetadata = LDSParams.SwLDSMetadata;
-  assert(SwLDS && SwLDSMetadata);
-  StructType *MetadataStructType =
-      cast<StructType>(SwLDSMetadata->getValueType());
-
   // All work items will branch to PrevEntryBlock except {0,0,0} index
   // work item which will branch to malloc block.
   IRB.CreateCondBr(WIdzCond, MallocBlock, PrevEntryBlock);
@@ -439,8 +486,15 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
   // If Dynamic LDS globals are accessed by the kernel,
   // Get the size of dyn lds from hidden dyn_lds_size kernel arg.
   // Update the corresponding metadata global entries for this dyn lds global.
+  GlobalVariable *SwLDS = LDSParams.SwLDS;
+  GlobalVariable *SwLDSMetadata = LDSParams.SwLDSMetadata;
+  assert(SwLDS && SwLDSMetadata);
+  StructType *MetadataStructType =
+      cast<StructType>(SwLDSMetadata->getValueType());
   uint32_t MallocSize = 0;
   Value *CurrMallocSize;
+  Type *Int32Ty = IRB.getInt32Ty();
+  Type *Int64Ty = IRB.getInt64Ty();
 
   unsigned NumStaticLDS = LDSParams.DirectAccess.StaticLDSGlobals.size() +
                           LDSParams.IndirectAccess.StaticLDSGlobals.size();
@@ -448,70 +502,40 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
                        LDSParams.IndirectAccess.DynamicLDSGlobals.size();
 
   if (NumStaticLDS) {
-    auto *GEPForEndStaticLDSOffset = IRB.CreateInBoundsGEP(
-        MetadataStructType, SwLDSMetadata,
-        {IRB.getInt32(0), IRB.getInt32(NumStaticLDS - 1), IRB.getInt32(0)});
-
-    auto *GEPForEndStaticLDSSize = IRB.CreateInBoundsGEP(
-        MetadataStructType, SwLDSMetadata,
-        {IRB.getInt32(0), IRB.getInt32(NumStaticLDS - 1), IRB.getInt32(2)});
+    auto *GEPForEndStaticLDSOffset =
+        IRB.CreateInBoundsGEP(MetadataStructType, SwLDSMetadata,
+                              {ConstantInt::get(Int32Ty, 0),
+                               ConstantInt::get(Int32Ty, NumStaticLDS - 1),
+                               ConstantInt::get(Int32Ty, 0)});
+
+    auto *GEPForEndStaticLDSSize =
+        IRB.CreateInBoundsGEP(MetadataStructType, SwLDSMetadata,
+                              {ConstantInt::get(Int32Ty, 0),
+                               ConstantInt::get(Int32Ty, NumStaticLDS - 1),
+                               ConstantInt::get(Int32Ty, 2)});
 
     Value *EndStaticLDSOffset =
-        IRB.CreateLoad(IRB.getInt64Ty(), GEPForEndStaticLDSOffset);
-    Value *EndStaticLDSSize =
-        IRB.CreateLoad(IRB.getInt64Ty(), GEPForEndStaticLDSSize);
+        IRB.CreateLoad(Int32Ty, GEPForEndStaticLDSOffset);
+    Value *EndStaticLDSSize = IRB.CreateLoad(Int32Ty, GEPForEndStaticLDSSize);
     CurrMallocSize = IRB.CreateAdd(EndStaticLDSOffset, EndStaticLDSSize);
   } else
-    CurrMallocSize = IRB.getInt64(MallocSize);
+    CurrMallocSize = IRB.getInt32(MallocSize);
 
   if (NumDynLDS) {
-    unsigned MaxAlignment = SwLDS->getAlignment();
-    Value *MaxAlignValue = IRB.getInt64(MaxAlignment);
-    Value *MaxAlignValueMinusOne = IRB.getInt64(MaxAlignment - 1);
-
+    // Get size from hidden dyn_lds_size argument of kernel
     Value *ImplicitArg =
         IRB.CreateIntrinsic(Intrinsic::amdgcn_implicitarg_ptr, {}, {});
     Value *HiddenDynLDSSize = IRB.CreateInBoundsGEP(
-        ImplicitArg->getType(), ImplicitArg, {IRB.getInt32(15)});
-
-    auto MallocSizeCalcLambda =
-        [&](SetVector<GlobalVariable *> &DynamicLDSGlobals) {
-          for (GlobalVariable *DynGV : DynamicLDSGlobals) {
-            auto &Indices = LDSParams.LDSToReplacementIndicesMap[DynGV];
-
-            // Update the Offset metadata.
-            auto *GEPForOffset = IRB.CreateInBoundsGEP(
-                MetadataStructType, SwLDSMetadata,
-                {IRB.getInt32(0), IRB.getInt32(Indices[1]), IRB.getInt32(0)});
-            IRB.CreateStore(CurrMallocSize, GEPForOffset);
-
-            // Get size from hidden dyn_lds_size argument of kernel
-            // Update the size and Aligned Size metadata.
-            auto *GEPForSize = IRB.CreateInBoundsGEP(
-                MetadataStructType, SwLDSMetadata,
-                {IRB.getInt32(0), IRB.getInt32(Indices[1]), IRB.getInt32(1)});
-            Value *CurrDynLDSSize =
-                IRB.CreateLoad(IRB.getInt64Ty(), HiddenDynLDSSize);
-            IRB.CreateStore(CurrDynLDSSize, GEPForSize);
-
-            auto *GEPForAlignedSize = IRB.CreateInBoundsGEP(
-                MetadataStructType, SwLDSMetadata,
-                {IRB.getInt32(0), IRB.getInt32(Indices[1]), IRB.getInt32(2)});
-            Value *AlignedDynLDSSize =
-                IRB.CreateAdd(CurrDynLDSSize, MaxAlignValueMinusOne);
-            AlignedDynLDSSize =
-                IRB.CreateUDiv(AlignedDynLDSSize, MaxAlignValue);
-            AlignedDynLDSSize = IRB.CreateMul(AlignedDynLDSSize, MaxAlignValue);
-            IRB.CreateStore(AlignedDynLDSSize, GEPForAlignedSize);
-
-            // Update the Current Malloc Size
-            CurrMallocSize = IRB.CreateAdd(CurrMallocSize, AlignedDynLDSSize);
-          }
-        };
-    MallocSizeCalcLambda(LDSParams.DirectAccess.DynamicLDSGlobals);
-    MallocSizeCalcLambda(LDSParams.IndirectAccess.DynamicLDSGlobals);
+        ImplicitArg->getType(), ImplicitArg,
+        {ConstantInt::get(Int64Ty, COV5_HIDDEN_DYN_LDS_SIZE_ARG)});
+    updateMallocSizeForDynamicLDS(Func, CurrMallocSize, HiddenDynLDSSize,
+                                  LDSParams.DirectAccess.DynamicLDSGlobals);
+    updateMallocSizeForDynamicLDS(Func, CurrMallocSize, HiddenDynLDSSize,
+                                  LDSParams.IndirectAccess.DynamicLDSGlobals);
   }
 
+  CurrMallocSize = IRB.CreateZExt(CurrMallocSize, Int64Ty);
+
   // Create a call to the malloc function, which allocates device global
   // memory of size equal to the total LDS accesses size in this kernel.
   FunctionCallee AMDGPUMallocFunc = M.getOrInsertFunction(
@@ -679,7 +703,7 @@ void AMDGPUSwLowerLDS::lowerNonKernelLDSAccesses(
   // Replace LDS access in non-kernel with replacement queried from
   // Base table and offset from offset table.
   LLVM_DEBUG(dbgs() << "Sw LDS lowering, lower non-kernel access for : "
-                    << Func->getName().str());
+                    << Func->getName());
   auto *EntryBlock = &Func->getEntryBlock();
   IRB.SetInsertPoint(EntryBlock, EntryBlock->begin());
   Function *Decl =
@@ -752,6 +776,8 @@ bool AMDGPUSwLowerLDS::run() {
     for (auto &K : LDSAccesses) {
       Function *F = K.first;
       assert(isKernelLDS(F));
+      if (!F->hasFnAttribute(Attribute::SanitizeAddress))
+        continue;
 
       if (!KernelToLDSParametersMap.contains(F)) {
         KernelLDSParameters KernelLDSParams;
@@ -816,6 +842,8 @@ bool AMDGPUSwLowerLDS::run() {
     buildNonKernelLDSOffsetTable(NKLDSParams);
     for (auto &K : NonKernelToLDSAccessMap) {
       Function *Func = K.first;
+      if (!Func->hasFnAttribute(Attribute::SanitizeAddress))
+        continue;
       DenseSet<GlobalVariable *> &LDSGlobals = K.second;
       SetVector<GlobalVariable *> OrderedLDSGlobals = sortByName(
           std::vector<GlobalVariable *>(LDSGlobals.begin(), LDSGlobals.end()));
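
For reference, a host-side analogue of what the new
updateMallocSizeForDynamicLDS helper emits per dynamic LDS global. This is
a sketch assuming an {Offset, Size, AlignedSize} item layout; the real pass
builds this with IRBuilder in the kernel's Malloc block and reloads the
hidden dyn_lds_size argument on each iteration.

  #include <cstdint>
  #include <vector>

  struct MDItem { uint32_t Offset, Size, AlignedSize; };

  uint32_t updateForDynLds(std::vector<MDItem> &Items, uint32_t CurrMallocSize,
                           uint32_t HiddenDynLdsSize, uint32_t MaxAlign) {
    for (MDItem &Item : Items) {
      Item.Offset = CurrMallocSize;        // dynamic global starts here
      Item.Size = HiddenDynLdsSize;        // size from the hidden kernarg
      Item.AlignedSize =
          (HiddenDynLdsSize + MaxAlign - 1) / MaxAlign * MaxAlign;
      CurrMallocSize += Item.AlignedSize;  // grow the running malloc size
    }
    return CurrMallocSize;                 // widened to i64 before @malloc
  }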
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.h
index c95a153417b6f..4d3ad328e1310 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.h
@@ -31,7 +31,7 @@ namespace AMDGPU {
 using FunctionVariableMap = DenseMap<Function *, DenseSet<GlobalVariable *>>;
 using VariableFunctionMap = DenseMap<GlobalVariable *, DenseSet<Function *>>;
 
-Align getAlign(DataLayout const &DL, const GlobalVariable *GV);
+Align getAlign(const DataLayout &DL, const GlobalVariable *GV);
 
 bool isDynamicLDS(const GlobalVariable &GV);
 bool isLDSVariableToLower(const GlobalVariable &GV);
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll
index 3ac43caebd918..587a1b78a0aaf 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
 
 ; Test to check that indirect dynamic LDS access through a non-kernel called from a kernel is lowered correctly.
 @lds_1 = internal addrspace(3) global [1 x i8] poison, align 1
@@ -7,8 +7,11 @@
 @lds_3 = external addrspace(3) global [0 x i8], align 4
 @lds_4 = external addrspace(3) global [0 x i8], align 8
 
-define void @use_variables() {
-; CHECK-LABEL: define void @use_variables() {
+; @llvm.amdgcn.sw.lds.base.table = internal addrspace(4) constant [1 x i32] [i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.k0 to i32)]
+; @llvm.amdgcn.sw.lds.offset.table = internal addrspace(4) constant [1 x [2 x i32]] [[2 x i32] [i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0) to i32), i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0) to i32)]]
+define void @use_variables() sanitize_address {
+; CHECK-LABEL: define void @use_variables(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
@@ -33,9 +36,9 @@ define void @use_variables() {
   ret void
 }
 
-define amdgpu_kernel void @k0() {
+define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k0(
-; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -45,31 +48,32 @@ define amdgpu_kernel void @k0() {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 8
-; CHECK-NEXT:    [[TMP14:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 8
-; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP9]], [[TMP7]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i32 15
-; CHECK-NEXT:    store i64 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
-; CHECK-NEXT:    store i64 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 8
-; CHECK-NEXT:    [[TMP19:%.*]] = add i64 [[TMP11]], 7
-; CHECK-NEXT:    [[TMP28:%.*]] = udiv i64 [[TMP19]], 8
-; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP28]], 8
-; CHECK-NEXT:    store i64 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 8
-; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP27]], [[TMP8]]
-; CHECK-NEXT:    store i64 [[TMP12]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 8
-; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
-; CHECK-NEXT:    store i64 [[TMP29]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 8
-; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP29]], 7
-; CHECK-NEXT:    [[TMP18:%.*]] = udiv i64 [[TMP17]], 8
-; CHECK-NEXT:    [[TMP15:%.*]] = mul i64 [[TMP18]], 8
-; CHECK-NEXT:    store i64 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 8
-; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP12]], [[TMP15]]
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP11]], 7
+; CHECK-NEXT:    [[TMP13:%.*]] = udiv i32 [[TMP12]], 8
+; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP13]], 8
+; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP8]], [[TMP14]]
+; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP27:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP27]], 7
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i32 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], 8
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    [[TMP28:%.*]] = add i32 [[TMP15]], [[TMP19]]
+; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP8]] to i64
 ; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP16]])
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    br label [[TMP21]]
-; CHECK:       22:
+; CHECK:       23:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll
index ecb02d5cb5ebc..8de146b396c30 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll
@@ -1,12 +1,19 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
 
 ; Test to check if direct access of dynamic LDS in a kernel is lowered correctly.
 @lds_1 = external addrspace(3) global [0 x i8]
 @lds_2 = external addrspace(3) global [0 x i8]
 
-define amdgpu_kernel void @k0() {
-; CHECK-LABEL: define amdgpu_kernel void @k0() {
+;.
+; CHECK: @lds_1 = external addrspace(3) global [0 x i8]
+; CHECK: @lds_2 = external addrspace(3) global [0 x i8]
+; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 1
+; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type zeroinitializer, no_sanitize_address
+;.
+define amdgpu_kernel void @k0() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -17,24 +24,24 @@ define amdgpu_kernel void @k0() {
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
 ; CHECK:       Malloc:
 ; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
-; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP20]], i32 15
-; CHECK-NEXT:    store i64 0, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 8
-; CHECK-NEXT:    [[TMP16:%.*]] = load i64, ptr addrspace(4) [[TMP21]], align 8
-; CHECK-NEXT:    store i64 [[TMP16]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 0, i32 1), align 8
-; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP16]], 0
-; CHECK-NEXT:    [[TMP18:%.*]] = udiv i64 [[TMP17]], 1
-; CHECK-NEXT:    [[TMP22:%.*]] = mul i64 [[TMP18]], 1
-; CHECK-NEXT:    store i64 [[TMP22]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 0, i32 2), align 8
-; CHECK-NEXT:    [[TMP23:%.*]] = add i64 0, [[TMP22]]
-; CHECK-NEXT:    store i64 [[TMP23]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 8
-; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr addrspace(4) [[TMP21]], align 8
-; CHECK-NEXT:    store i64 [[TMP13]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 1), align 8
-; CHECK-NEXT:    [[TMP14:%.*]] = add i64 [[TMP13]], 0
-; CHECK-NEXT:    [[TMP24:%.*]] = udiv i64 [[TMP14]], 1
-; CHECK-NEXT:    [[TMP15:%.*]] = mul i64 [[TMP24]], 1
-; CHECK-NEXT:    store i64 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 8
-; CHECK-NEXT:    [[TMP19:%.*]] = add i64 [[TMP23]], [[TMP15]]
-; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP19]])
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP20]], i64 15
+; CHECK-NEXT:    store i32 0, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
+; CHECK-NEXT:    [[TMP19:%.*]] = load i32, ptr addrspace(4) [[TMP18]], align 4
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 0, i32 1), align 4
+; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[TMP19]], 0
+; CHECK-NEXT:    [[TMP22:%.*]] = udiv i32 [[TMP21]], 1
+; CHECK-NEXT:    [[TMP23:%.*]] = mul i32 [[TMP22]], 1
+; CHECK-NEXT:    store i32 [[TMP23]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 0, i32 1), align 4
+; CHECK-NEXT:    [[TMP24:%.*]] = add i32 0, [[TMP23]]
+; CHECK-NEXT:    store i32 [[TMP24]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, ptr addrspace(4) [[TMP18]], align 4
+; CHECK-NEXT:    store i32 [[TMP13]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 1), align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = add i32 [[TMP13]], 0
+; CHECK-NEXT:    [[TMP15:%.*]] = udiv i32 [[TMP14]], 1
+; CHECK-NEXT:    [[TMP16:%.*]] = mul i32 [[TMP15]], 1
+; CHECK-NEXT:    store i32 [[TMP16]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 1), align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP24]], [[TMP16]]
+; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 0)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    br label [[TMP7]]
 ; CHECK:       19:
@@ -61,3 +68,8 @@ define amdgpu_kernel void @k0() {
   store i8 8, ptr addrspace(3) @lds_2, align 8
   ret void
 }
+;.
+; CHECK: attributes #[[ATTR0]] = { sanitize_address }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll
index a31553a98138b..99f911ef48f93 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
 
 ; Test to check that when multiple kernels access the same non-kernel, LDS accesses are lowered correctly.
 @lds_1 = internal addrspace(3) global [1 x i8] poison, align 1
@@ -7,8 +7,11 @@
 @lds_3 = external addrspace(3) global [0 x i8], align 4
 @lds_4 = external addrspace(3) global [0 x i8], align 8
 
-define void @use_variables_1() {
-; CHECK-LABEL: define void @use_variables_1() {
+; @llvm.amdgcn.sw.lds.base.table = internal addrspace(4) constant [2 x i32] [i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.k0 to i32), i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.k1 to i32)]
+; @llvm.amdgcn.sw.lds.offset.table = internal addrspace(4) constant [2 x [4 x i32]] [[4 x i32] [i32 ptrtoint (ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md to i32), i32 poison, i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0) to i32), i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0) to i32)], [4 x i32] [i32 ptrtoint (ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md to i32), i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k1.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 1, i32 0) to i32), i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k1.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0) to i32), i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k1.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 0) to i32)]]
+define void @use_variables_1() sanitize_address {
+; CHECK-LABEL: define void @use_variables_1(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
@@ -33,8 +36,9 @@ define void @use_variables_1() {
   ret void
 }
 
-define void @use_variables_2() {
-; CHECK-LABEL: define void @use_variables_2() {
+define void @use_variables_2() sanitize_address {
+; CHECK-LABEL: define void @use_variables_2(
+; CHECK-SAME: ) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
@@ -59,9 +63,9 @@ define void @use_variables_2() {
   ret void
 }
 
-define amdgpu_kernel void @k0() {
+define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k0(
-; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -71,31 +75,32 @@ define amdgpu_kernel void @k0() {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 8
-; CHECK-NEXT:    [[TMP14:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 0, i32 2), align 8
-; CHECK-NEXT:    [[TMP25:%.*]] = add i64 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 0, i32 2), align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP9]], [[TMP7]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i32 15
-; CHECK-NEXT:    store i64 [[TMP25]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
-; CHECK-NEXT:    store i64 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 1), align 8
-; CHECK-NEXT:    [[TMP19:%.*]] = add i64 [[TMP11]], 7
-; CHECK-NEXT:    [[TMP26:%.*]] = udiv i64 [[TMP19]], 8
-; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP26]], 8
-; CHECK-NEXT:    store i64 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 8
-; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP25]], [[TMP8]]
-; CHECK-NEXT:    store i64 [[TMP12]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 8
-; CHECK-NEXT:    [[TMP27:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
-; CHECK-NEXT:    store i64 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 8
-; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP27]], 7
-; CHECK-NEXT:    [[TMP18:%.*]] = udiv i64 [[TMP17]], 8
-; CHECK-NEXT:    [[TMP15:%.*]] = mul i64 [[TMP18]], 8
-; CHECK-NEXT:    store i64 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 8
-; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP12]], [[TMP15]]
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 1), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP11]], 7
+; CHECK-NEXT:    [[TMP13:%.*]] = udiv i32 [[TMP12]], 8
+; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP13]], 8
+; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 1), align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP8]], [[TMP14]]
+; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP25:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP25]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP25]], 7
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i32 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], 8
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    [[TMP26:%.*]] = add i32 [[TMP15]], [[TMP19]]
+; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP8]] to i64
 ; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP16]])
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    br label [[TMP21]]
-; CHECK:       22:
+; CHECK:       23:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
@@ -118,9 +123,9 @@ define amdgpu_kernel void @k0() {
   ret void
 }
 
-define amdgpu_kernel void @k1() {
+define amdgpu_kernel void @k1() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k1(
-; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META1:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META1:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -130,39 +135,40 @@ define amdgpu_kernel void @k1() {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP14:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP20:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 1, i32 0), align 8
-; CHECK-NEXT:    [[TMP21:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 1, i32 2), align 8
-; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP20]], [[TMP21]]
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP9]], [[TMP7]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i32 15
-; CHECK-NEXT:    store i64 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
-; CHECK-NEXT:    store i64 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 1), align 8
-; CHECK-NEXT:    [[TMP19:%.*]] = add i64 [[TMP11]], 7
-; CHECK-NEXT:    [[TMP24:%.*]] = udiv i64 [[TMP19]], 8
-; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP24]], 8
-; CHECK-NEXT:    store i64 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 2), align 8
-; CHECK-NEXT:    [[TMP26:%.*]] = add i64 [[TMP27]], [[TMP8]]
-; CHECK-NEXT:    store i64 [[TMP26]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 8
-; CHECK-NEXT:    [[TMP25:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
-; CHECK-NEXT:    store i64 [[TMP25]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 1), align 8
-; CHECK-NEXT:    [[TMP28:%.*]] = add i64 [[TMP25]], 7
-; CHECK-NEXT:    [[TMP18:%.*]] = udiv i64 [[TMP28]], 8
-; CHECK-NEXT:    [[TMP29:%.*]] = mul i64 [[TMP18]], 8
-; CHECK-NEXT:    store i64 [[TMP29]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 2), align 8
-; CHECK-NEXT:    [[TMP30:%.*]] = add i64 [[TMP26]], [[TMP29]]
-; CHECK-NEXT:    store i64 [[TMP30]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 0), align 8
-; CHECK-NEXT:    [[TMP33:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
-; CHECK-NEXT:    store i64 [[TMP33]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 1), align 8
-; CHECK-NEXT:    [[TMP34:%.*]] = add i64 [[TMP33]], 7
-; CHECK-NEXT:    [[TMP23:%.*]] = udiv i64 [[TMP34]], 8
-; CHECK-NEXT:    [[TMP22:%.*]] = mul i64 [[TMP23]], 8
-; CHECK-NEXT:    store i64 [[TMP22]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 2), align 8
-; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP30]], [[TMP22]]
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    [[TMP26:%.*]] = add i32 [[TMP11]], 7
+; CHECK-NEXT:    [[TMP27:%.*]] = udiv i32 [[TMP26]], 8
+; CHECK-NEXT:    [[TMP28:%.*]] = mul i32 [[TMP27]], 8
+; CHECK-NEXT:    store i32 [[TMP28]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    [[TMP29:%.*]] = add i32 [[TMP8]], [[TMP28]]
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP30:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP30]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    [[TMP33:%.*]] = add i32 [[TMP30]], 7
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i32 [[TMP33]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], 8
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP8]], [[TMP19]]
+; CHECK-NEXT:    store i32 [[TMP20]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 0), align 4
+; CHECK-NEXT:    [[TMP21:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP21]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 1), align 4
+; CHECK-NEXT:    [[TMP22:%.*]] = add i32 [[TMP21]], 7
+; CHECK-NEXT:    [[TMP23:%.*]] = udiv i32 [[TMP22]], 8
+; CHECK-NEXT:    [[TMP24:%.*]] = mul i32 [[TMP23]], 8
+; CHECK-NEXT:    store i32 [[TMP24]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 1), align 4
+; CHECK-NEXT:    [[TMP25:%.*]] = add i32 [[TMP20]], [[TMP24]]
+; CHECK-NEXT:    [[TMP12:%.*]] = zext i32 [[TMP8]] to i64
 ; CHECK-NEXT:    [[TMP13:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP12]])
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, align 8
 ; CHECK-NEXT:    br label [[TMP14]]
-; CHECK:       27:
+; CHECK:       28:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    [[TMP15:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 4
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll
index 5222c3ae528cc..7e6125503d649 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll
@@ -1,5 +1,5 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
 
 ; Test to check malloc and free blocks are placed correctly when multiple
 ; blocks and branching are present in the function, with LDS accesses lowered correctly.
@@ -7,8 +7,15 @@
 @lds_1 = internal addrspace(3) global i32 poison
 @lds_2 = internal addrspace(3) global i32 poison
 
-define amdgpu_kernel void @test_kernel() {
-; CHECK-LABEL: define amdgpu_kernel void @test_kernel() {
+;.
+; CHECK: @lds_1 = internal addrspace(3) global i32 poison
+; CHECK: @lds_2 = internal addrspace(3) global i32 poison
+; CHECK: @llvm.amdgcn.sw.lds.test_kernel = internal addrspace(3) global ptr poison, no_sanitize_address, align 4
+; CHECK: @llvm.amdgcn.sw.lds.test_kernel.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.test_kernel.md.type { %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 0, i32 4, i32 4 }, %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 4, i32 4, i32 4 } }, no_sanitize_address
+;.
+define amdgpu_kernel void @test_kernel() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @test_kernel(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -18,13 +25,14 @@ define amdgpu_kernel void @test_kernel() {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP15:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 1, i32 0), align 8
-; CHECK-NEXT:    [[TMP16:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 1, i32 2), align 8
-; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], [[TMP16]]
+; CHECK-NEXT:    [[TMP15:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP16:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP15]], [[TMP16]]
+; CHECK-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP18]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP17]])
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, align 8
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       10:
+; CHECK:       11:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, align 4
@@ -77,3 +85,8 @@ ret void
 val1_negative:
 ret void
 }
+;.
+; CHECK: attributes #[[ATTR0]] = { sanitize_address }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
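
An outline of the control flow this test exercises, as a hedged C++
analogue. The stub names are illustrative; in the IR the base pointer is
published through @llvm.amdgcn.sw.lds.test_kernel rather than a local, and
every return in the original body is redirected through the CondFree block.

  #include <cstdint>

  // Stubs standing in for GPU intrinsics and runtime calls.
  extern bool is_workitem_000();          // workitem.id x|y|z all zero
  extern void s_barrier();
  extern void *device_malloc(uint64_t Size);
  extern void device_free(void *Ptr);
  extern void kernel_body(void *LdsBase); // original body, LDS rewritten

  void lowered_kernel(uint64_t MallocSize) {
    bool XYZCond = is_workitem_000();     // WId block
    void *Base = nullptr;
    if (XYZCond)
      Base = device_malloc(MallocSize);   // Malloc block, single work item
    s_barrier();                          // all work items wait for Base
    kernel_body(Base);                    // body; returns funnel onward...
    s_barrier();                          // ...into the CondFree block
    if (XYZCond)
      device_free(Base);                  // Free block
  }                                       // End block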
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll
index a275314d8f6b5..aa22cfaa387f4 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
 
 ; Test to check if static and dynamic LDS accesses are lowered correctly when a non-kernel
 ; is called from a kernel.
@@ -8,8 +8,11 @@
 @lds_3 = external addrspace(3) global [0 x i8], align 4
 @lds_4 = external addrspace(3) global [0 x i8], align 8
 
-define void @use_variables() {
-; CHECK-LABEL: define void @use_variables() {
+; @llvm.amdgcn.sw.lds.base.table = internal addrspace(4) constant [1 x i32] [i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.k0 to i32)]
+; @llvm.amdgcn.sw.lds.offset.table = internal addrspace(4) constant [1 x [2 x i32]] [[2 x i32] [i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0) to i32), i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0) to i32)]]
+define void @use_variables() sanitize_address {
+; CHECK-LABEL: define void @use_variables(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
@@ -34,9 +37,9 @@ define void @use_variables() {
   ret void
 }
 
-define amdgpu_kernel void @k0() {
+define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k0(
-; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -46,31 +49,32 @@ define amdgpu_kernel void @k0() {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 8
-; CHECK-NEXT:    [[TMP14:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 8
-; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP9]], [[TMP7]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i32 15
-; CHECK-NEXT:    store i64 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
-; CHECK-NEXT:    store i64 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 8
-; CHECK-NEXT:    [[TMP19:%.*]] = add i64 [[TMP11]], 7
-; CHECK-NEXT:    [[TMP28:%.*]] = udiv i64 [[TMP19]], 8
-; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP28]], 8
-; CHECK-NEXT:    store i64 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 8
-; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP27]], [[TMP8]]
-; CHECK-NEXT:    store i64 [[TMP12]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 8
-; CHECK-NEXT:    [[TMP29:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
-; CHECK-NEXT:    store i64 [[TMP29]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 8
-; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP29]], 7
-; CHECK-NEXT:    [[TMP18:%.*]] = udiv i64 [[TMP17]], 8
-; CHECK-NEXT:    [[TMP15:%.*]] = mul i64 [[TMP18]], 8
-; CHECK-NEXT:    store i64 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 8
-; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP12]], [[TMP15]]
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP11]], 7
+; CHECK-NEXT:    [[TMP13:%.*]] = udiv i32 [[TMP12]], 8
+; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP13]], 8
+; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP8]], [[TMP14]]
+; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP27:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP27]], 7
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i32 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], 8
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    [[TMP28:%.*]] = add i32 [[TMP15]], [[TMP19]]
+; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP8]] to i64
 ; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP16]])
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    br label [[TMP21]]
-; CHECK:       22:
+; CHECK:       23:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll
index 188e494da119e..6d93c6a90b10b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
-; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
 
 ; Test to check if static and dynamic LDS accesses are lowered correctly in a kernel.
 @lds_1 = internal addrspace(3) global [1 x i8] poison, align 4
@@ -12,11 +12,12 @@
 ; CHECK: @lds_2 = internal addrspace(3) global [1 x i32] poison, align 8
 ; CHECK: @lds_3 = external addrspace(3) global [0 x i8], align 4
 ; CHECK: @lds_4 = external addrspace(3) global [0 x i8], align 8
-; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, align 8
+; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 8
 ; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 1, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 4, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 16, i32 0, i32 0 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 16, i32 0, i32 0 } }, no_sanitize_address
 ;.
-define amdgpu_kernel void @k0() {
-; CHECK-LABEL: define amdgpu_kernel void @k0() {
+define amdgpu_kernel void @k0() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -26,31 +27,32 @@ define amdgpu_kernel void @k0() {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 8
-; CHECK-NEXT:    [[TMP14:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 8
-; CHECK-NEXT:    [[TMP31:%.*]] = add i64 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP9]], [[TMP7]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i32 15
-; CHECK-NEXT:    store i64 [[TMP31]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
-; CHECK-NEXT:    store i64 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 8
-; CHECK-NEXT:    [[TMP19:%.*]] = add i64 [[TMP11]], 7
-; CHECK-NEXT:    [[TMP32:%.*]] = udiv i64 [[TMP19]], 8
-; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP32]], 8
-; CHECK-NEXT:    store i64 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 8
-; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP31]], [[TMP8]]
-; CHECK-NEXT:    store i64 [[TMP12]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 8
-; CHECK-NEXT:    [[TMP33:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
-; CHECK-NEXT:    store i64 [[TMP33]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 8
-; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[TMP33]], 7
-; CHECK-NEXT:    [[TMP18:%.*]] = udiv i64 [[TMP17]], 8
-; CHECK-NEXT:    [[TMP15:%.*]] = mul i64 [[TMP18]], 8
-; CHECK-NEXT:    store i64 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 8
-; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[TMP12]], [[TMP15]]
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP11]], 7
+; CHECK-NEXT:    [[TMP13:%.*]] = udiv i32 [[TMP12]], 8
+; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP13]], 8
+; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP8]], [[TMP14]]
+; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP31:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
+; CHECK-NEXT:    store i32 [[TMP31]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP31]], 7
+; CHECK-NEXT:    [[TMP18:%.*]] = udiv i32 [[TMP17]], 8
+; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], 8
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    [[TMP32:%.*]] = add i32 [[TMP15]], [[TMP19]]
+; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP8]] to i64
 ; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP16]])
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    br label [[TMP21]]
-; CHECK:       22:
+; CHECK:       23:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
@@ -83,6 +85,7 @@ define amdgpu_kernel void @k0() {
   ret void
 }
 ;.
-; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; CHECK: attributes #[[ATTR1:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+; CHECK: attributes #[[ATTR0]] = { sanitize_address }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
 ;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll
index 3a3950c007cd3..0ca8a51c049f2 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll
@@ -1,14 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
 
 ; Test to check if LDS accesses are lowered correctly when LDS is passed as a function
 ; argument to a non-kernel.
 
 @lds_var = internal addrspace(3) global [1024 x i32] poison, align 4
 
-define void @my_function(ptr addrspace(3) %lds_arg) {
+define void @my_function(ptr addrspace(3) %lds_arg) sanitize_address {
 ; CHECK-LABEL: define void @my_function(
-; CHECK-SAME: ptr addrspace(3) [[LDS_ARG:%.*]]) {
+; CHECK-SAME: ptr addrspace(3) [[LDS_ARG:%.*]]) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[LDS_VAL:%.*]] = load i32, ptr addrspace(3) [[LDS_ARG]], align 4
 ; CHECK-NEXT:    [[NEW_LDS_VAL:%.*]] = add i32 [[LDS_VAL]], 1
 ; CHECK-NEXT:    store i32 [[NEW_LDS_VAL]], ptr addrspace(3) [[LDS_ARG]], align 4
@@ -20,8 +20,9 @@ define void @my_function(ptr addrspace(3) %lds_arg) {
   ret void
 }
 
-define amdgpu_kernel void @my_kernel() {
-; CHECK-LABEL: define amdgpu_kernel void @my_kernel() {
+define amdgpu_kernel void @my_kernel() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @my_kernel(
+; CHECK-SAME: ) #[[ATTR0]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -31,13 +32,14 @@ define amdgpu_kernel void @my_kernel() {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, align 8
-; CHECK-NEXT:    [[TMP12:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_MY_KERNEL_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, i32 0, i32 0, i32 2), align 8
-; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], [[TMP12]]
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_MY_KERNEL_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, i32 0, i32 0, i32 2), align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = add i32 [[TMP11]], [[TMP12]]
+; CHECK-NEXT:    [[TMP13:%.*]] = zext i32 [[TMP14]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP13]])
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, align 8
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       10:
+; CHECK:       11:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll
index f5114a8554248..5148e04d541e6 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll
@@ -1,14 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
 
 ; Test to check if LDS accesses are lowered correctly when a call is made to a nested non-kernel.
 
 @A = external addrspace(3) global [8 x ptr]
 @B = external addrspace(3) global [0 x i32]
 
-define amdgpu_kernel void @kernel_0() {
+; @llvm.amdgcn.sw.lds.base.table = internal addrspace(4) constant [4 x i32] [i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_0 to i32), i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_1 to i32), i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_2 to i32), i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_3 to i32)]
+; @llvm.amdgcn.sw.lds.offset.table = internal addrspace(4) constant [4 x [2 x i32]] [[2 x i32] [i32 ptrtoint (ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md to i32), i32 poison], [2 x i32] [i32 poison, i32 ptrtoint (ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md to i32)], [2 x i32] [i32 ptrtoint (ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md to i32), i32 poison], [2 x i32] [i32 poison, i32 ptrtoint (ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md to i32)]]
+define amdgpu_kernel void @kernel_0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @kernel_0(
-; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -18,13 +20,14 @@ define amdgpu_kernel void @kernel_0() {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md, align 8
-; CHECK-NEXT:    [[TMP10:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md, i32 0, i32 0, i32 2), align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], [[TMP10]]
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md, align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md, i32 0, i32 0, i32 2), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP9]], [[TMP10]]
+; CHECK-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP12]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP11]])
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_0, align 8
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       10:
+; CHECK:       11:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    call void @call_store_A()
@@ -43,9 +46,9 @@ define amdgpu_kernel void @kernel_0() {
   ret void
 }
 
-define amdgpu_kernel void @kernel_1() {
+define amdgpu_kernel void @kernel_1() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @kernel_1(
-; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META1:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META1:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -56,16 +59,16 @@ define amdgpu_kernel void @kernel_1() {
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP14:%.*]]
 ; CHECK:       Malloc:
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i32 15
-; CHECK-NEXT:    store i64 0, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
-; CHECK-NEXT:    store i64 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 0, i32 1), align 8
-; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP11]], 3
-; CHECK-NEXT:    [[TMP10:%.*]] = udiv i64 [[TMP12]], 4
-; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP10]], 4
-; CHECK-NEXT:    store i64 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 0, i32 2), align 8
-; CHECK-NEXT:    [[TMP9:%.*]] = add i64 0, [[TMP8]]
-; CHECK-NEXT:    [[TMP13:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP9]])
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
+; CHECK-NEXT:    store i32 0, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(4) [[TMP7]], align 4
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 0, i32 1), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = add i32 [[TMP8]], 3
+; CHECK-NEXT:    [[TMP10:%.*]] = udiv i32 [[TMP9]], 4
+; CHECK-NEXT:    [[TMP11:%.*]] = mul i32 [[TMP10]], 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 0, i32 1), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 0, [[TMP11]]
+; CHECK-NEXT:    [[TMP13:%.*]] = call ptr addrspace(1) @malloc(i64 0)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_1, align 8
 ; CHECK-NEXT:    br label [[TMP14]]
 ; CHECK:       14:
@@ -87,9 +90,9 @@ define amdgpu_kernel void @kernel_1() {
   ret void
 }
 
-define amdgpu_kernel void @kernel_2() {
+define amdgpu_kernel void @kernel_2() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @kernel_2(
-; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META2:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META2:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -99,13 +102,14 @@ define amdgpu_kernel void @kernel_2() {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP9:%.*]] = load i64, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md, align 8
-; CHECK-NEXT:    [[TMP10:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_2_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md, i32 0, i32 0, i32 2), align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], [[TMP10]]
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md, align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_2_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md, i32 0, i32 0, i32 2), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP9]], [[TMP10]]
+; CHECK-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP12]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP11]])
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_2, align 8
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       10:
+; CHECK:       11:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    call void @store_A()
@@ -124,9 +128,9 @@ define amdgpu_kernel void @kernel_2() {
   ret void
 }
 
-define amdgpu_kernel void @kernel_3() {
+define amdgpu_kernel void @kernel_3() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @kernel_3(
-; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META3:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META3:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -137,16 +141,16 @@ define amdgpu_kernel void @kernel_3() {
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP14:%.*]]
 ; CHECK:       Malloc:
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
-; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i32 15
-; CHECK-NEXT:    store i64 0, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, align 8
-; CHECK-NEXT:    [[TMP11:%.*]] = load i64, ptr addrspace(4) [[TMP7]], align 8
-; CHECK-NEXT:    store i64 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 0, i32 1), align 8
-; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP11]], 3
-; CHECK-NEXT:    [[TMP10:%.*]] = udiv i64 [[TMP12]], 4
-; CHECK-NEXT:    [[TMP8:%.*]] = mul i64 [[TMP10]], 4
-; CHECK-NEXT:    store i64 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 0, i32 2), align 8
-; CHECK-NEXT:    [[TMP9:%.*]] = add i64 0, [[TMP8]]
-; CHECK-NEXT:    [[TMP13:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP9]])
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
+; CHECK-NEXT:    store i32 0, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(4) [[TMP7]], align 4
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 0, i32 1), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = add i32 [[TMP8]], 3
+; CHECK-NEXT:    [[TMP10:%.*]] = udiv i32 [[TMP9]], 4
+; CHECK-NEXT:    [[TMP11:%.*]] = mul i32 [[TMP10]], 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 0, i32 1), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = add i32 0, [[TMP11]]
+; CHECK-NEXT:    [[TMP13:%.*]] = call ptr addrspace(1) @malloc(i64 0)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_3, align 8
 ; CHECK-NEXT:    br label [[TMP14]]
 ; CHECK:       14:
@@ -168,8 +172,9 @@ define amdgpu_kernel void @kernel_3() {
   ret void
 }
 
-define private void @call_store_A() {
-; CHECK-LABEL: define private void @call_store_A() {
+define private void @call_store_A() sanitize_address {
+; CHECK-LABEL: define private void @call_store_A(
+; CHECK-SAME: ) #[[ATTR0]] {
 ; CHECK-NEXT:    call void @store_A()
 ; CHECK-NEXT:    ret void
 ;
@@ -177,8 +182,9 @@ define private void @call_store_A() {
   ret void
 }
 
-define private void @store_A() {
-; CHECK-LABEL: define private void @store_A() {
+define private void @store_A() sanitize_address {
+; CHECK-LABEL: define private void @store_A(
+; CHECK-SAME: ) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [4 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
@@ -196,8 +202,9 @@ define private void @store_A() {
   ret void
 }
 
-define private ptr @get_B_ptr() {
-; CHECK-LABEL: define private ptr @get_B_ptr() {
+define private ptr @get_B_ptr() sanitize_address {
+; CHECK-LABEL: define private ptr @get_B_ptr(
+; CHECK-SAME: ) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [4 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll
index ac082dda0f10a..2fe685bc9c958 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
 
 ; Test to check if static LDS is lowered correctly when a non-kernel with LDS accesses is called from a kernel.
 @lds_1 = internal addrspace(3) global [1 x i8] poison, align 1
@@ -7,8 +7,11 @@
 @lds_3 = external addrspace(3) global [3 x i8], align 4
 @lds_4 = external addrspace(3) global [4 x i8], align 8
 
-define void @use_variables() {
-; CHECK-LABEL: define void @use_variables() {
+; @llvm.amdgcn.sw.lds.base.table = internal addrspace(4) constant [1 x i32] [i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.k0 to i32)]
+; @llvm.amdgcn.sw.lds.offset.table = internal addrspace(4) constant [1 x [2 x i32]] [[2 x i32] [i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0) to i32), i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0) to i32)]]
+define void @use_variables() sanitize_address {
+; CHECK-LABEL: define void @use_variables(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
@@ -36,9 +39,9 @@ define void @use_variables() {
   ret void
 }
 
-define amdgpu_kernel void @k0() {
+define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k0(
-; CHECK-SAME: ) !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -48,13 +51,14 @@ define amdgpu_kernel void @k0() {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 8
-; CHECK-NEXT:    [[TMP14:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 8
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 4
+; CHECK-NEXT:    [[TMP16:%.*]] = add i32 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP16]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP15]])
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       10:
+; CHECK:       11:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll
index 928e83a1f30b5..be1ff10550d7a 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
-; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-- | FileCheck %s
+; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
 
 ; Test to check if static LDS accesses in a kernel are lowered correctly.
 @lds_1 = internal addrspace(3) global [1 x i8] poison, align 4
@@ -8,11 +8,12 @@
 ;.
 ; CHECK: @lds_1 = internal addrspace(3) global [1 x i8] poison, align 4
 ; CHECK: @lds_2 = internal addrspace(3) global [1 x i32] poison, align 8
-; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, align 8
+; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 8
 ; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 1, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 4, i32 8 } }, no_sanitize_address
 ;.
-define amdgpu_kernel void @k0() {
-; CHECK-LABEL: define amdgpu_kernel void @k0() {
+define amdgpu_kernel void @k0() sanitize_address {
+; CHECK-LABEL: define amdgpu_kernel void @k0(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -22,21 +23,22 @@ define amdgpu_kernel void @k0() {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP13:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 8
-; CHECK-NEXT:    [[TMP14:%.*]] = load i64, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 8
-; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP16:%.*]] = add i32 [[TMP13]], [[TMP14]]
+; CHECK-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP16]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP15]])
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       10:
+; CHECK:       11:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP8]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
-; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP10]]
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP9]], align 4
-; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP11]], align 2
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP10]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP17]], align 4
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP18]], align 2
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
@@ -53,6 +55,7 @@ define amdgpu_kernel void @k0() {
   ret void
 }
 ;.
-; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; CHECK: attributes #[[ATTR1:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+; CHECK: attributes #[[ATTR0]] = { sanitize_address }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
 ;.

>From 2dc9064a86faeb0ec1e3142ab63b0ec203b2a82d Mon Sep 17 00:00:00 2001
From: skc7 <Krishna.Sankisa at amd.com>
Date: Tue, 14 May 2024 08:45:02 +0530
Subject: [PATCH 3/3] [AMDGPU] Add attribute and metadata for the new LDS
 variable. [AMDGPU] Fix base and offset table types [AMDGPU] Update dyn lds
 gep index [AMDGPU] Create dynlds per kernel accessing dynamic LDS.

---
 llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp   | 269 ++++++++++++++----
 ...pu-sw-lower-lds-dynamic-indirect-access.ll |  69 ++---
 .../amdgpu-sw-lower-lds-dynamic-lds-test.ll   |  53 ++--
 ...ds-multi-static-dynamic-indirect-access.ll | 126 ++++----
 ...gpu-sw-lower-lds-multiple-blocks-return.ll |  39 +--
 ...ower-lds-static-dynamic-indirect-access.ll |  69 ++---
 ...pu-sw-lower-lds-static-dynamic-lds-test.ll |  64 +++--
 ...s-static-indirect-access-function-param.ll |  37 ++-
 ...lower-lds-static-indirect-access-nested.ll | 138 +++++----
 ...gpu-sw-lower-lds-static-indirect-access.ll |  54 ++--
 .../amdgpu-sw-lower-lds-static-lds-test.ll    |  39 +--
 11 files changed, 585 insertions(+), 372 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
index b24a0f3f00c00..76dfdcb7e0a63 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
@@ -7,19 +7,30 @@
 //===----------------------------------------------------------------------===//
 //
 // This pass lowers the local data store, LDS, uses in kernel and non-kernel
-// functions in module with dynamically allocated device global memory.
+// functions in the module and packs them together as a single allocation.
+// Packed LDS Layout is emulated in the dynamically allocated device global
+// memory.
 //
 // Replacement of Kernel LDS accesses:
 //    For a kernel, LDS accesses can be static or dynamic, and each can be
 //    direct (accessed within the kernel) or indirect (accessed through
 //    non-kernels).
-//    A device global memory equal to size of all these LDS globals will be
-//    allocated. At the prologue of the kernel, a single work-item from the
-//    work-group, does a "malloc" and stores the pointer of the allocation in
-//    new LDS global that will be created for the kernel. This will be called
-//    "SW LDS" in this pass.
-//    Each LDS access corresponds to an offset in the allocated memory.
-//    All static LDS accesses will be allocated first and then dynamic LDS
-//    will occupy the device global memory.
+//    All the LDS accesses corresponding to a kernel will be packed together,
+//    with all static LDS accesses allocated first and dynamic LDS following.
+//    The total size, including alignment padding, is calculated. A new LDS
+//    global called "SW LDS" will be created for the kernel, and it will carry
+//    the attribute "amdgpu-lds-size" whose value is the calculated size.
+//    All the LDS accesses in the module will be replaced by a GEP with the
+//    corresponding offset into the "SW LDS".
+//    A new "llvm.amdgcn.<kernel>.dynlds" is created per kernel accessing
+//    the dynamic LDS. This will be marked used by kernel and will have
+//    MD_absolue_symbol metadata set to total static LDS size, Since dynamic
+//    LDS allocation starts after all static LDS allocation.
+//
+//    A device global memory block equal to the total LDS size will be
+//    allocated. At the prologue of the kernel, a single work-item from the
+//    work-group does a "malloc" and stores the pointer of the allocation
+//    in the "SW LDS" global.
+//
 //    To store the offsets corresponding to all LDS accesses, another global
 //    variable is created which will be called "SW LDS metadata" in this pass.
 //    - SW LDS Global:
@@ -40,8 +51,6 @@
 //        allocation done at runtime with query to "hidden_dynamic_lds_size"
 //        hidden kernel argument.
 //
-//    LDS accesses within the kernel will be replaced by "gep" ptr to
-//    corresponding offset into allocated device global memory for the kernel.
 //    At the epilogue of the kernel, the allocated memory is freed by the same
 //    single work-item.
 //
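As a sketch, the guarded prologue this produces looks roughly as follows
(names and the size constant are illustrative; the autogenerated tests in this
patch show the exact form, and the sanitizer flow later swaps malloc for
__asan_malloc_impl):

  WId:
    %idx = call i32 @llvm.amdgcn.workitem.id.x()
    %idy = call i32 @llvm.amdgcn.workitem.id.y()
    %idz = call i32 @llvm.amdgcn.workitem.id.z()
    %xy = or i32 %idx, %idy
    %xyz = or i32 %xy, %idz
    %first = icmp eq i32 %xyz, 0
    br i1 %first, label %Malloc, label %Join
  Malloc:                          ; only the first work-item allocates
    %mem = call ptr addrspace(1) @malloc(i64 16)
    store ptr addrspace(1) %mem, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0
    br label %Join
  Join:
    %cond = phi i1 [ false, %WId ], [ true, %Malloc ]
    call void @llvm.amdgcn.s.barrier()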
@@ -53,18 +62,17 @@
 //    - Base table:
 //        Base table will have single row, with elements of the row
 //        placed as per kernel ID. Each element in the row corresponds
-//        to addresss of "SW LDS" variable created for
-//        that kernel.
+//        to the ptr of the "SW LDS" variable created for that kernel.
 //    - Offset table:
 //        Offset table will have multiple rows and columns.
 //        Rows are assumed to be from 0 to (n-1). n is total number
 //        of kernels accessing the LDS through non-kernels.
 //        Each row will have m elements. m is the total number of
 //        unique LDS globals accessed by all non-kernels.
-//        Each element in the row correspond to the address of
+//        Each element in the row corresponds to the ptr of
 //        the replacement of LDS global done by that particular kernel.
 //    A LDS variable in non-kernel will be replaced based on the information
-//    from base and offset tables. Based on kernel-id query, address of "SW
+//    from the base and offset tables. Based on a kernel-id query, the ptr
+//    of the "SW LDS" for that kernel is obtained from the base table.
 //    The Offset into the base "SW LDS" is obtained from
 //    the corresponding element in the offset table. With this information, replacement
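A sketch of the resulting non-kernel lookup (table shapes here follow the
single-kernel tests in this patch; %kid indexes both tables):

  %kid    = call i32 @llvm.amdgcn.lds.kernel.id()
  %bslot  = getelementptr inbounds [1 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 %kid
  %base   = load i32, ptr addrspace(4) %bslot
  %swlds  = inttoptr i32 %base to ptr addrspace(3)
  %oslot  = getelementptr inbounds [1 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 %kid, i32 0
  %optr32 = load i32, ptr addrspace(4) %oslot
  %optr   = inttoptr i32 %optr32 to ptr addrspace(1)
  %offset = load i32, ptr addrspace(1) %optr    ; byte offset from the metadata struct
  %repl   = getelementptr inbounds i8, ptr addrspace(3) %swlds, i32 %offset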
@@ -114,11 +122,13 @@ struct LDSAccessTypeInfo {
 // into device global memory.
 struct KernelLDSParameters {
   GlobalVariable *SwLDS = nullptr;
+  GlobalVariable *SwDynLDS = nullptr;
   GlobalVariable *SwLDSMetadata = nullptr;
   LDSAccessTypeInfo DirectAccess;
   LDSAccessTypeInfo IndirectAccess;
   DenseMap<GlobalVariable *, SmallVector<uint32_t, 3>>
       LDSToReplacementIndicesMap;
+  uint32_t MallocSize = 0;
 };
 
 // Struct to store info for creation of the offset table
@@ -141,8 +151,10 @@ class AMDGPUSwLowerLDS {
   getOrderedIndirectLDSAccessingKernels(SetVector<Function *> &&Kernels);
   SetVector<GlobalVariable *>
   getOrderedNonKernelAllLDSGlobals(SetVector<GlobalVariable *> &&Variables);
-  void populateSwLDSGlobal(Function *Func);
+  void buildSwLDSGlobal(Function *Func);
+  void buildSwDynLDSGlobal(Function *Func);
   void populateSwMetadataGlobal(Function *Func);
+  void populateSwLDSAttributeAndMetadata(Function *Func);
   void populateLDSToReplacementIndicesMap(Function *Func);
   void replaceKernelLDSAccesses(Function *Func);
   void lowerKernelLDSAccesses(Function *Func, DomTreeUpdater &DTU);
@@ -155,7 +167,7 @@ class AMDGPUSwLowerLDS {
                                  SetVector<GlobalVariable *> &LDSGlobals,
                                  NonKernelLDSParameters &NKLDSParams);
   void
-  updateMallocSizeForDynamicLDS(Function *Func, Value *CurrMallocSize,
+  updateMallocSizeForDynamicLDS(Function *Func, Value **CurrMallocSize,
                                 Value *HiddenDynLDSSize,
                                 SetVector<GlobalVariable *> &DynamicLDSGlobals);
 
@@ -188,7 +200,6 @@ SetVector<Function *> AMDGPUSwLowerLDS::getOrderedIndirectLDSAccessingKernels(
   // Also assign a kernel ID metadata based on the sorted order.
   LLVMContext &Ctx = M.getContext();
   if (Kernels.size() > UINT32_MAX) {
-    // 32 bit keeps it in one SGPR. > 2**32 kernels won't fit on the GPU
     report_fatal_error("Unimplemented SW LDS lowering for > 2**32 kernels");
   }
   SetVector<Function *> OrderedKernels =
@@ -227,7 +238,45 @@ void AMDGPUSwLowerLDS::getUsesOfLDSByNonKernels(
   }
 }
 
-void AMDGPUSwLowerLDS::populateSwLDSGlobal(Function *Func) {
+static void recordLDSAbsoluteAddress(Module &M, GlobalVariable *GV,
+                                     uint32_t Address) {
+  // Write the specified address into metadata where it can be retrieved by
+  // the assembler. Format is a half-open range, [Address, Address+1).
+  LLVMContext &Ctx = M.getContext();
+  auto *IntTy = M.getDataLayout().getIntPtrType(Ctx, AMDGPUAS::LOCAL_ADDRESS);
+  auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address));
+  auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address + 1));
+  GV->setMetadata(LLVMContext::MD_absolute_symbol,
+                  MDNode::get(Ctx, {MinC, MaxC}));
+}
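For example, recording address 0 for a kernel's "SW LDS" global yields IR of
the form (a sketch; amdgcn LDS pointers are 32-bit, hence the i32 range):

  @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 8, !absolute_symbol !0
  !0 = !{i32 0, i32 1}    ; half-open range [0, 1), i.e. address 0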
+
+static void addLDSSizeAttribute(Function *Func, uint32_t Offset,
+                                bool IsDynLDS) {
+  if (Offset != 0) {
+    std::string Buffer;
+    raw_string_ostream SS{Buffer};
+    SS << format("%u", Offset);
+    if (IsDynLDS)
+      SS << format(",%u", Offset);
+    Func->addFnAttr("amdgpu-lds-size", Buffer);
+  }
+}
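So a kernel whose packed static LDS occupies, say, 16 bytes (an illustrative
value) ends up with:

  attributes #0 = { "amdgpu-lds-size"="16" }      ; static LDS only
  attributes #1 = { "amdgpu-lds-size"="16,16" }   ; static plus dynamic LDS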
+
+static void markUsedByKernel(Function *Func, GlobalVariable *SGV) {
+  BasicBlock *Entry = &Func->getEntryBlock();
+  IRBuilder<> Builder(Entry, Entry->getFirstNonPHIIt());
+
+  Function *Decl =
+      Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});
+
+  Value *UseInstance[1] = {
+      Builder.CreateConstInBoundsGEP1_32(SGV->getValueType(), SGV, 0)};
+
+  Builder.CreateCall(Decl, {},
+                     {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)});
+}
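The emitted keep-alive looks like this (a sketch; the operand is really a
constant inbounds GEP to element 0 of the global, which folds to the same
pointer):

  call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]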
+
+void AMDGPUSwLowerLDS::buildSwLDSGlobal(Function *Func) {
   // Create new LDS global required for each kernel to store
   // device global memory pointer.
   auto &LDSParams = KernelToLDSParametersMap[Func];
@@ -242,6 +291,35 @@ void AMDGPUSwLowerLDS::populateSwLDSGlobal(Function *Func) {
   return;
 }
 
+void AMDGPUSwLowerLDS::buildSwDynLDSGlobal(Function *Func) {
+  // Create new Dyn LDS global if kernel accesses dyn LDS.
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  if (LDSParams.DirectAccess.DynamicLDSGlobals.empty() &&
+      LDSParams.IndirectAccess.DynamicLDSGlobals.empty())
+    return;
+  // Create the new global pointer variable.
+  auto emptyCharArray = ArrayType::get(IRB.getInt8Ty(), 0);
+  LDSParams.SwDynLDS = new GlobalVariable(
+      M, emptyCharArray, false, GlobalValue::ExternalLinkage, nullptr,
+      "llvm.amdgcn." + Func->getName() + ".dynlds", nullptr,
+      GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS, false);
+  markUsedByKernel(Func, LDSParams.SwDynLDS);
+  GlobalValue::SanitizerMetadata MD;
+  MD.NoAddress = true;
+  LDSParams.SwDynLDS->setSanitizerMetadata(MD);
+  return;
+}
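For a kernel k0 this adds a global of roughly this shape (the alignment is
set later, once the metadata global has computed the max alignment):

  @llvm.amdgcn.k0.dynlds = external addrspace(3) global [0 x i8], no_sanitize_address, align 8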
+
+void AMDGPUSwLowerLDS::populateSwLDSAttributeAndMetadata(Function *Func) {
+  auto &LDSParams = KernelToLDSParametersMap[Func];
+  bool IsDynLDSUsed = LDSParams.SwDynLDS != nullptr;
+  uint32_t Offset = LDSParams.MallocSize;
+  recordLDSAbsoluteAddress(M, LDSParams.SwLDS, 0);
+  addLDSSizeAttribute(Func, Offset, IsDynLDSUsed);
+  if (LDSParams.SwDynLDS)
+    recordLDSAbsoluteAddress(M, LDSParams.SwDynLDS, Offset);
+}
+
 void AMDGPUSwLowerLDS::populateSwMetadataGlobal(Function *Func) {
   // Create new metadata global for every kernel and initialize the
   // start offsets and sizes corresponding to each LDS accesses.
@@ -276,10 +354,16 @@ void AMDGPUSwLowerLDS::populateSwMetadataGlobal(Function *Func) {
 
   StructType *LDSItemTy =
       StructType::create(Ctx, {Int32Ty, Int32Ty, Int32Ty}, MDItemOS.str());
-  uint32_t MallocSize = 0;
+  uint32_t &MallocSize = LDSParams.MallocSize;
+  SetVector<GlobalVariable *> UniqueLDSGlobals;
   auto buildInitializerForSwLDSMD =
       [&](SetVector<GlobalVariable *> &LDSGlobals) {
         for (auto &GV : LDSGlobals) {
+          if (std::find(UniqueLDSGlobals.begin(), UniqueLDSGlobals.end(), GV) !=
+              UniqueLDSGlobals.end())
+            continue;
+          else
+            UniqueLDSGlobals.insert(GV);
           Type *Ty = GV->getValueType();
           const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
           Items.push_back(LDSItemTy);
@@ -295,7 +379,9 @@ void AMDGPUSwLowerLDS::populateSwMetadataGlobal(Function *Func) {
           Initializers.push_back(InitItem);
         }
       };
-
+  SetVector<GlobalVariable *> SwLDSVector;
+  SwLDSVector.insert(LDSParams.SwLDS);
+  buildInitializerForSwLDSMD(SwLDSVector);
   buildInitializerForSwLDSMD(LDSParams.DirectAccess.StaticLDSGlobals);
   buildInitializerForSwLDSMD(LDSParams.IndirectAccess.StaticLDSGlobals);
   buildInitializerForSwLDSMD(LDSParams.DirectAccess.DynamicLDSGlobals);
@@ -319,6 +405,8 @@ void AMDGPUSwLowerLDS::populateSwMetadataGlobal(Function *Func) {
   assert(LDSParams.SwLDS);
   // Set the alignment to MaxAlignment for SwLDS.
   LDSParams.SwLDS->setAlignment(MaxAlignment);
+  if (LDSParams.SwDynLDS)
+    LDSParams.SwDynLDS->setAlignment(MaxAlignment);
   GlobalValue::SanitizerMetadata MD;
   MD.NoAddress = true;
   LDSParams.SwLDSMetadata->setSanitizerMetadata(MD);
@@ -329,14 +417,23 @@ void AMDGPUSwLowerLDS::populateLDSToReplacementIndicesMap(Function *Func) {
   // Fill the corresponding LDS replacement indices for each LDS access
   // related to this kernel.
   auto &LDSParams = KernelToLDSParametersMap[Func];
+  SetVector<GlobalVariable *> UniqueLDSGlobals;
   auto PopulateIndices = [&](SetVector<GlobalVariable *> &LDSGlobals,
                              uint32_t &Idx) {
     for (auto &GV : LDSGlobals) {
+      if (std::find(UniqueLDSGlobals.begin(), UniqueLDSGlobals.end(), GV) !=
+          UniqueLDSGlobals.end())
+        continue;
+      else
+        UniqueLDSGlobals.insert(GV);
       LDSParams.LDSToReplacementIndicesMap[GV] = {0, Idx, 0};
       ++Idx;
     }
   };
   uint32_t Idx = 0;
+  SetVector<GlobalVariable *> SwLDSVector;
+  SwLDSVector.insert(LDSParams.SwLDS);
+  PopulateIndices(SwLDSVector, Idx);
   PopulateIndices(LDSParams.DirectAccess.StaticLDSGlobals, Idx);
   PopulateIndices(LDSParams.IndirectAccess.StaticLDSGlobals, Idx);
   PopulateIndices(LDSParams.DirectAccess.DynamicLDSGlobals, Idx);
@@ -372,6 +469,7 @@ void AMDGPUSwLowerLDS::replaceKernelLDSAccesses(Function *Func) {
   auto &IndirectAccess = LDSParams.IndirectAccess;
   auto &DirectAccess = LDSParams.DirectAccess;
   // Replace all uses of LDS global in this Function with a Replacement.
+  SetVector<GlobalVariable *> UniqueLDSGlobals;
   auto ReplaceLDSGlobalUses = [&](SetVector<GlobalVariable *> &LDSGlobals) {
     for (auto &GV : LDSGlobals) {
       // Do not generate instructions if LDS access is in non-kernel
@@ -381,6 +479,11 @@ void AMDGPUSwLowerLDS::replaceKernelLDSAccesses(Function *Func) {
           (!DirectAccess.StaticLDSGlobals.contains(GV) &&
            !DirectAccess.DynamicLDSGlobals.contains(GV)))
         continue;
+      if (std::find(UniqueLDSGlobals.begin(), UniqueLDSGlobals.end(), GV) !=
+          UniqueLDSGlobals.end())
+        continue;
+      else
+        UniqueLDSGlobals.insert(GV);
       auto &Indices = LDSParams.LDSToReplacementIndicesMap[GV];
       assert(Indices.size() == 3);
       uint32_t Idx0 = Indices[0];
@@ -391,9 +494,9 @@ void AMDGPUSwLowerLDS::replaceKernelLDSAccesses(Function *Func) {
                             ConstantInt::get(Int32Ty, Idx2)};
       Constant *GEP = ConstantExpr::getGetElementPtr(
           SwLDSMetadataStructType, SwLDSMetadata, GEPIdx, true);
-      Value *Load = IRB.CreateLoad(Int32Ty, GEP);
+      Value *Offset = IRB.CreateLoad(Int32Ty, GEP);
       Value *BasePlusOffset =
-          IRB.CreateInBoundsGEP(IRB.getInt8Ty(), SwLDS, {Load});
+          IRB.CreateInBoundsGEP(IRB.getInt8Ty(), SwLDS, {Offset});
       LLVM_DEBUG(dbgs() << "Sw LDS Lowering, Replacing LDS " << GV->getName());
       replacesUsesOfGlobalInFunction(Func, GV, BasePlusOffset);
     }
@@ -405,7 +508,7 @@ void AMDGPUSwLowerLDS::replaceKernelLDSAccesses(Function *Func) {
 }
 
 void AMDGPUSwLowerLDS::updateMallocSizeForDynamicLDS(
-    Function *Func, Value *CurrMallocSize, Value *HiddenDynLDSSize,
+    Function *Func, Value **CurrMallocSize, Value *HiddenDynLDSSize,
     SetVector<GlobalVariable *> &DynamicLDSGlobals) {
   auto &LDSParams = KernelToLDSParametersMap[Func];
   Type *Int32Ty = IRB.getInt32Ty();
@@ -429,7 +532,7 @@ void AMDGPUSwLowerLDS::updateMallocSizeForDynamicLDS(
     auto *GEPForOffset = IRB.CreateInBoundsGEP(
         MetadataStructType, SwLDSMetadata, {Index0, Index1, Index2Offset});
 
-    IRB.CreateStore(CurrMallocSize, GEPForOffset);
+    IRB.CreateStore(*CurrMallocSize, GEPForOffset);
     // Update the size and Aligned Size metadata.
     Constant *Index2Size = ConstantInt::get(Int32Ty, 1);
     auto *GEPForSize = IRB.CreateInBoundsGEP(MetadataStructType, SwLDSMetadata,
@@ -437,7 +540,7 @@ void AMDGPUSwLowerLDS::updateMallocSizeForDynamicLDS(
 
     Value *CurrDynLDSSize = IRB.CreateLoad(Int32Ty, HiddenDynLDSSize);
     IRB.CreateStore(CurrDynLDSSize, GEPForSize);
-    Constant *Index2AlignedSize = ConstantInt::get(Int32Ty, 1);
+    Constant *Index2AlignedSize = ConstantInt::get(Int32Ty, 2);
     auto *GEPForAlignedSize = IRB.CreateInBoundsGEP(
         MetadataStructType, SwLDSMetadata, {Index0, Index1, Index2AlignedSize});
 
@@ -448,7 +551,7 @@ void AMDGPUSwLowerLDS::updateMallocSizeForDynamicLDS(
     IRB.CreateStore(AlignedDynLDSSize, GEPForAlignedSize);
 
     // Update the Current Malloc Size
-    CurrMallocSize = IRB.CreateAdd(CurrMallocSize, AlignedDynLDSSize);
+    *CurrMallocSize = IRB.CreateAdd(*CurrMallocSize, AlignedDynLDSSize);
   }
 }
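The align-up of the runtime dynamic size uses the usual add/udiv/mul idiom
visible in the tests; for an alignment of 8 it is roughly:

  %t       = add i32 %dynsize, 7    ; %dynsize comes from the hidden argument
  %q       = udiv i32 %t, 8
  %aligned = mul i32 %q, 8          ; %dynsize rounded up to a multiple of 8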
 
@@ -496,10 +599,21 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
   Type *Int32Ty = IRB.getInt32Ty();
   Type *Int64Ty = IRB.getInt64Ty();
 
-  unsigned NumStaticLDS = LDSParams.DirectAccess.StaticLDSGlobals.size() +
-                          LDSParams.IndirectAccess.StaticLDSGlobals.size();
-  unsigned NumDynLDS = LDSParams.DirectAccess.DynamicLDSGlobals.size() +
-                       LDSParams.IndirectAccess.DynamicLDSGlobals.size();
+  SetVector<GlobalVariable *> UniqueLDSGlobals;
+  auto GetUniqueLDSGlobals = [&](SetVector<GlobalVariable *> &LDSGlobals) {
+    for (auto &GV : LDSGlobals)
+      // SetVector::insert is a no-op for duplicates.
+      UniqueLDSGlobals.insert(GV);
+  };
+
+  GetUniqueLDSGlobals(LDSParams.DirectAccess.StaticLDSGlobals);
+  GetUniqueLDSGlobals(LDSParams.IndirectAccess.StaticLDSGlobals);
+  unsigned NumStaticLDS = 1 + UniqueLDSGlobals.size();
+  UniqueLDSGlobals.clear();
 
   if (NumStaticLDS) {
     auto *GEPForEndStaticLDSOffset =
@@ -521,30 +635,37 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
   } else
     CurrMallocSize = IRB.getInt32(MallocSize);
 
-  if (NumDynLDS) {
+  if (LDSParams.SwDynLDS) {
     // Get size from hidden dyn_lds_size argument of kernel
     Value *ImplicitArg =
         IRB.CreateIntrinsic(Intrinsic::amdgcn_implicitarg_ptr, {}, {});
     Value *HiddenDynLDSSize = IRB.CreateInBoundsGEP(
         ImplicitArg->getType(), ImplicitArg,
         {ConstantInt::get(Int64Ty, COV5_HIDDEN_DYN_LDS_SIZE_ARG)});
-    updateMallocSizeForDynamicLDS(Func, CurrMallocSize, HiddenDynLDSSize,
-                                  LDSParams.DirectAccess.DynamicLDSGlobals);
-    updateMallocSizeForDynamicLDS(Func, CurrMallocSize, HiddenDynLDSSize,
-                                  LDSParams.IndirectAccess.DynamicLDSGlobals);
+    UniqueLDSGlobals.clear();
+    GetUniqueLDSGlobals(LDSParams.DirectAccess.DynamicLDSGlobals);
+    GetUniqueLDSGlobals(LDSParams.IndirectAccess.DynamicLDSGlobals);
+    updateMallocSizeForDynamicLDS(Func, &CurrMallocSize, HiddenDynLDSSize,
+                                  UniqueLDSGlobals);
   }
 
   CurrMallocSize = IRB.CreateZExt(CurrMallocSize, Int64Ty);
 
  // Create a call to the malloc function which performs the device global
  // memory allocation, with size equal to the total size of all LDS globals
  // accessed in this kernel.
-  FunctionCallee AMDGPUMallocFunc = M.getOrInsertFunction(
-      StringRef("malloc"),
-      FunctionType::get(IRB.getPtrTy(1), {IRB.getInt64Ty()}, false));
-  Value *MCI = IRB.CreateCall(AMDGPUMallocFunc, {CurrMallocSize});
+  Value *ReturnAddress = IRB.CreateCall(
+      Intrinsic::getDeclaration(&M, Intrinsic::returnaddress), IRB.getInt32(0));
+  FunctionCallee AsanMallocFunc = M.getOrInsertFunction(
+      StringRef("__asan_malloc_impl"),
+      FunctionType::get(Int64Ty, {Int64Ty, Int64Ty}, false));
+  Value *RAPtrToInt = IRB.CreatePtrToInt(ReturnAddress, Int64Ty);
+  Value *AsanMalloc =
+      IRB.CreateCall(AsanMallocFunc, {CurrMallocSize, RAPtrToInt});
+  Value *MallocPtr =
+      IRB.CreateIntToPtr(AsanMalloc, IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS));
 
  // Store the malloc'ed pointer to the new SwLDS global.
-  IRB.CreateStore(MCI, SwLDS);
+  IRB.CreateStore(MallocPtr, SwLDS);
 
   // Create branch to PrevEntryBlock
   IRB.CreateBr(PrevEntryBlock);
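
A minimal sketch of the allocation path emitted above, assuming the __asan_malloc_impl signature this patch uses (i64 size, i64 return address, returning the allocation as a raw i64); a standalone helper with illustrative names, not the pass itself.

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Emits: %ra  = ptrtoint (call ptr @llvm.returnaddress(i32 0)) to i64
    //        %raw = call i64 @__asan_malloc_impl(i64 %size, i64 %ra)
    //        %ptr = inttoptr i64 %raw to ptr addrspace(GlobalAS)
    static Value *emitAsanMalloc(Module &M, IRBuilder<> &IRB, Value *Size64,
                                 unsigned GlobalAS) {
      Type *I64 = IRB.getInt64Ty();
      FunctionCallee Fn = M.getOrInsertFunction(
          "__asan_malloc_impl", FunctionType::get(I64, {I64, I64}, false));
      // The call site's return address lets ASan attribute the allocation.
      Value *RA = IRB.CreateIntrinsic(Intrinsic::returnaddress, {},
                                      {IRB.getInt32(0)});
      Value *RAInt = IRB.CreatePtrToInt(RA, I64);
      Value *Raw = IRB.CreateCall(Fn, {Size64, RAInt});
      return IRB.CreateIntToPtr(Raw,
                                PointerType::get(M.getContext(), GlobalAS));
    }
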
@@ -582,12 +703,18 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
   IRB.SetInsertPoint(FreeBlock, FreeBlock->begin());
 
  // Free the previously allocated device global memory.
-  FunctionCallee AMDGPUFreeReturn = M.getOrInsertFunction(
-      StringRef("free"),
-      FunctionType::get(IRB.getVoidTy(), {IRB.getPtrTy()}, false));
+  Value *LoadMallocPtr =
+      IRB.CreateLoad(IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS), SwLDS);
+
+  FunctionCallee AsanFreeFunc = M.getOrInsertFunction(
+      StringRef("__asan_free_impl"),
+      FunctionType::get(IRB.getVoidTy(), {Int64Ty, Int64Ty}, false));
+  Value *ReturnAddr = IRB.CreateCall(
+      Intrinsic::getDeclaration(&M, Intrinsic::returnaddress), IRB.getInt32(0));
+  Value *RAPToInt = IRB.CreatePtrToInt(ReturnAddr, Int64Ty);
+  Value *MallocPtrToInt = IRB.CreatePtrToInt(LoadMallocPtr, Int64Ty);
+  IRB.CreateCall(AsanFreeFunc, {MallocPtrToInt, RAPToInt});
 
-  Value *MallocPtr = IRB.CreateLoad(IRB.getPtrTy(), SwLDS);
-  IRB.CreateCall(AMDGPUFreeReturn, {MallocPtr});
   IRB.CreateBr(EndBlock);
 
   // End Block
@@ -609,7 +736,8 @@ Constant *AMDGPUSwLowerLDS::getAddressesOfVariablesInKernel(
   assert(SwLDSMetadata);
   auto *SwLDSMetadataStructType =
       cast<StructType>(SwLDSMetadata->getValueType());
-  ArrayType *KernelOffsetsType = ArrayType::get(Int32Ty, Variables.size());
+  ArrayType *KernelOffsetsType =
+      ArrayType::get(IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS), Variables.size());
 
   SmallVector<Constant *> Elements;
   for (size_t i = 0; i < Variables.size(); i++) {
@@ -627,8 +755,7 @@ Constant *AMDGPUSwLowerLDS::getAddressesOfVariablesInKernel(
                           ConstantInt::get(Int32Ty, Idx2)};
     Constant *GEP = ConstantExpr::getGetElementPtr(SwLDSMetadataStructType,
                                                    SwLDSMetadata, GEPIdx, true);
-    auto elt = ConstantExpr::getPtrToInt(GEP, Int32Ty);
-    Elements.push_back(elt);
+    Elements.push_back(GEP);
   }
   return ConstantArray::get(KernelOffsetsType, Elements);
 }
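
The hunk above switches the per-kernel offset entries from ptrtoint'd i32 addresses to genuine addrspace(1) pointer constants. A minimal standalone sketch of that construction, with illustrative names:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DerivedTypes.h"

    using namespace llvm;

    // Wrap pointer constants (e.g. constant GEPs into the metadata struct)
    // into a constant array typed [N x ptr addrspace(1)], so no
    // ptrtoint/inttoptr round trip is needed at the use sites.
    static Constant *makeGlobalPtrArray(LLVMContext &Ctx,
                                        ArrayRef<Constant *> PtrElems) {
      ArrayType *Ty = ArrayType::get(PointerType::get(Ctx, /*AddrSpace=*/1),
                                     PtrElems.size());
      return ConstantArray::get(Ty, PtrElems);
    }
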
@@ -641,7 +768,8 @@ void AMDGPUSwLowerLDS::buildNonKernelLDSBaseTable(
   auto &Kernels = NKLDSParams.OrderedKernels;
   Type *Int32Ty = IRB.getInt32Ty();
   const size_t NumberKernels = Kernels.size();
-  ArrayType *AllKernelsOffsetsType = ArrayType::get(Int32Ty, NumberKernels);
+  ArrayType *AllKernelsOffsetsType =
+      ArrayType::get(IRB.getPtrTy(AMDGPUAS::LOCAL_ADDRESS), NumberKernels);
   std::vector<Constant *> OverallConstantExprElts(NumberKernels);
   for (size_t i = 0; i < NumberKernels; i++) {
     Function *Func = Kernels[i];
@@ -651,15 +779,17 @@ void AMDGPUSwLowerLDS::buildNonKernelLDSBaseTable(
     Constant *GEPIdx[] = {ConstantInt::get(Int32Ty, 0)};
     Constant *GEP =
         ConstantExpr::getGetElementPtr(SwLDS->getType(), SwLDS, GEPIdx, true);
-    auto Elt = ConstantExpr::getPtrToInt(GEP, Int32Ty);
-    OverallConstantExprElts[i] = Elt;
+    OverallConstantExprElts[i] = GEP;
   }
   Constant *init =
       ConstantArray::get(AllKernelsOffsetsType, OverallConstantExprElts);
   NKLDSParams.LDSBaseTable = new GlobalVariable(
       M, AllKernelsOffsetsType, true, GlobalValue::InternalLinkage, init,
       "llvm.amdgcn.sw.lds.base.table", nullptr, GlobalValue::NotThreadLocal,
-      AMDGPUAS::CONSTANT_ADDRESS);
+      AMDGPUAS::GLOBAL_ADDRESS);
+  GlobalValue::SanitizerMetadata MD;
+  MD.NoAddress = true;
+  NKLDSParams.LDSBaseTable->setSanitizerMetadata(MD);
 }
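
Setting SanitizerMetadata with NoAddress keeps ASan from instrumenting or red-zoning this compiler-generated table; a minimal sketch of the idiom used for both tables:

    #include "llvm/IR/GlobalValue.h"
    #include "llvm/IR/GlobalVariable.h"

    using namespace llvm;

    // Emit the global with no_sanitize_address so ASan neither checks
    // accesses to it nor places red zones around it.
    static void markNoAddressSanitize(GlobalVariable &GV) {
      GlobalValue::SanitizerMetadata MD;
      MD.NoAddress = true;
      GV.setSanitizerMetadata(MD);
    }
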
 
 void AMDGPUSwLowerLDS::buildNonKernelLDSOffsetTable(
@@ -679,7 +809,7 @@ void AMDGPUSwLowerLDS::buildNonKernelLDSOffsetTable(
   const size_t NumberKernels = Kernels.size();
 
   ArrayType *KernelOffsetsType =
-      ArrayType::get(IRB.getInt32Ty(), NumberVariables);
+      ArrayType::get(IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS), NumberVariables);
 
   ArrayType *AllKernelsOffsetsType =
       ArrayType::get(KernelOffsetsType, NumberKernels);
@@ -694,7 +824,10 @@ void AMDGPUSwLowerLDS::buildNonKernelLDSOffsetTable(
   NKLDSParams.LDSOffsetTable = new GlobalVariable(
       M, AllKernelsOffsetsType, true, GlobalValue::InternalLinkage, Init,
       "llvm.amdgcn.sw.lds.offset.table", nullptr, GlobalValue::NotThreadLocal,
-      AMDGPUAS::CONSTANT_ADDRESS);
+      AMDGPUAS::GLOBAL_ADDRESS);
+  GlobalValue::SanitizerMetadata MD;
+  MD.NoAddress = true;
+  NKLDSParams.LDSOffsetTable->setSanitizerMetadata(MD);
 }
 
 void AMDGPUSwLowerLDS::lowerNonKernelLDSAccesses(
@@ -715,21 +848,22 @@ void AMDGPUSwLowerLDS::lowerNonKernelLDSAccesses(
   assert(LDSBaseTable && LDSOffsetTable);
   Value *BaseGEP = IRB.CreateInBoundsGEP(
       LDSBaseTable->getValueType(), LDSBaseTable, {IRB.getInt32(0), KernelId});
-  Value *BaseLoad = IRB.CreateLoad(IRB.getInt32Ty(), BaseGEP);
+  Value *BaseLoad =
+      IRB.CreateLoad(IRB.getPtrTy(AMDGPUAS::LOCAL_ADDRESS), BaseGEP);
 
   for (GlobalVariable *GV : LDSGlobals) {
-    Value *BasePtr = IRB.CreateIntToPtr(BaseLoad, GV->getType());
     auto GVIt = std::find(OrdereLDSGlobals.begin(), OrdereLDSGlobals.end(), GV);
     assert(GVIt != OrdereLDSGlobals.end());
     uint32_t GVOffset = std::distance(OrdereLDSGlobals.begin(), GVIt);
+
     Value *OffsetGEP = IRB.CreateInBoundsGEP(
         LDSOffsetTable->getValueType(), LDSOffsetTable,
         {IRB.getInt32(0), KernelId, IRB.getInt32(GVOffset)});
-    Value *OffsetLoad = IRB.CreateLoad(IRB.getInt32Ty(), OffsetGEP);
-    OffsetLoad = IRB.CreateIntToPtr(OffsetLoad, GV->getType());
-    OffsetLoad = IRB.CreateLoad(IRB.getInt32Ty(), OffsetLoad);
+    Value *OffsetLoad =
+        IRB.CreateLoad(IRB.getPtrTy(AMDGPUAS::GLOBAL_ADDRESS), OffsetGEP);
+    Value *Offset = IRB.CreateLoad(IRB.getInt32Ty(), OffsetLoad);
     Value *BasePlusOffset =
-        IRB.CreateInBoundsGEP(IRB.getInt8Ty(), BasePtr, {OffsetLoad});
+        IRB.CreateInBoundsGEP(IRB.getInt8Ty(), BaseLoad, {Offset});
     LLVM_DEBUG(dbgs() << "Sw LDS Lowering, Replace non-kernel LDS for "
                       << GV->getName().str());
     replacesUsesOfGlobalInFunction(Func, GV, BasePlusOffset);
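
Conceptually, the non-kernel lookup above is now a two-level, pointer-typed table walk: the base table yields the kernel's LDS base and the offset table yields a pointer to that variable's offset slot. A plain C++ sketch of the addressing scheme (illustrative names and sizes; in the real IR the tables live in addrspace(1) and the base pointer in addrspace(3)):

    #include <cstdint>

    constexpr unsigned NumKernels = 2, NumVariables = 4;
    // Per-kernel LDS base pointers (llvm.amdgcn.sw.lds.base.table).
    static char *sw_lds_base[NumKernels];
    // Pointers to each variable's offset slot in the kernel's metadata
    // struct (llvm.amdgcn.sw.lds.offset.table).
    static uint32_t *sw_lds_offset[NumKernels][NumVariables];

    static char *resolveLDSVariable(unsigned KernelId, unsigned VarIdx) {
      char *Base = sw_lds_base[KernelId];
      uint32_t Offset = *sw_lds_offset[KernelId][VarIdx];
      return Base + Offset; // replaces uses of the original LDS global
    }
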
@@ -820,8 +954,10 @@ bool AMDGPUSwLowerLDS::run() {
       removeFnAttrFromReachable(CG, Func, "amdgpu-no-workitem-id-y");
       removeFnAttrFromReachable(CG, Func, "amdgpu-no-workitem-id-z");
       reorderStaticDynamicIndirectLDSSet(LDSParams);
-      populateSwLDSGlobal(Func);
+      buildSwLDSGlobal(Func);
+      buildSwDynLDSGlobal(Func);
       populateSwMetadataGlobal(Func);
+      populateSwLDSAttributeAndMetadata(Func);
       populateLDSToReplacementIndicesMap(Func);
       DomTreeUpdater DTU(DTCallback(*Func),
                          DomTreeUpdater::UpdateStrategy::Lazy);
@@ -850,6 +986,15 @@ bool AMDGPUSwLowerLDS::run() {
       lowerNonKernelLDSAccesses(Func, OrderedLDSGlobals, NKLDSParams);
     }
   }
+
+  for (auto &GV : make_early_inc_range(M.globals())) {
+    if (AMDGPU::isLDSVariableToLower(GV)) {
+      // TODO: These globals should probably also be removed from the
+      // llvm.used and llvm.compiler.used lists.
+      GV.removeDeadConstantUsers();
+      if (GV.use_empty())
+        GV.eraseFromParent();
+    }
+  }
   return Changed;
 }
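
The cleanup loop above relies on the erase-while-iterating idiom; a minimal sketch of it, minus the AMDGPU-specific isLDSVariableToLower filter:

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // make_early_inc_range advances the iterator before each body executes,
    // so erasing the current global does not invalidate the traversal.
    static void eraseUnusedGlobals(Module &M) {
      for (GlobalVariable &GV : make_early_inc_range(M.globals())) {
        GV.removeDeadConstantUsers();
        if (GV.use_empty())
          GV.eraseFromParent();
      }
    }
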
 
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll
index 587a1b78a0aaf..7fb2366f9843c 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-indirect-access.ll
@@ -7,26 +7,20 @@
 @lds_3 = external addrspace(3) global [0 x i8], align 4
 @lds_4 = external addrspace(3) global [0 x i8], align 8
 
-; @llvm.amdgcn.sw.lds.base.table = internal addrspace(4) constant [1 x i32] [i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.k0 to i32)]
-; @llvm.amdgcn.sw.lds.offset.table = internal addrspace(4) constant [1 x [2 x i32]] [[2 x i32] [i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0) to i32), i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0) to i32)]]
 define void @use_variables() sanitize_address {
 ; CHECK-LABEL: define void @use_variables(
 ; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[TMP5]], align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
-; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
-; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(4) [[TMP11]], align 4
-; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i32 [[TMP12]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(3) [[TMP13]], align 4
-; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP10]], i32 [[TMP14]]
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP11]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP10]]
 ; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP9]], align 4
 ; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP15]], align 8
 ; CHECK-NEXT:    ret void
@@ -38,7 +32,7 @@ define void @use_variables() sanitize_address {
 
 define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k0(
-; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META2:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -48,48 +42,55 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 4
 ; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP9]], [[TMP7]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
 ; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
-; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
-; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
 ; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP11]], 7
 ; CHECK-NEXT:    [[TMP13:%.*]] = udiv i32 [[TMP12]], 8
 ; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP13]], 8
-; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 4
 ; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP8]], [[TMP14]]
-; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 0), align 4
 ; CHECK-NEXT:    [[TMP27:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
-; CHECK-NEXT:    store i32 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 1), align 4
 ; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP27]], 7
 ; CHECK-NEXT:    [[TMP18:%.*]] = udiv i32 [[TMP17]], 8
 ; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], 8
-; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 2), align 4
 ; CHECK-NEXT:    [[TMP28:%.*]] = add i32 [[TMP15]], [[TMP19]]
-; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP8]] to i64
-; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP16]])
+; CHECK-NEXT:    [[TMP26:%.*]] = zext i32 [[TMP28]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP26]], i64 [[TMP23]])
+; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP35]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    br label [[TMP21]]
-; CHECK:       23:
+; CHECK:       26:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
-; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
-; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP22]]
 ; CHECK-NEXT:    [[TMP24:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP24]]
+; CHECK-NEXT:    [[TMP29:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP29]]
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]
 ; CHECK-NEXT:    call void @use_variables()
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP23]], align 1
-; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP25]], align 2
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP25]], align 1
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP30]], align 2
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP26:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
-; CHECK-NEXT:    call void @free(ptr [[TMP26]])
+; CHECK-NEXT:    [[TMP31:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP32:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP33:%.*]] = ptrtoint ptr [[TMP32]] to i64
+; CHECK-NEXT:    [[TMP34:%.*]] = ptrtoint ptr addrspace(1) [[TMP31]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP34]], i64 [[TMP33]])
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       End:
 ; CHECK-NEXT:    ret void
@@ -100,5 +101,5 @@ define amdgpu_kernel void @k0() sanitize_address {
   ret void
 }
 ;.
-; CHECK: [[META0]] = !{i32 0}
+; CHECK: [[META2]] = !{i32 0}
 ;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll
index 8de146b396c30..be8044d8dae07 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-dynamic-lds-test.ll
@@ -6,10 +6,9 @@
 @lds_2 = external addrspace(3) global [0 x i8]
 
 ;.
-; CHECK: @lds_1 = external addrspace(3) global [0 x i8]
-; CHECK: @lds_2 = external addrspace(3) global [0 x i8]
-; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 1
-; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type zeroinitializer, no_sanitize_address
+; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 1, !absolute_symbol [[META0:![0-9]+]]
+; CHECK: @llvm.amdgcn.k0.dynlds = external addrspace(3) global [0 x i8], no_sanitize_address, align 1, !absolute_symbol [[META1:![0-9]+]]
+; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 0, i32 0 } }, no_sanitize_address
 ;.
 define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k0(
@@ -23,53 +22,57 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
 ; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 0, i32 2), align 4
+; CHECK-NEXT:    [[TMP24:%.*]] = add i32 [[TMP8]], [[TMP9]]
 ; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
 ; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP20]], i64 15
-; CHECK-NEXT:    store i32 0, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
-; CHECK-NEXT:    [[TMP19:%.*]] = load i32, ptr addrspace(4) [[TMP18]], align 4
-; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 0, i32 1), align 4
-; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[TMP19]], 0
-; CHECK-NEXT:    [[TMP22:%.*]] = udiv i32 [[TMP21]], 1
-; CHECK-NEXT:    [[TMP23:%.*]] = mul i32 [[TMP22]], 1
-; CHECK-NEXT:    store i32 [[TMP23]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 0, i32 1), align 4
-; CHECK-NEXT:    [[TMP24:%.*]] = add i32 0, [[TMP23]]
 ; CHECK-NEXT:    store i32 [[TMP24]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP13:%.*]] = load i32, ptr addrspace(4) [[TMP18]], align 4
 ; CHECK-NEXT:    store i32 [[TMP13]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 1), align 4
 ; CHECK-NEXT:    [[TMP14:%.*]] = add i32 [[TMP13]], 0
 ; CHECK-NEXT:    [[TMP15:%.*]] = udiv i32 [[TMP14]], 1
 ; CHECK-NEXT:    [[TMP16:%.*]] = mul i32 [[TMP15]], 1
-; CHECK-NEXT:    store i32 [[TMP16]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP16]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 4
 ; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP24]], [[TMP16]]
-; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 0)
+; CHECK-NEXT:    [[TMP21:%.*]] = zext i32 [[TMP17]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP21]], i64 [[TMP23]])
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP19]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       19:
+; CHECK:       21:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP8]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP10]]
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP9]], align 4
-; CHECK-NEXT:    store i8 8, ptr addrspace(3) [[TMP11]], align 8
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP11]], align 4
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP12:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
-; CHECK-NEXT:    call void @free(ptr [[TMP12]])
+; CHECK-NEXT:    [[TMP28:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP25:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = ptrtoint ptr addrspace(1) [[TMP28]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP27]], i64 [[TMP26]])
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       End:
 ; CHECK-NEXT:    ret void
 ;
   store i8 7, ptr addrspace(3) @lds_1, align 4
-  store i8 8, ptr addrspace(3) @lds_2, align 8
+  ;store i8 8, ptr addrspace(3) @lds_2, align 8
   ret void
 }
 ;.
-; CHECK: attributes #[[ATTR0]] = { sanitize_address }
-; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; CHECK: attributes #[[ATTR2:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-lds-size"="8,8" }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
+; CHECK: [[META0]] = !{i32 0, i32 1}
+; CHECK: [[META1]] = !{i32 8, i32 9}
 ;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll
index 99f911ef48f93..3deba8ce138a8 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multi-static-dynamic-indirect-access.ll
@@ -7,26 +7,20 @@
 @lds_3 = external addrspace(3) global [0 x i8], align 4
 @lds_4 = external addrspace(3) global [0 x i8], align 8
 
-; @llvm.amdgcn.sw.lds.base.table = internal addrspace(4) constant [2 x i32] [i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.k0 to i32), i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.k1 to i32)]
-; @llvm.amdgcn.sw.lds.offset.table = internal addrspace(4) constant [2 x [4 x i32]] [[4 x i32] [i32 ptrtoint (ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md to i32), i32 poison, i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0) to i32), i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0) to i32)], [4 x i32] [i32 ptrtoint (ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md to i32), i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k1.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 1, i32 0) to i32), i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k1.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0) to i32), i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k1.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 0) to i32)]]
 define void @use_variables_1() sanitize_address {
 ; CHECK-LABEL: define void @use_variables_1(
 ; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x [4 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 2
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[TMP5]], align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x [4 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 2
+; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
-; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x [4 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 3
-; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(4) [[TMP11]], align 4
-; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i32 [[TMP12]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(3) [[TMP13]], align 4
-; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP10]], i32 [[TMP14]]
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x [4 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 3
+; CHECK-NEXT:    [[TMP12:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP11]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP10]]
 ; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP9]], align 4
 ; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP15]], align 8
 ; CHECK-NEXT:    ret void
@@ -40,20 +34,16 @@ define void @use_variables_2() sanitize_address {
 ; CHECK-LABEL: define void @use_variables_2(
 ; CHECK-SAME: ) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x [4 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[TMP5]], align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x [4 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
-; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x [4 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
-; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(4) [[TMP11]], align 4
-; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i32 [[TMP12]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(3) [[TMP13]], align 4
-; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP10]], i32 [[TMP14]]
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [2 x [4 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP11]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP10]]
 ; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP9]], align 1
 ; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP15]], align 2
 ; CHECK-NEXT:    ret void
@@ -65,7 +55,7 @@ define void @use_variables_2() sanitize_address {
 
 define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k0(
-; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META3:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -75,36 +65,40 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 0, i32 2), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 4
 ; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP9]], [[TMP7]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
 ; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
-; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
-; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
 ; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP11]], 7
 ; CHECK-NEXT:    [[TMP13:%.*]] = udiv i32 [[TMP12]], 8
 ; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP13]], 8
-; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 4
 ; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP8]], [[TMP14]]
-; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
 ; CHECK-NEXT:    [[TMP25:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
-; CHECK-NEXT:    store i32 [[TMP25]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP25]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
 ; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP25]], 7
 ; CHECK-NEXT:    [[TMP18:%.*]] = udiv i32 [[TMP17]], 8
 ; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], 8
-; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 4
 ; CHECK-NEXT:    [[TMP26:%.*]] = add i32 [[TMP15]], [[TMP19]]
-; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP8]] to i64
-; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP16]])
+; CHECK-NEXT:    [[TMP27:%.*]] = zext i32 [[TMP26]] to i64
+; CHECK-NEXT:    [[TMP28:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP33:%.*]] = ptrtoint ptr [[TMP28]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP27]], i64 [[TMP33]])
+; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP24]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    br label [[TMP21]]
-; CHECK:       23:
+; CHECK:       26:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
-; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
+; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP22]]
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]
 ; CHECK-NEXT:    call void @use_variables_1()
 ; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP23]], align 1
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
@@ -112,8 +106,11 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP24:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
-; CHECK-NEXT:    call void @free(ptr [[TMP24]])
+; CHECK-NEXT:    [[TMP29:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP30:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP31:%.*]] = ptrtoint ptr [[TMP30]] to i64
+; CHECK-NEXT:    [[TMP32:%.*]] = ptrtoint ptr addrspace(1) [[TMP29]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP32]], i64 [[TMP31]])
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       End:
 ; CHECK-NEXT:    ret void
@@ -125,7 +122,7 @@ define amdgpu_kernel void @k0() sanitize_address {
 
 define amdgpu_kernel void @k1() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k1(
-; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META1:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR2:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META4:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -135,26 +132,18 @@ define amdgpu_kernel void @k1() sanitize_address {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP14:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 1, i32 0), align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 2, i32 2), align 4
 ; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP9]], [[TMP7]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
 ; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
 ; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 4
-; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
-; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 1), align 4
-; CHECK-NEXT:    [[TMP26:%.*]] = add i32 [[TMP11]], 7
-; CHECK-NEXT:    [[TMP27:%.*]] = udiv i32 [[TMP26]], 8
-; CHECK-NEXT:    [[TMP28:%.*]] = mul i32 [[TMP27]], 8
-; CHECK-NEXT:    store i32 [[TMP28]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 1), align 4
-; CHECK-NEXT:    [[TMP29:%.*]] = add i32 [[TMP8]], [[TMP28]]
-; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 4
 ; CHECK-NEXT:    [[TMP30:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
 ; CHECK-NEXT:    store i32 [[TMP30]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 1), align 4
 ; CHECK-NEXT:    [[TMP33:%.*]] = add i32 [[TMP30]], 7
 ; CHECK-NEXT:    [[TMP18:%.*]] = udiv i32 [[TMP33]], 8
 ; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], 8
-; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 2), align 4
 ; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP8]], [[TMP19]]
 ; CHECK-NEXT:    store i32 [[TMP20]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 0), align 4
 ; CHECK-NEXT:    [[TMP21:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
@@ -162,29 +151,34 @@ define amdgpu_kernel void @k1() sanitize_address {
 ; CHECK-NEXT:    [[TMP22:%.*]] = add i32 [[TMP21]], 7
 ; CHECK-NEXT:    [[TMP23:%.*]] = udiv i32 [[TMP22]], 8
 ; CHECK-NEXT:    [[TMP24:%.*]] = mul i32 [[TMP23]], 8
-; CHECK-NEXT:    store i32 [[TMP24]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP24]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 4, i32 2), align 4
 ; CHECK-NEXT:    [[TMP25:%.*]] = add i32 [[TMP20]], [[TMP24]]
-; CHECK-NEXT:    [[TMP12:%.*]] = zext i32 [[TMP8]] to i64
-; CHECK-NEXT:    [[TMP13:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP12]])
+; CHECK-NEXT:    [[TMP26:%.*]] = zext i32 [[TMP25]] to i64
+; CHECK-NEXT:    [[TMP27:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP28:%.*]] = ptrtoint ptr [[TMP27]] to i64
+; CHECK-NEXT:    [[TMP34:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP26]], i64 [[TMP28]])
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP34]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, align 8
 ; CHECK-NEXT:    br label [[TMP14]]
-; CHECK:       28:
+; CHECK:       26:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
-; CHECK-NEXT:    [[TMP15:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 4
-; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, i32 [[TMP15]]
 ; CHECK-NEXT:    [[TMP31:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k1.md, i32 0, i32 3, i32 0), align 4
 ; CHECK-NEXT:    [[TMP32:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, i32 [[TMP31]]
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k1.dynlds) ]
 ; CHECK-NEXT:    call void @use_variables_1()
 ; CHECK-NEXT:    call void @use_variables_2()
-; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP16]], align 4
+; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP32]], align 4
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP17:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, align 8
-; CHECK-NEXT:    call void @free(ptr [[TMP17]])
+; CHECK-NEXT:    [[TMP29:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k1, align 8
+; CHECK-NEXT:    [[TMP35:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP36:%.*]] = ptrtoint ptr [[TMP35]] to i64
+; CHECK-NEXT:    [[TMP37:%.*]] = ptrtoint ptr addrspace(1) [[TMP29]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP37]], i64 [[TMP36]])
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       End:
 ; CHECK-NEXT:    ret void
@@ -195,6 +189,6 @@ define amdgpu_kernel void @k1() sanitize_address {
   ret void
 }
 ;.
-; CHECK: [[META0]] = !{i32 0}
-; CHECK: [[META1]] = !{i32 1}
+; CHECK: [[META3]] = !{i32 0}
+; CHECK: [[META4]] = !{i32 1}
 ;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll
index 7e6125503d649..4384287797f35 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-multiple-blocks-return.ll
@@ -8,10 +8,8 @@
 @lds_2 = internal addrspace(3) global i32 poison
 
 ;.
-; CHECK: @lds_1 = internal addrspace(3) global i32 poison
-; CHECK: @lds_2 = internal addrspace(3) global i32 poison
-; CHECK: @llvm.amdgcn.sw.lds.test_kernel = internal addrspace(3) global ptr poison, no_sanitize_address, align 4
-; CHECK: @llvm.amdgcn.sw.lds.test_kernel.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.test_kernel.md.type { %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 0, i32 4, i32 4 }, %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 4, i32 4, i32 4 } }, no_sanitize_address
+; CHECK: @llvm.amdgcn.sw.lds.test_kernel = internal addrspace(3) global ptr poison, no_sanitize_address, align 4, !absolute_symbol [[META0:![0-9]+]]
+; CHECK: @llvm.amdgcn.sw.lds.test_kernel.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.test_kernel.md.type { %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 0, i32 8, i32 8 }, %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 8, i32 4, i32 4 }, %llvm.amdgcn.sw.lds.test_kernel.md.item { i32 12, i32 4, i32 4 } }, no_sanitize_address
 ;.
 define amdgpu_kernel void @test_kernel() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @test_kernel(
@@ -25,23 +23,26 @@ define amdgpu_kernel void @test_kernel() sanitize_address {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP15:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 1, i32 0), align 4
-; CHECK-NEXT:    [[TMP16:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP16:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 2, i32 2), align 4
 ; CHECK-NEXT:    [[TMP18:%.*]] = add i32 [[TMP15]], [[TMP16]]
 ; CHECK-NEXT:    [[TMP17:%.*]] = zext i32 [[TMP18]] to i64
-; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP17]])
+; CHECK-NEXT:    [[TMP14:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[TMP14]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP17]], i64 [[TMP19]])
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP20]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, align 8
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       11:
+; CHECK:       14:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, align 4
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, i32 [[TMP8]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, i32 [[TMP10]]
-; CHECK-NEXT:    [[TMP12:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr addrspace(1)
+; CHECK-NEXT:    [[TMP25:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_TEST_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.test_kernel.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP26:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, i32 [[TMP25]]
+; CHECK-NEXT:    [[TMP12:%.*]] = addrspacecast ptr addrspace(3) [[TMP11]] to ptr addrspace(1)
 ; CHECK-NEXT:    [[VAL1:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
-; CHECK-NEXT:    [[TMP13:%.*]] = addrspacecast ptr addrspace(3) [[TMP11]] to ptr addrspace(1)
+; CHECK-NEXT:    [[TMP13:%.*]] = addrspacecast ptr addrspace(3) [[TMP26]] to ptr addrspace(1)
 ; CHECK-NEXT:    [[VAL2:%.*]] = load i32, ptr addrspace(1) [[TMP13]], align 4
 ; CHECK-NEXT:    [[RESULT:%.*]] = add i32 [[VAL1]], [[VAL2]]
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[RESULT]], 0
@@ -59,8 +60,11 @@ define amdgpu_kernel void @test_kernel() sanitize_address {
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP14:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, align 8
-; CHECK-NEXT:    call void @free(ptr [[TMP14]])
+; CHECK-NEXT:    [[TMP21:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.test_kernel, align 8
+; CHECK-NEXT:    [[TMP22:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP24:%.*]] = ptrtoint ptr addrspace(1) [[TMP21]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP24]], i64 [[TMP23]])
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       End:
 ; CHECK-NEXT:    ret void
@@ -86,7 +90,10 @@ val1_negative:
 ret void
 }
 ;.
-; CHECK: attributes #[[ATTR0]] = { sanitize_address }
+; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-lds-size"="16" }
 ; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; CHECK: attributes #[[ATTR2:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
+; CHECK: [[META0]] = !{i32 0, i32 1}
 ;.
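
With sanitize_address in effect, the lowered kernel no longer calls plain @malloc/@free for the device-global backing store; both ends now go through the ASan runtime, with the return address passed along so reports can attribute the allocation. A minimal sketch of the pattern the CHECK lines above encode (register names and %size are illustrative placeholders, not taken from the test):

  %ra    = call ptr @llvm.returnaddress(i32 0)
  %ra64  = ptrtoint ptr %ra to i64
  %raw   = call i64 @__asan_malloc_impl(i64 %size, i64 %ra64)
  %buf   = inttoptr i64 %raw to ptr addrspace(1)
  ; ... kernel body addresses %buf through the addrspace(3) sw.lds pointer ...
  %buf64 = ptrtoint ptr addrspace(1) %buf to i64
  call void @__asan_free_impl(i64 %buf64, i64 %ra64)

The static footprint is also stamped on the kernel via "amdgpu-lds-size", as the updated attribute check shows.
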
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll
index aa22cfaa387f4..2548c872347b8 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-indirect-access.ll
@@ -8,26 +8,20 @@
 @lds_3 = external addrspace(3) global [0 x i8], align 4
 @lds_4 = external addrspace(3) global [0 x i8], align 8
 
-; @llvm.amdgcn.sw.lds.base.table = internal addrspace(4) constant [1 x i32] [i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.k0 to i32)]
-; @llvm.amdgcn.sw.lds.offset.table = internal addrspace(4) constant [1 x [2 x i32]] [[2 x i32] [i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0) to i32), i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0) to i32)]]
 define void @use_variables() sanitize_address {
 ; CHECK-LABEL: define void @use_variables(
 ; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[TMP5]], align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
-; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
-; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(4) [[TMP11]], align 4
-; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i32 [[TMP12]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(3) [[TMP13]], align 4
-; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP10]], i32 [[TMP14]]
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP11]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP10]]
 ; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP9]], align 4
 ; CHECK-NEXT:    store i8 3, ptr addrspace(3) [[TMP15]], align 8
 ; CHECK-NEXT:    ret void
@@ -39,7 +33,7 @@ define void @use_variables() sanitize_address {
 
 define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k0(
-; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META2:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -49,48 +43,55 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 4
 ; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP9]], [[TMP7]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
 ; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
-; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
-; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
 ; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP11]], 7
 ; CHECK-NEXT:    [[TMP13:%.*]] = udiv i32 [[TMP12]], 8
 ; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP13]], 8
-; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 4
 ; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP8]], [[TMP14]]
-; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 0), align 4
 ; CHECK-NEXT:    [[TMP27:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
-; CHECK-NEXT:    store i32 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP27]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 1), align 4
 ; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP27]], 7
 ; CHECK-NEXT:    [[TMP18:%.*]] = udiv i32 [[TMP17]], 8
 ; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], 8
-; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 2), align 4
 ; CHECK-NEXT:    [[TMP28:%.*]] = add i32 [[TMP15]], [[TMP19]]
-; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP8]] to i64
-; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP16]])
+; CHECK-NEXT:    [[TMP26:%.*]] = zext i32 [[TMP28]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP35:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP26]], i64 [[TMP23]])
+; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP35]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    br label [[TMP21]]
-; CHECK:       23:
+; CHECK:       26:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
-; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
-; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP22]]
 ; CHECK-NEXT:    [[TMP24:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP24]]
+; CHECK-NEXT:    [[TMP29:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP29]]
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]
 ; CHECK-NEXT:    call void @use_variables()
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP23]], align 1
-; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP25]], align 2
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP25]], align 1
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP30]], align 2
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP26:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
-; CHECK-NEXT:    call void @free(ptr [[TMP26]])
+; CHECK-NEXT:    [[TMP31:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP32:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP33:%.*]] = ptrtoint ptr [[TMP32]] to i64
+; CHECK-NEXT:    [[TMP34:%.*]] = ptrtoint ptr addrspace(1) [[TMP31]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP34]], i64 [[TMP33]])
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       End:
 ; CHECK-NEXT:    ret void
@@ -101,5 +102,5 @@ define amdgpu_kernel void @k0() sanitize_address {
   ret void
 }
 ;.
-; CHECK: [[META0]] = !{i32 0}
+; CHECK: [[META2]] = !{i32 0}
 ;.
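
The base and offset tables have switched from i32 address blobs in addrspace(4) to typed pointers in addrspace(1), so non-kernel functions index them without inttoptr round-trips. Sketched from the use_variables checks above (register names illustrative):

  %id    = call i32 @llvm.amdgcn.lds.kernel.id()
  %bslot = getelementptr inbounds [1 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 %id
  %base  = load ptr addrspace(3), ptr addrspace(1) %bslot, align 4
  %oslot = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 %id, i32 0
  %optr  = load ptr addrspace(1), ptr addrspace(1) %oslot, align 8
  %off   = load i32, ptr addrspace(1) %optr, align 4
  %addr  = getelementptr inbounds i8, ptr addrspace(3) %base, i32 %off

One base load now serves every variable in the function, where the old form re-materialized the base with a fresh inttoptr per access.
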
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll
index 6d93c6a90b10b..35f96fd21ee55 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-dynamic-lds-test.ll
@@ -8,12 +8,9 @@
 @lds_4 = external addrspace(3) global [0 x i8], align 8
 
 ;.
-; CHECK: @lds_1 = internal addrspace(3) global [1 x i8] poison, align 4
-; CHECK: @lds_2 = internal addrspace(3) global [1 x i32] poison, align 8
-; CHECK: @lds_3 = external addrspace(3) global [0 x i8], align 4
-; CHECK: @lds_4 = external addrspace(3) global [0 x i8], align 8
-; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 8
-; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 1, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 4, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 16, i32 0, i32 0 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 16, i32 0, i32 0 } }, no_sanitize_address
+; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 8, !absolute_symbol [[META0:![0-9]+]]
+; CHECK: @llvm.amdgcn.k0.dynlds = external addrspace(3) global [0 x i8], no_sanitize_address, align 8, !absolute_symbol [[META1:![0-9]+]]
+; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 1, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 16, i32 4, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 24, i32 0, i32 0 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 24, i32 0, i32 0 } }, no_sanitize_address
 ;.
 define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k0(
@@ -27,53 +24,60 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP21:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 4
 ; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP9]], [[TMP7]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
 ; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
-; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
-; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
 ; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP11]], 7
 ; CHECK-NEXT:    [[TMP13:%.*]] = udiv i32 [[TMP12]], 8
 ; CHECK-NEXT:    [[TMP14:%.*]] = mul i32 [[TMP13]], 8
-; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP14]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 4
 ; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP8]], [[TMP14]]
-; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
+; CHECK-NEXT:    store i32 [[TMP15]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 0), align 4
 ; CHECK-NEXT:    [[TMP31:%.*]] = load i32, ptr addrspace(4) [[TMP10]], align 4
-; CHECK-NEXT:    store i32 [[TMP31]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP31]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 1), align 4
 ; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP31]], 7
 ; CHECK-NEXT:    [[TMP18:%.*]] = udiv i32 [[TMP17]], 8
 ; CHECK-NEXT:    [[TMP19:%.*]] = mul i32 [[TMP18]], 8
-; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP19]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 2), align 4
 ; CHECK-NEXT:    [[TMP32:%.*]] = add i32 [[TMP15]], [[TMP19]]
-; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP8]] to i64
-; CHECK-NEXT:    [[TMP20:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP16]])
+; CHECK-NEXT:    [[TMP30:%.*]] = zext i32 [[TMP32]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP23:%.*]] = ptrtoint ptr [[TMP22]] to i64
+; CHECK-NEXT:    [[TMP39:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP30]], i64 [[TMP23]])
+; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP39]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP20]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    br label [[TMP21]]
-; CHECK:       23:
+; CHECK:       26:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
-; CHECK-NEXT:    [[TMP22:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
-; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP22]]
 ; CHECK-NEXT:    [[TMP24:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP24]]
 ; CHECK-NEXT:    [[TMP26:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
 ; CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP26]]
 ; CHECK-NEXT:    [[TMP28:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
 ; CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP28]]
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP23]], align 4
-; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP25]], align 8
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP27]], align 4
-; CHECK-NEXT:    store i8 8, ptr addrspace(3) [[TMP29]], align 8
+; CHECK-NEXT:    [[TMP33:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 0), align 4
+; CHECK-NEXT:    [[TMP34:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP33]]
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.k0.dynlds) ]
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP25]], align 4
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP27]], align 8
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP29]], align 4
+; CHECK-NEXT:    store i8 8, ptr addrspace(3) [[TMP34]], align 8
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP30:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
-; CHECK-NEXT:    call void @free(ptr [[TMP30]])
+; CHECK-NEXT:    [[TMP35:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP36:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP37:%.*]] = ptrtoint ptr [[TMP36]] to i64
+; CHECK-NEXT:    [[TMP38:%.*]] = ptrtoint ptr addrspace(1) [[TMP35]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP38]], i64 [[TMP37]])
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       End:
 ; CHECK-NEXT:    ret void
@@ -85,7 +89,11 @@ define amdgpu_kernel void @k0() sanitize_address {
   ret void
 }
 ;.
-; CHECK: attributes #[[ATTR0]] = { sanitize_address }
-; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; CHECK: attributes #[[ATTR2:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-lds-size"="24,24" }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
+; CHECK: [[META0]] = !{i32 0, i32 1}
+; CHECK: [[META1]] = !{i32 24, i32 25}
 ;.
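
Two things are visible in the rewritten globals above. First, the metadata struct gains a leading { i32 0, i32 8, i32 8 } entry, which reads as the 8-byte malloc-pointer slot itself (an inference from the values; it explains why every field index in the kernel body shifts up by one). Second, the dynamic-LDS tail is published as a zero-sized marker global pinned with !absolute_symbol, and "amdgpu-lds-size"="24,24" matches the 24 bytes of static layout. How the kernel reaches the dynamic region, per the TMP33/TMP34 checks (a sketch):

  ; field 4 of the md struct holds the running offset of the dynamic tail
  %dyn.off = load i32, ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 0), align 4
  %dyn.ptr = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 %dyn.off
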
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll
index 0ca8a51c049f2..5b285f467c5fa 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-function-param.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
 ; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
 
 ; Test to check if LDS accesses are lowered correctly when LDS is passed as function
@@ -6,6 +6,10 @@
 
 @lds_var = internal addrspace(3) global [1024 x i32] poison, align 4
 
+;.
+; CHECK: @llvm.amdgcn.sw.lds.my_kernel = internal addrspace(3) global ptr poison, no_sanitize_address, align 4, !absolute_symbol [[META0:![0-9]+]]
+; CHECK: @llvm.amdgcn.sw.lds.my_kernel.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.my_kernel.md.type { %llvm.amdgcn.sw.lds.my_kernel.md.item { i32 0, i32 8, i32 8 }, %llvm.amdgcn.sw.lds.my_kernel.md.item { i32 8, i32 4096, i32 4096 } }, no_sanitize_address
+;.
 define void @my_function(ptr addrspace(3) %lds_arg) sanitize_address {
 ; CHECK-LABEL: define void @my_function(
 ; CHECK-SAME: ptr addrspace(3) [[LDS_ARG:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -22,7 +26,7 @@ define void @my_function(ptr addrspace(3) %lds_arg) sanitize_address {
 
 define amdgpu_kernel void @my_kernel() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @my_kernel(
-; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -32,17 +36,20 @@ define amdgpu_kernel void @my_kernel() sanitize_address {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, align 4
-; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_MY_KERNEL_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, i32 0, i32 0, i32 2), align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_MY_KERNEL_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_MY_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, i32 0, i32 1, i32 2), align 4
 ; CHECK-NEXT:    [[TMP14:%.*]] = add i32 [[TMP11]], [[TMP12]]
 ; CHECK-NEXT:    [[TMP13:%.*]] = zext i32 [[TMP14]] to i64
-; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP13]])
+; CHECK-NEXT:    [[TMP10:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[TMP10]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP13]], i64 [[TMP15]])
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP16]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, align 8
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       11:
+; CHECK:       14:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_MY_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, i32 [[TMP8]]
 ; CHECK-NEXT:    [[LDS_PTR:%.*]] = getelementptr [1024 x i32], ptr addrspace(3) [[TMP9]], i32 0, i32 0
 ; CHECK-NEXT:    call void @my_function(ptr addrspace(3) [[LDS_PTR]])
@@ -51,8 +58,11 @@ define amdgpu_kernel void @my_kernel() sanitize_address {
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP10:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, align 8
-; CHECK-NEXT:    call void @free(ptr [[TMP10]])
+; CHECK-NEXT:    [[TMP17:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, align 8
+; CHECK-NEXT:    [[TMP18:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[TMP18]] to i64
+; CHECK-NEXT:    [[TMP20:%.*]] = ptrtoint ptr addrspace(1) [[TMP17]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP20]], i64 [[TMP19]])
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       End:
 ; CHECK-NEXT:    ret void
@@ -61,3 +71,12 @@ define amdgpu_kernel void @my_kernel() sanitize_address {
   call void @my_function(ptr addrspace(3) %lds_ptr)
   ret void
 }
+;.
+; CHECK: attributes #[[ATTR0]] = { sanitize_address }
+; CHECK: attributes #[[ATTR1]] = { sanitize_address "amdgpu-lds-size"="4104" }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR4:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
+; CHECK: [[META0]] = !{i32 0, i32 1}
+;.
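
The attribute delta tells the sizing story here: my_kernel gets "amdgpu-lds-size"="4104", i.e. the 8-byte pointer slot plus the 4096-byte [1024 x i32] payload recorded in the two metadata items. The lowered call site then rebases the array off the sw.lds base before handing it to the callee, as the checks read (a sketch, register names illustrative):

  %off  = load i32, ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.my_kernel.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.my_kernel.md, i32 0, i32 1, i32 0), align 4
  %base = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.my_kernel, i32 %off
  %arg  = getelementptr [1024 x i32], ptr addrspace(3) %base, i32 0, i32 0
  call void @my_function(ptr addrspace(3) %arg)
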
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll
index 5148e04d541e6..9570c41838eef 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access-nested.ll
@@ -6,11 +6,9 @@
 @A = external addrspace(3) global [8 x ptr]
 @B = external addrspace(3) global [0 x i32]
 
-; @llvm.amdgcn.sw.lds.base.table = internal addrspace(4) constant [4 x i32] [i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_0 to i32), i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_1 to i32), i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_2 to i32), i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_3 to i32)]
-; @llvm.amdgcn.sw.lds.offset.table = internal addrspace(4) constant [4 x [2 x i32]] [[2 x i32] [i32 ptrtoint (ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md to i32), i32 poison], [2 x i32] [i32 poison, i32 ptrtoint (ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md to i32)], [2 x i32] [i32 ptrtoint (ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md to i32), i32 poison], [2 x i32] [i32 poison, i32 ptrtoint (ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md to i32)]]
 define amdgpu_kernel void @kernel_0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @kernel_0(
-; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META2:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -20,14 +18,17 @@ define amdgpu_kernel void @kernel_0() sanitize_address {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md, align 4
-; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md, i32 0, i32 0, i32 2), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_0.md, i32 0, i32 1, i32 2), align 4
 ; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP9]], [[TMP10]]
 ; CHECK-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP12]] to i64
-; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP11]])
+; CHECK-NEXT:    [[TMP13:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP11]], i64 [[TMP14]])
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP19]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_0, align 8
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       11:
+; CHECK:       14:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    call void @call_store_A()
@@ -36,8 +37,11 @@ define amdgpu_kernel void @kernel_0() sanitize_address {
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP8:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_0, align 8
-; CHECK-NEXT:    call void @free(ptr [[TMP8]])
+; CHECK-NEXT:    [[TMP15:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_0, align 8
+; CHECK-NEXT:    [[TMP16:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[TMP16]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = ptrtoint ptr addrspace(1) [[TMP15]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP18]], i64 [[TMP17]])
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       End:
 ; CHECK-NEXT:    ret void
@@ -48,7 +52,7 @@ define amdgpu_kernel void @kernel_0() sanitize_address {
 
 define amdgpu_kernel void @kernel_1() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @kernel_1(
-; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META1:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META3:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -58,30 +62,41 @@ define amdgpu_kernel void @kernel_1() sanitize_address {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP14:%.*]]
 ; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, align 4
+; CHECK-NEXT:    [[TMP20:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 0, i32 2), align 4
+; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[TMP12]], [[TMP20]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
 ; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
-; CHECK-NEXT:    store i32 0, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, align 4
+; CHECK-NEXT:    store i32 [[TMP21]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(4) [[TMP7]], align 4
-; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 0, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 1, i32 1), align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = add i32 [[TMP8]], 3
 ; CHECK-NEXT:    [[TMP10:%.*]] = udiv i32 [[TMP9]], 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = mul i32 [[TMP10]], 4
-; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 0, i32 1), align 4
-; CHECK-NEXT:    [[TMP12:%.*]] = add i32 0, [[TMP11]]
-; CHECK-NEXT:    [[TMP13:%.*]] = call ptr addrspace(1) @malloc(i64 0)
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_1_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_1.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP21]], [[TMP11]]
+; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
+; CHECK-NEXT:    [[TMP17:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP16]], i64 [[TMP18]])
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP19]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_1, align 8
 ; CHECK-NEXT:    br label [[TMP14]]
-; CHECK:       14:
+; CHECK:       21:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel_1.dynlds) ]
 ; CHECK-NEXT:    [[PTR:%.*]] = call ptr @get_B_ptr()
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP15:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_1, align 8
-; CHECK-NEXT:    call void @free(ptr [[TMP15]])
+; CHECK-NEXT:    [[TMP22:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_1, align 8
+; CHECK-NEXT:    [[TMP23:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP24:%.*]] = ptrtoint ptr [[TMP23]] to i64
+; CHECK-NEXT:    [[TMP25:%.*]] = ptrtoint ptr addrspace(1) [[TMP22]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP25]], i64 [[TMP24]])
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       End:
 ; CHECK-NEXT:    ret void
@@ -92,7 +107,7 @@ define amdgpu_kernel void @kernel_1() sanitize_address {
 
 define amdgpu_kernel void @kernel_2() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @kernel_2(
-; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META2:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META4:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -102,14 +117,17 @@ define amdgpu_kernel void @kernel_2() sanitize_address {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md, align 4
-; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_2_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md, i32 0, i32 0, i32 2), align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_2_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md, i32 0, i32 1, i32 0), align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_2_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_2.md, i32 0, i32 1, i32 2), align 4
 ; CHECK-NEXT:    [[TMP12:%.*]] = add i32 [[TMP9]], [[TMP10]]
 ; CHECK-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP12]] to i64
-; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP11]])
+; CHECK-NEXT:    [[TMP13:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP11]], i64 [[TMP14]])
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP19]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_2, align 8
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       11:
+; CHECK:       14:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    call void @store_A()
@@ -118,8 +136,11 @@ define amdgpu_kernel void @kernel_2() sanitize_address {
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP8:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_2, align 8
-; CHECK-NEXT:    call void @free(ptr [[TMP8]])
+; CHECK-NEXT:    [[TMP15:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_2, align 8
+; CHECK-NEXT:    [[TMP16:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[TMP16]] to i64
+; CHECK-NEXT:    [[TMP18:%.*]] = ptrtoint ptr addrspace(1) [[TMP15]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP18]], i64 [[TMP17]])
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       End:
 ; CHECK-NEXT:    ret void
@@ -130,7 +151,7 @@ define amdgpu_kernel void @kernel_2() sanitize_address {
 
 define amdgpu_kernel void @kernel_3() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @kernel_3(
-; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META3:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR1]] !llvm.amdgcn.lds.kernel.id [[META5:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -140,30 +161,41 @@ define amdgpu_kernel void @kernel_3() sanitize_address {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP14:%.*]]
 ; CHECK:       Malloc:
+; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, align 4
+; CHECK-NEXT:    [[TMP20:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 0, i32 2), align 4
+; CHECK-NEXT:    [[TMP21:%.*]] = add i32 [[TMP12]], [[TMP20]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
 ; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[TMP6]], i64 15
-; CHECK-NEXT:    store i32 0, ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, align 4
+; CHECK-NEXT:    store i32 [[TMP21]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(4) [[TMP7]], align 4
-; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 0, i32 1), align 4
+; CHECK-NEXT:    store i32 [[TMP8]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 1, i32 1), align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = add i32 [[TMP8]], 3
 ; CHECK-NEXT:    [[TMP10:%.*]] = udiv i32 [[TMP9]], 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = mul i32 [[TMP10]], 4
-; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 0, i32 1), align 4
-; CHECK-NEXT:    [[TMP12:%.*]] = add i32 0, [[TMP11]]
-; CHECK-NEXT:    [[TMP13:%.*]] = call ptr addrspace(1) @malloc(i64 0)
+; CHECK-NEXT:    store i32 [[TMP11]], ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_KERNEL_3_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.kernel_3.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP21]], [[TMP11]]
+; CHECK-NEXT:    [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
+; CHECK-NEXT:    [[TMP17:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64
+; CHECK-NEXT:    [[TMP19:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP16]], i64 [[TMP18]])
+; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP19]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_3, align 8
 ; CHECK-NEXT:    br label [[TMP14]]
-; CHECK:       14:
+; CHECK:       21:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel_3.dynlds) ]
 ; CHECK-NEXT:    [[PTR:%.*]] = call ptr @get_B_ptr()
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP15:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_3, align 8
-; CHECK-NEXT:    call void @free(ptr [[TMP15]])
+; CHECK-NEXT:    [[TMP22:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.kernel_3, align 8
+; CHECK-NEXT:    [[TMP23:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP24:%.*]] = ptrtoint ptr [[TMP23]] to i64
+; CHECK-NEXT:    [[TMP25:%.*]] = ptrtoint ptr addrspace(1) [[TMP22]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP25]], i64 [[TMP24]])
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       End:
 ; CHECK-NEXT:    ret void
@@ -174,7 +206,7 @@ define amdgpu_kernel void @kernel_3() sanitize_address {
 
 define private void @call_store_A() sanitize_address {
 ; CHECK-LABEL: define private void @call_store_A(
-; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-SAME: ) #[[ATTR2:[0-9]+]] {
 ; CHECK-NEXT:    call void @store_A()
 ; CHECK-NEXT:    ret void
 ;
@@ -184,15 +216,13 @@ define private void @call_store_A() sanitize_address {
 
 define private void @store_A() sanitize_address {
 ; CHECK-LABEL: define private void @store_A(
-; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-SAME: ) #[[ATTR2]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [4 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[TMP5]], align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [4 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [4 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr
 ; CHECK-NEXT:    store ptr [[TMP10]], ptr null, align 8
@@ -204,15 +234,13 @@ define private void @store_A() sanitize_address {
 
 define private ptr @get_B_ptr() sanitize_address {
 ; CHECK-LABEL: define private ptr @get_B_ptr(
-; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-SAME: ) #[[ATTR2]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [4 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [4 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[TMP5]], align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [4 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [4 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr
 ; CHECK-NEXT:    ret ptr [[TMP10]]
@@ -220,8 +248,8 @@ define private ptr @get_B_ptr() sanitize_address {
   ret ptr addrspacecast (ptr addrspace(3) @B to ptr)
 }
 ;.
-; CHECK: [[META0]] = !{i32 0}
-; CHECK: [[META1]] = !{i32 1}
-; CHECK: [[META2]] = !{i32 2}
-; CHECK: [[META3]] = !{i32 3}
+; CHECK: [[META2]] = !{i32 0}
+; CHECK: [[META3]] = !{i32 1}
+; CHECK: [[META4]] = !{i32 2}
+; CHECK: [[META5]] = !{i32 3}
 ;.
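
For the kernels that also consume dynamic LDS (kernel_1, kernel_3), the Malloc block now folds the runtime size from the implicit kernarg into the total after rounding it up — to 4 here, presumably @B's i32 alignment (an inference) — instead of the old hard-coded malloc(0). Distilled from the kernel_1 checks (names illustrative):

  %dyn   = load i32, ptr addrspace(4) %implicitarg.slot, align 4
  %t0    = add i32 %dyn, 3
  %t1    = udiv i32 %t0, 4
  %rnd   = mul i32 %t1, 4                 ; align-up(%dyn, 4)
  %total = add i32 %static.size, %rnd
  %sz64  = zext i32 %total to i64
  ; %sz64 then feeds @__asan_malloc_impl exactly as in the other kernels
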
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll
index 2fe685bc9c958..deb4475878356 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-indirect-access.ll
@@ -7,26 +7,20 @@
 @lds_3 = external addrspace(3) global [3 x i8], align 4
 @lds_4 = external addrspace(3) global [4 x i8], align 8
 
-; @llvm.amdgcn.sw.lds.base.table = internal addrspace(4) constant [1 x i32] [i32 ptrtoint (ptr addrspace(3) @llvm.amdgcn.sw.lds.k0 to i32)]
-; @llvm.amdgcn.sw.lds.offset.table = internal addrspace(4) constant [1 x [2 x i32]] [[2 x i32] [i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0) to i32), i32 ptrtoint (ptr addrspace(1) getelementptr inbounds (%llvm.amdgcn.sw.lds.k0.md.type, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0) to i32)]]
 define void @use_variables() sanitize_address {
 ; CHECK-LABEL: define void @use_variables(
 ; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
-; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x i32], ptr addrspace(4) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr addrspace(4) [[TMP2]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [1 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
-; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspace(4) [[TMP5]], align 4
-; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i32 [[TMP6]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(3) [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [1 x ptr addrspace(3)], ptr addrspace(1) @llvm.amdgcn.sw.lds.base.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load ptr addrspace(3), ptr addrspace(1) [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP6]], align 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) [[TMP5]], align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP8]]
-; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i32 [[TMP3]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x [2 x i32]], ptr addrspace(4) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
-; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr addrspace(4) [[TMP11]], align 4
-; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i32 [[TMP12]] to ptr addrspace(3)
-; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(3) [[TMP13]], align 4
-; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP10]], i32 [[TMP14]]
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [1 x [2 x ptr addrspace(1)]], ptr addrspace(1) @llvm.amdgcn.sw.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[TMP11]], align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[TMP10]]
 ; CHECK-NEXT:    [[X:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr
 ; CHECK-NEXT:    [[TMP16:%.*]] = addrspacecast ptr addrspace(3) [[TMP9]] to ptr
 ; CHECK-NEXT:    store i8 3, ptr [[TMP16]], align 4
@@ -41,7 +35,7 @@ define void @use_variables() sanitize_address {
 
 define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k0(
-; CHECK-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id [[META0:![0-9]+]] {
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id [[META1:![0-9]+]] {
 ; CHECK-NEXT:  WId:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -51,30 +45,36 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP13:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 0), align 4
-; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 3, i32 2), align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 0), align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 4, i32 2), align 4
 ; CHECK-NEXT:    [[TMP16:%.*]] = add i32 [[TMP13]], [[TMP14]]
 ; CHECK-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP16]] to i64
-; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP15]])
+; CHECK-NEXT:    [[TMP23:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP24:%.*]] = ptrtoint ptr [[TMP23]] to i64
+; CHECK-NEXT:    [[TMP12:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP15]], i64 [[TMP24]])
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP12]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       11:
+; CHECK:       14:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
-; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP8]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP10]]
+; CHECK-NEXT:    [[TMP17:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP17]]
 ; CHECK-NEXT:    call void @use_variables()
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP9]], align 1
-; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP11]], align 2
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP11]], align 1
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP18]], align 2
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP12:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
-; CHECK-NEXT:    call void @free(ptr [[TMP12]])
+; CHECK-NEXT:    [[TMP19:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP20:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr addrspace(1) [[TMP19]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP22]], i64 [[TMP21]])
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       End:
 ; CHECK-NEXT:    ret void
@@ -85,5 +85,5 @@ define amdgpu_kernel void @k0() sanitize_address {
   ret void
 }
 ;.
-; CHECK: [[META0]] = !{i32 0}
+; CHECK: [[META1]] = !{i32 0}
 ;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll
index be1ff10550d7a..4849b804835e0 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-sw-lower-lds-static-lds-test.ll
@@ -6,10 +6,8 @@
 @lds_2 = internal addrspace(3) global [1 x i32] poison, align 8
 
 ;.
-; CHECK: @lds_1 = internal addrspace(3) global [1 x i8] poison, align 4
-; CHECK: @lds_2 = internal addrspace(3) global [1 x i32] poison, align 8
-; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 8
-; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 1, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 4, i32 8 } }, no_sanitize_address
+; CHECK: @llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, no_sanitize_address, align 8, !absolute_symbol [[META0:![0-9]+]]
+; CHECK: @llvm.amdgcn.sw.lds.k0.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.k0.md.type { %llvm.amdgcn.sw.lds.k0.md.item { i32 0, i32 8, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 8, i32 1, i32 8 }, %llvm.amdgcn.sw.lds.k0.md.item { i32 16, i32 4, i32 8 } }, no_sanitize_address
 ;.
 define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-LABEL: define amdgpu_kernel void @k0(
@@ -23,29 +21,35 @@ define amdgpu_kernel void @k0() sanitize_address {
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[TMP4]], 0
 ; CHECK-NEXT:    br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP7:%.*]]
 ; CHECK:       Malloc:
-; CHECK-NEXT:    [[TMP13:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
-; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 2), align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 2), align 4
 ; CHECK-NEXT:    [[TMP16:%.*]] = add i32 [[TMP13]], [[TMP14]]
 ; CHECK-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP16]] to i64
-; CHECK-NEXT:    [[TMP6:%.*]] = call ptr addrspace(1) @malloc(i64 [[TMP15]])
+; CHECK-NEXT:    [[TMP23:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP11:%.*]] = ptrtoint ptr [[TMP23]] to i64
+; CHECK-NEXT:    [[TMP12:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP15]], i64 [[TMP11]])
+; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP12]] to ptr addrspace(1)
 ; CHECK-NEXT:    store ptr addrspace(1) [[TMP6]], ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
 ; CHECK-NEXT:    br label [[TMP7]]
-; CHECK:       11:
+; CHECK:       14:
 ; CHECK-NEXT:    [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
-; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, align 4
-; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP8]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 1, i32 0), align 4
 ; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP10]]
-; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP17]], align 4
-; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP18]], align 2
+; CHECK-NEXT:    [[TMP17:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_K0_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.k0.md, i32 0, i32 2, i32 0), align 4
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 [[TMP17]]
+; CHECK-NEXT:    store i8 7, ptr addrspace(3) [[TMP18]], align 4
+; CHECK-NEXT:    store i32 8, ptr addrspace(3) [[TMP24]], align 2
 ; CHECK-NEXT:    br label [[CONDFREE:%.*]]
 ; CHECK:       CondFree:
 ; CHECK-NEXT:    call void @llvm.amdgcn.s.barrier()
 ; CHECK-NEXT:    br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
 ; CHECK:       Free:
-; CHECK-NEXT:    [[TMP12:%.*]] = load ptr, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
-; CHECK-NEXT:    call void @free(ptr [[TMP12]])
+; CHECK-NEXT:    [[TMP19:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, align 8
+; CHECK-NEXT:    [[TMP20:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
+; CHECK-NEXT:    [[TMP22:%.*]] = ptrtoint ptr addrspace(1) [[TMP19]] to i64
+; CHECK-NEXT:    call void @__asan_free_impl(i64 [[TMP22]], i64 [[TMP21]])
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       End:
 ; CHECK-NEXT:    ret void
@@ -55,7 +59,10 @@ define amdgpu_kernel void @k0() sanitize_address {
   ret void
 }
 ;.
-; CHECK: attributes #[[ATTR0]] = { sanitize_address }
+; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-lds-size"="24" }
 ; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; CHECK: attributes #[[ATTR2:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR3:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
+;.
+; CHECK: [[META0]] = !{i32 0, i32 1}
 ;.
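
For anyone reading the updated checks side by side, here is a minimal hand-written LLVM IR sketch of the rewrite these tests pin down. It is illustrative only and not part of the patch: @k0_before, @k0_after, and the %offset_of_lds_1 parameter are made-up names standing in for the loads from @llvm.amdgcn.sw.lds.k0.md that the pass actually emits.

; "Before": a direct access to a static LDS variable, as in the tests above.
@lds_1 = internal addrspace(3) global [1 x i8] poison, align 4

define amdgpu_kernel void @k0_before() sanitize_address {
  store i8 7, ptr addrspace(3) @lds_1, align 4
  ret void
}

; "After" (abridged): the access is rebased onto the single
; @llvm.amdgcn.sw.lds.k0 symbol, whose backing store is the device-global
; buffer allocated in the Malloc block. %offset_of_lds_1 stands in for the
; per-variable offset the real pass loads from the metadata global.
@llvm.amdgcn.sw.lds.k0 = internal addrspace(3) global ptr poison, align 8

define amdgpu_kernel void @k0_after(i32 %offset_of_lds_1) sanitize_address {
  %p = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.k0, i32 %offset_of_lds_1
  store i8 7, ptr addrspace(3) %p, align 4
  ret void
}

In the real output shown in the checks, the allocation and deallocation run only on the workitem whose id compares equal to zero (the Malloc/CondFree/Free blocks), with llvm.amdgcn.s.barrier calls keeping the rest of the workgroup from touching the buffer before the base pointer is published; under sanitize_address the calls go through __asan_malloc_impl and __asan_free_impl, passing the return address as the second argument.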