[clang] b79ba02 - [AMDGPU][GFX12.5] Reimplement monitor load as an atomic operation (#177343)

via cfe-commits <cfe-commits@lists.llvm.org>
Mon Feb 9 00:57:33 PST 2026


Author: Pierre van Houtryve
Date: 2026-02-09T09:57:27+01:00
New Revision: b79ba024790200d8e3900cdd050585c962535eb6

URL: https://github.com/llvm/llvm-project/commit/b79ba024790200d8e3900cdd050585c962535eb6
DIFF: https://github.com/llvm/llvm-project/commit/b79ba024790200d8e3900cdd050585c962535eb6.diff

LOG: [AMDGPU][GFX12.5] Reimplement monitor load as an atomic operation (#177343)

Load monitor operations make more sense as atomic operations, since
non-atomic operations cannot be used for inter-thread communication
without additional synchronization.
The previous builtin worked because one could simply override the CPol
bits, but that bypasses the memory model and forces the user to learn
the ISA bit encodings.

Making load monitor an atomic operation has a couple of advantages.
First, the memory model foundation for it is stronger: we simply lean
on the existing rules for atomic operations. Second, the CPol bits are
abstracted away from the user, which avoids leaking ISA details into
the API.
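
For example, a call that previously passed a raw CPol immediate now
takes a C ABI atomic ordering and a memory scope. A minimal sketch
based on the updated OpenCL tests below ("inptr" is an illustrative
pointer name):

  // Before: the second argument was a raw CPol value (ISA encoding).
  int v0 = __builtin_amdgcn_global_load_monitor_b32(inptr, 1);
  // After: standard C/C++ atomic ordering and memory scope arguments.
  int v1 = __builtin_amdgcn_global_load_monitor_b32(
      inptr, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);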

This patch also adds supporting memory model and intrinsics
documentation to AMDGPUUsage.

Solves SWDEV-516398.
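
At the LLVM IR level, the intrinsics now take the ordering as an i32
immediate and the syncscope as a metadata string. A sketch assembled
from the updated CodeGen tests below ("%p" and "!0" are illustrative
names):

  %v = tail call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) %p, i32 2, metadata !0)

  !0 = !{!"agent"}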

Added: 
    clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
    clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
    clang/test/SemaHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
    clang/test/SemaHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip

Modified: 
    clang/include/clang/Basic/BuiltinsAMDGPU.td
    clang/include/clang/Sema/SemaAMDGPU.h
    clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
    clang/lib/CodeGen/TargetInfo.cpp
    clang/lib/CodeGen/TargetInfo.h
    clang/lib/CodeGen/Targets/AMDGPU.cpp
    clang/lib/CodeGen/Targets/SPIR.cpp
    clang/lib/Sema/SemaAMDGPU.cpp
    clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
    clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
    llvm/docs/AMDGPUUsage.rst
    llvm/include/llvm/IR/IntrinsicsAMDGPU.td
    llvm/lib/Target/AMDGPU/AMDGPUGISel.td
    llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
    llvm/lib/Target/AMDGPU/FLATInstructions.td
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SIInstructions.td
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.td b/clang/include/clang/Basic/BuiltinsAMDGPU.td
index 17f081a906364..b7839b2febcd3 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.td
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.td
@@ -712,12 +712,12 @@ def __builtin_amdgcn_s_cluster_barrier : AMDGPUBuiltin<"void()", [], "gfx1250-in
 def __builtin_amdgcn_flat_prefetch : AMDGPUBuiltin<"void(void const address_space<0> *, _Constant int)", [Const], "vmem-pref-insts">;
 def __builtin_amdgcn_global_prefetch : AMDGPUBuiltin<"void(void const address_space<1> *, _Constant int)", [Const], "vmem-pref-insts">;
 
-def __builtin_amdgcn_global_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<1> *, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_global_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<1> *, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_global_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<1> *, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_flat_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<0> *, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_flat_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<0> *, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_flat_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<0> *, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<1> *, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<1> *, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<1> *, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<0> *, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<0> *, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<0> *, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
 def __builtin_amdgcn_cluster_load_b32 : AMDGPUBuiltin<"int(int address_space<1> *, _Constant int, int)", [Const], "mcast-load-insts,wavefrontsize32">;
 def __builtin_amdgcn_cluster_load_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<1> *, _Constant int, int)", [Const], "mcast-load-insts,wavefrontsize32">;
 def __builtin_amdgcn_cluster_load_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<1> *, _Constant int, int)", [Const], "mcast-load-insts,wavefrontsize32">;

diff --git a/clang/include/clang/Sema/SemaAMDGPU.h b/clang/include/clang/Sema/SemaAMDGPU.h
index bac812a9d4fcf..e080ccd008863 100644
--- a/clang/include/clang/Sema/SemaAMDGPU.h
+++ b/clang/include/clang/Sema/SemaAMDGPU.h
@@ -26,7 +26,14 @@ class SemaAMDGPU : public SemaBase {
 
   bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
 
+  /// Emits a diagnostic if \p E is not an atomic ordering encoded in the C
+  /// ABI format, or if the atomic ordering is not valid for the operation
+  /// type as defined by \p MayLoad and \p MayStore. \returns true if a
+  /// diagnostic was emitted.
+  bool checkAtomicOrderingCABIArg(Expr *E, bool MayLoad, bool MayStore);
+
   bool checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore);
+  bool checkAtomicMonitorLoad(CallExpr *TheCall);
 
   bool checkMovDPPFunctionCall(CallExpr *TheCall, unsigned NumArgs,
                                unsigned NumDataArgs);

diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index a096ed27a788e..e923c285eff28 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -12,6 +12,7 @@
 
 #include "CGBuiltin.h"
 #include "CodeGenFunction.h"
+#include "TargetInfo.h"
 #include "clang/Basic/DiagnosticFrontend.h"
 #include "clang/Basic/SyncScope.h"
 #include "clang/Basic/TargetBuiltins.h"
@@ -21,6 +22,7 @@
 #include "llvm/IR/IntrinsicsR600.h"
 #include "llvm/IR/MemoryModelRelaxationAnnotations.h"
 #include "llvm/Support/AMDGPUAddrSpace.h"
+#include "llvm/Support/AtomicOrdering.h"
 
 using namespace clang;
 using namespace CodeGen;
@@ -272,6 +274,24 @@ static inline StringRef mapScopeToSPIRV(StringRef AMDGCNScope) {
   return AMDGCNScope;
 }
 
+static llvm::AtomicOrdering mapCABIAtomicOrdering(unsigned AO) {
+  // Map C11/C++11 memory ordering to LLVM memory ordering
+  assert(llvm::isValidAtomicOrderingCABI(AO));
+  switch (static_cast<llvm::AtomicOrderingCABI>(AO)) {
+  case llvm::AtomicOrderingCABI::acquire:
+  case llvm::AtomicOrderingCABI::consume:
+    return llvm::AtomicOrdering::Acquire;
+  case llvm::AtomicOrderingCABI::release:
+    return llvm::AtomicOrdering::Release;
+  case llvm::AtomicOrderingCABI::acq_rel:
+    return llvm::AtomicOrdering::AcquireRelease;
+  case llvm::AtomicOrderingCABI::seq_cst:
+    return llvm::AtomicOrdering::SequentiallyConsistent;
+  case llvm::AtomicOrderingCABI::relaxed:
+    return llvm::AtomicOrdering::Monotonic;
+  }
+}
+
 // For processing memory ordering and memory scope arguments of various
 // amdgcn builtins.
 // \p Order takes a C++11 compatible memory-ordering specifier and converts
@@ -284,25 +304,7 @@ void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
   int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
 
   // Map C11/C++11 memory ordering to LLVM memory ordering
-  assert(llvm::isValidAtomicOrderingCABI(ord));
-  switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
-  case llvm::AtomicOrderingCABI::acquire:
-  case llvm::AtomicOrderingCABI::consume:
-    AO = llvm::AtomicOrdering::Acquire;
-    break;
-  case llvm::AtomicOrderingCABI::release:
-    AO = llvm::AtomicOrdering::Release;
-    break;
-  case llvm::AtomicOrderingCABI::acq_rel:
-    AO = llvm::AtomicOrdering::AcquireRelease;
-    break;
-  case llvm::AtomicOrderingCABI::seq_cst:
-    AO = llvm::AtomicOrdering::SequentiallyConsistent;
-    break;
-  case llvm::AtomicOrderingCABI::relaxed:
-    AO = llvm::AtomicOrdering::Monotonic;
-    break;
-  }
+  AO = mapCABIAtomicOrdering(ord);
 
   // Some of the atomic builtins take the scope as a string name.
   StringRef scp;
@@ -818,11 +820,24 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
       break;
     }
 
+    LLVMContext &Ctx = CGM.getLLVMContext();
     llvm::Type *LoadTy = ConvertType(E->getType());
     llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
-    llvm::Value *Val = EmitScalarExpr(E->getArg(1));
+
+    auto *AOExpr = cast<llvm::ConstantInt>(EmitScalarExpr(E->getArg(1)));
+    auto *ScopeExpr = cast<llvm::ConstantInt>(EmitScalarExpr(E->getArg(2)));
+
+    auto Scope = static_cast<SyncScope>(ScopeExpr->getZExtValue());
+    llvm::AtomicOrdering AO = mapCABIAtomicOrdering(AOExpr->getZExtValue());
+
+    StringRef ScopeStr = CGM.getTargetCodeGenInfo().getLLVMSyncScopeStr(
+        CGM.getLangOpts(), Scope, AO);
+
+    llvm::MDNode *MD =
+        llvm::MDNode::get(Ctx, {llvm::MDString::get(Ctx, ScopeStr)});
+    llvm::Value *ScopeMD = llvm::MetadataAsValue::get(Ctx, MD);
     llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
-    return Builder.CreateCall(F, {Addr, Val});
+    return Builder.CreateCall(F, {Addr, AOExpr, ScopeMD});
   }
   case AMDGPU::BI__builtin_amdgcn_cluster_load_b32:
   case AMDGPU::BI__builtin_amdgcn_cluster_load_b64:

diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index 51d3ad384f934..dc0b392fd37f7 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -148,12 +148,20 @@ LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
   return D ? D->getType().getAddressSpace() : LangAS::Default;
 }
 
+StringRef
+TargetCodeGenInfo::getLLVMSyncScopeStr(const LangOptions &LangOpts,
+                                       SyncScope Scope,
+                                       llvm::AtomicOrdering Ordering) const {
+  return ""; /* default sync scope */
+}
+
 llvm::SyncScope::ID
 TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                       SyncScope Scope,
                                       llvm::AtomicOrdering Ordering,
                                       llvm::LLVMContext &Ctx) const {
-  return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
+  return Ctx.getOrInsertSyncScopeID(
+      getLLVMSyncScopeStr(LangOpts, Scope, Ordering));
 }
 
 void TargetCodeGenInfo::addStackProbeTargetAttributes(

diff --git a/clang/lib/CodeGen/TargetInfo.h b/clang/lib/CodeGen/TargetInfo.h
index 6394ef4cb0180..98ee894fe557f 100644
--- a/clang/lib/CodeGen/TargetInfo.h
+++ b/clang/lib/CodeGen/TargetInfo.h
@@ -326,11 +326,16 @@ class TargetCodeGenInfo {
     return LangAS::Default;
   }
 
-  /// Get the syncscope used in LLVM IR.
-  virtual llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
-                                                 SyncScope Scope,
-                                                 llvm::AtomicOrdering Ordering,
-                                                 llvm::LLVMContext &Ctx) const;
+  /// Get the syncscope used in LLVM IR as a string
+  virtual StringRef getLLVMSyncScopeStr(const LangOptions &LangOpts,
+                                        SyncScope Scope,
+                                        llvm::AtomicOrdering Ordering) const;
+
+  /// Get the syncscope used in LLVM IR as a SyncScope ID.
+  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
+                                         SyncScope Scope,
+                                         llvm::AtomicOrdering Ordering,
+                                         llvm::LLVMContext &Ctx) const;
 
   /// Allow the target to apply other metadata to an atomic instruction
   virtual void setTargetAtomicMetadata(CodeGenFunction &CGF,

diff --git a/clang/lib/CodeGen/Targets/AMDGPU.cpp b/clang/lib/CodeGen/Targets/AMDGPU.cpp
index 7ba32b92cfd55..27e937b81129d 100644
--- a/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -310,10 +310,8 @@ class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
   }
   LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                   const VarDecl *D) const override;
-  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
-                                         SyncScope Scope,
-                                         llvm::AtomicOrdering Ordering,
-                                         llvm::LLVMContext &Ctx) const override;
+  StringRef getLLVMSyncScopeStr(const LangOptions &LangOpts, SyncScope Scope,
+                                llvm::AtomicOrdering Ordering) const override;
   void setTargetAtomicMetadata(CodeGenFunction &CGF,
                                llvm::Instruction &AtomicInst,
                                const AtomicExpr *Expr = nullptr) const override;
@@ -493,55 +491,40 @@ AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
   return DefaultGlobalAS;
 }
 
-llvm::SyncScope::ID
-AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
-                                            SyncScope Scope,
-                                            llvm::AtomicOrdering Ordering,
-                                            llvm::LLVMContext &Ctx) const {
-  std::string Name;
+StringRef AMDGPUTargetCodeGenInfo::getLLVMSyncScopeStr(
+    const LangOptions &LangOpts, SyncScope Scope,
+    llvm::AtomicOrdering Ordering) const {
+
+  // OpenCL assumes by default that atomic scopes are per-address space for
+  // non-sequentially consistent operations.
+  bool IsOneAs = (Scope >= SyncScope::OpenCLWorkGroup &&
+                  Scope <= SyncScope::OpenCLSubGroup &&
+                  Ordering != llvm::AtomicOrdering::SequentiallyConsistent);
+
   switch (Scope) {
   case SyncScope::HIPSingleThread:
   case SyncScope::SingleScope:
-    Name = "singlethread";
-    break;
+    return IsOneAs ? "singlethread-one-as" : "singlethread";
   case SyncScope::HIPWavefront:
   case SyncScope::OpenCLSubGroup:
   case SyncScope::WavefrontScope:
-    Name = "wavefront";
-    break;
+    return IsOneAs ? "wavefront-one-as" : "wavefront";
   case SyncScope::HIPCluster:
   case SyncScope::ClusterScope:
-    Name = "cluster";
-    break;
+    return IsOneAs ? "cluster-one-as" : "cluster";
   case SyncScope::HIPWorkgroup:
   case SyncScope::OpenCLWorkGroup:
   case SyncScope::WorkgroupScope:
-    Name = "workgroup";
-    break;
+    return IsOneAs ? "workgroup-one-as" : "workgroup";
   case SyncScope::HIPAgent:
   case SyncScope::OpenCLDevice:
   case SyncScope::DeviceScope:
-    Name = "agent";
-    break;
+    return IsOneAs ? "agent-one-as" : "agent";
   case SyncScope::SystemScope:
   case SyncScope::HIPSystem:
   case SyncScope::OpenCLAllSVMDevices:
-    Name = "";
-    break;
-  }
-
-  // OpenCL assumes by default that atomic scopes are per-address space for
-  // non-sequentially consistent operations.
-  if (Scope >= SyncScope::OpenCLWorkGroup &&
-      Scope <= SyncScope::OpenCLSubGroup &&
-      Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
-    if (!Name.empty())
-      Name = Twine(Twine(Name) + Twine("-")).str();
-
-    Name = Twine(Twine(Name) + Twine("one-as")).str();
+    return IsOneAs ? "one-as" : "";
   }
-
-  return Ctx.getOrInsertSyncScopeID(Name);
 }
 
 void AMDGPUTargetCodeGenInfo::setTargetAtomicMetadata(

diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp
index 32998bb5d60d5..52d019b855dbc 100644
--- a/clang/lib/CodeGen/Targets/SPIR.cpp
+++ b/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -131,42 +131,13 @@ class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
                                   const VarDecl *D) const override;
   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                            CodeGen::CodeGenModule &M) const override;
-  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
-                                         SyncScope Scope,
-                                         llvm::AtomicOrdering Ordering,
-                                         llvm::LLVMContext &Ctx) const override;
+  StringRef getLLVMSyncScopeStr(const LangOptions &LangOpts, SyncScope Scope,
+                                llvm::AtomicOrdering Ordering) const override;
   bool supportsLibCall() const override {
     return getABIInfo().getTarget().getTriple().getVendor() !=
            llvm::Triple::AMD;
   }
 };
-
-inline StringRef mapClangSyncScopeToLLVM(SyncScope Scope) {
-  switch (Scope) {
-  case SyncScope::HIPSingleThread:
-  case SyncScope::SingleScope:
-    return "singlethread";
-  case SyncScope::HIPWavefront:
-  case SyncScope::OpenCLSubGroup:
-  case SyncScope::WavefrontScope:
-    return "subgroup";
-  case SyncScope::HIPCluster:
-  case SyncScope::ClusterScope:
-  case SyncScope::HIPWorkgroup:
-  case SyncScope::OpenCLWorkGroup:
-  case SyncScope::WorkgroupScope:
-    return "workgroup";
-  case SyncScope::HIPAgent:
-  case SyncScope::OpenCLDevice:
-  case SyncScope::DeviceScope:
-    return "device";
-  case SyncScope::SystemScope:
-  case SyncScope::HIPSystem:
-  case SyncScope::OpenCLAllSVMDevices:
-    return "";
-  }
-  return "";
-}
 } // End anonymous namespace.
 
 void CommonSPIRABIInfo::setCCs() {
@@ -563,11 +534,32 @@ void SPIRVTargetCodeGenInfo::setTargetAttributes(
                  llvm::MDNode::get(M.getLLVMContext(), AttrMDArgs));
 }
 
-llvm::SyncScope::ID
-SPIRVTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &, SyncScope Scope,
-                                           llvm::AtomicOrdering,
-                                           llvm::LLVMContext &Ctx) const {
-  return Ctx.getOrInsertSyncScopeID(mapClangSyncScopeToLLVM(Scope));
+StringRef SPIRVTargetCodeGenInfo::getLLVMSyncScopeStr(
+    const LangOptions &, SyncScope Scope, llvm::AtomicOrdering) const {
+  switch (Scope) {
+  case SyncScope::HIPSingleThread:
+  case SyncScope::SingleScope:
+    return "singlethread";
+  case SyncScope::HIPWavefront:
+  case SyncScope::OpenCLSubGroup:
+  case SyncScope::WavefrontScope:
+    return "subgroup";
+  case SyncScope::HIPCluster:
+  case SyncScope::ClusterScope:
+  case SyncScope::HIPWorkgroup:
+  case SyncScope::OpenCLWorkGroup:
+  case SyncScope::WorkgroupScope:
+    return "workgroup";
+  case SyncScope::HIPAgent:
+  case SyncScope::OpenCLDevice:
+  case SyncScope::DeviceScope:
+    return "device";
+  case SyncScope::SystemScope:
+  case SyncScope::HIPSystem:
+  case SyncScope::OpenCLAllSVMDevices:
+    return "";
+  }
+  return "";
 }
 
 /// Construct a SPIR-V target extension type for the given OpenCL image type.

diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index cec8f9d2675e6..2fa29ef966cfd 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -143,6 +143,13 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
   case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_16x8B:
   case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_8x16B:
     return checkCoopAtomicFunctionCall(TheCall, /*IsStore=*/true);
+  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
+  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
+  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128:
+  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
+  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
+  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
+    return checkAtomicMonitorLoad(TheCall);
   case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f32_i32:
   case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f32_i32:
   case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f16_i32:
@@ -365,6 +372,27 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
   return false;
 }
 
+bool SemaAMDGPU::checkAtomicOrderingCABIArg(Expr *E, bool MayLoad,
+                                            bool MayStore) {
+  Expr::EvalResult AtomicOrdArgRes;
+  if (!E->EvaluateAsInt(AtomicOrdArgRes, getASTContext()))
+    llvm_unreachable("Intrinsic requires imm for atomic ordering argument!");
+  auto Ord =
+      llvm::AtomicOrderingCABI(AtomicOrdArgRes.Val.getInt().getZExtValue());
+
+  // Atomic ordering can never be acq_rel (unless the operation both loads
+  // and stores), acquire for store-only ops, or release for load-only ops.
+  if (!llvm::isValidAtomicOrderingCABI((unsigned)Ord) ||
+      (!(MayLoad && MayStore) && (Ord == llvm::AtomicOrderingCABI::acq_rel)) ||
+      (!MayLoad && Ord == llvm::AtomicOrderingCABI::acquire) ||
+      (!MayStore && Ord == llvm::AtomicOrderingCABI::release)) {
+    return Diag(E->getBeginLoc(), diag::warn_atomic_op_has_invalid_memory_order)
+           << 0 << E->getSourceRange();
+  }
+
+  return false;
+}
+
 bool SemaAMDGPU::checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore) {
   bool Fail = false;
 
@@ -379,31 +407,47 @@ bool SemaAMDGPU::checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore) {
         << PtrArg->getSourceRange();
   }
 
+  Expr *AO = TheCall->getArg(IsStore ? 2 : 1);
+  Expr *Scope = TheCall->getArg(TheCall->getNumArgs() - 1);
+
+  if (AO->isValueDependent() || Scope->isValueDependent())
+    return false;
+
   // Check atomic ordering
-  Expr *AtomicOrdArg = TheCall->getArg(IsStore ? 2 : 1);
-  Expr::EvalResult AtomicOrdArgRes;
-  if (!AtomicOrdArg->EvaluateAsInt(AtomicOrdArgRes, getASTContext()))
-    llvm_unreachable("Intrinsic requires imm for atomic ordering argument!");
-  auto Ord =
-      llvm::AtomicOrderingCABI(AtomicOrdArgRes.Val.getInt().getZExtValue());
+  Fail |=
+      checkAtomicOrderingCABIArg(TheCall->getArg(IsStore ? 2 : 1),
+                                 /*MayLoad=*/!IsStore, /*MayStore=*/IsStore);
 
-  // Atomic ordering cannot be acq_rel in any case, acquire for stores or
-  // release for loads.
-  if (!llvm::isValidAtomicOrderingCABI((unsigned)Ord) ||
-      (Ord == llvm::AtomicOrderingCABI::acq_rel) ||
-      Ord == (IsStore ? llvm::AtomicOrderingCABI::acquire
-                      : llvm::AtomicOrderingCABI::release)) {
-    return Diag(AtomicOrdArg->getBeginLoc(),
-                diag::warn_atomic_op_has_invalid_memory_order)
-           << 0 << AtomicOrdArg->getSourceRange();
+  // Last argument is the syncscope as a string literal.
+  if (!isa<StringLiteral>(Scope->IgnoreParenImpCasts())) {
+    Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
+        << Scope->getSourceRange();
+    Fail = true;
   }
 
-  // Last argument is a string literal
-  Expr *Arg = TheCall->getArg(TheCall->getNumArgs() - 1);
-  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) {
-    Fail = true;
-    Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
-        << Arg->getSourceRange();
+  return Fail;
+}
+
+bool SemaAMDGPU::checkAtomicMonitorLoad(CallExpr *TheCall) {
+  bool Fail = false;
+
+  Expr *AO = TheCall->getArg(1);
+  Expr *Scope = TheCall->getArg(TheCall->getNumArgs() - 1);
+
+  if (AO->isValueDependent() || Scope->isValueDependent())
+    return false;
+
+  Fail |= checkAtomicOrderingCABIArg(TheCall->getArg(1), /*MayLoad=*/true,
+                                     /*MayStore=*/false);
+
+  auto ScopeModel = AtomicScopeModel::create(AtomicScopeModelKind::Generic);
+  if (std::optional<llvm::APSInt> Result =
+          Scope->getIntegerConstantExpr(SemaRef.Context)) {
+    if (!ScopeModel->isValid(Result->getZExtValue())) {
+      Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_sync_scope)
+          << Scope->getSourceRange();
+      Fail = true;
+    }
   }
 
   return Fail;

diff --git a/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
new file mode 100644
index 0000000000000..68d3ed674a15a
--- /dev/null
+++ b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
@@ -0,0 +1,91 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --version 5
+
+// REQUIRES: amdgpu-registered-target
+// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1250 -fcuda-is-device -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1251 -fcuda-is-device -emit-llvm -o - %s | FileCheck %s
+
+#define __device__ __attribute__((device))
+
+typedef int    v2i   __attribute__((ext_vector_type(2)));
+typedef int    v4i   __attribute__((ext_vector_type(4)));
+
+template<unsigned AO>
+__device__ void template_cooperative_atomic_store_32x4B(int* gaddr, int val) {
+  __builtin_amdgcn_cooperative_atomic_store_32x4B(gaddr, val, AO, "agent");
+}
+
+__device__ void test_amdgcn_cooperative_atomic_store_32x4B(int* gaddr, int val)
+{
+  template_cooperative_atomic_store_32x4B<__ATOMIC_SEQ_CST>(gaddr, val);
+}
+
+template<unsigned AO>
+__device__ int template_cooperative_atomic_load_32x4B(int* gaddr) {
+  return __builtin_amdgcn_cooperative_atomic_load_32x4B(gaddr, AO, "");
+}
+
+__device__ void test_amdgcn_cooperative_atomic_load_32x4B(int* addr, int *out)
+{
+  *out = template_cooperative_atomic_load_32x4B<__ATOMIC_SEQ_CST>(addr);
+}
+
+// CHECK-LABEL: define dso_local void @_Z42test_amdgcn_cooperative_atomic_store_32x4BPii(
+// CHECK-SAME: ptr noundef [[GADDR:%.*]], i32 noundef [[VAL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[GADDR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[VAL_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT:    [[GADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[GADDR_ADDR]] to ptr
+// CHECK-NEXT:    [[VAL_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VAL_ADDR]] to ptr
+// CHECK-NEXT:    store ptr [[GADDR]], ptr [[GADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    store i32 [[VAL]], ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[GADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @_Z39template_cooperative_atomic_store_32x4BILj5EEvPii(ptr noundef [[TMP0]], i32 noundef [[TMP1]]) #[[ATTR3:[0-9]+]]
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define linkonce_odr void @_Z39template_cooperative_atomic_store_32x4BILj5EEvPii(
+// CHECK-SAME: ptr noundef [[GADDR:%.*]], i32 noundef [[VAL:%.*]]) #[[ATTR0]] comdat {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[GADDR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[VAL_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT:    [[GADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[GADDR_ADDR]] to ptr
+// CHECK-NEXT:    [[VAL_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VAL_ADDR]] to ptr
+// CHECK-NEXT:    store ptr [[GADDR]], ptr [[GADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    store i32 [[VAL]], ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[GADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.cooperative.atomic.store.32x4B.p0(ptr [[TMP0]], i32 [[TMP1]], i32 5, metadata [[META4:![0-9]+]])
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define dso_local void @_Z41test_amdgcn_cooperative_atomic_load_32x4BPiS_(
+// CHECK-SAME: ptr noundef [[ADDR:%.*]], ptr noundef [[OUT:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ADDR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[OUT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[ADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ADDR_ADDR]] to ptr
+// CHECK-NEXT:    [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT:    store ptr [[ADDR]], ptr [[ADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    store ptr [[OUT]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z38template_cooperative_atomic_load_32x4BILj5EEiPi(ptr noundef [[TMP0]]) #[[ATTR3]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    store i32 [[CALL]], ptr [[TMP1]], align 4
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define linkonce_odr noundef i32 @_Z38template_cooperative_atomic_load_32x4BILj5EEiPi(
+// CHECK-SAME: ptr noundef [[GADDR:%.*]]) #[[ATTR0]] comdat {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[GADDR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[GADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[GADDR_ADDR]] to ptr
+// CHECK-NEXT:    store ptr [[GADDR]], ptr [[GADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[GADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.cooperative.atomic.load.32x4B.p0(ptr [[TMP0]], i32 5, metadata [[META5:![0-9]+]])
+// CHECK-NEXT:    ret i32 [[TMP1]]
+//
+//.
+// CHECK: [[META4]] = !{!"agent"}
+// CHECK: [[META5]] = !{!""}
+//.

diff --git a/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
new file mode 100644
index 0000000000000..bc7a0878c9e57
--- /dev/null
+++ b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
@@ -0,0 +1,48 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --version 5
+// REQUIRES: amdgpu-registered-target
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx1250 -emit-llvm -fcuda-is-device -o - %s | FileCheck %s
+
+#define __device__ __attribute__((device))
+
+typedef int    v4i   __attribute__((ext_vector_type(4)));
+
+template<unsigned AO, unsigned Scope>
+__device__ v4i templated_amdgcn_flat_load_monitor_b128(v4i* inptr)
+{
+  return __builtin_amdgcn_flat_load_monitor_b128(inptr, AO, Scope);
+}
+
+__device__ void test_amdgcn_flat_load_monitor_b128_from_template(v4i* inptr, v4i *out)
+{
+  *out = templated_amdgcn_flat_load_monitor_b128<__ATOMIC_SEQ_CST, __MEMORY_SCOPE_SYSTEM>(inptr);
+}
+
+// CHECK-LABEL: define dso_local void @_Z48test_amdgcn_flat_load_monitor_b128_from_templatePDv4_iS0_(
+// CHECK-SAME: ptr noundef [[INPTR:%.*]], ptr noundef [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[INPTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[OUT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[INPTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[INPTR_ADDR]] to ptr
+// CHECK-NEXT:    [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT:    store ptr [[INPTR]], ptr [[INPTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    store ptr [[OUT]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[INPTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[CALL:%.*]] = call noundef <4 x i32> @_Z39templated_amdgcn_flat_load_monitor_b128ILj5ELj0EEDv4_iPS0_(ptr noundef [[TMP0]]) #[[ATTR2:[0-9]+]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    store <4 x i32> [[CALL]], ptr [[TMP1]], align 16
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define linkonce_odr noundef <4 x i32> @_Z39templated_amdgcn_flat_load_monitor_b128ILj5ELj0EEDv4_iPS0_(
+// CHECK-SAME: ptr noundef [[INPTR:%.*]]) #[[ATTR0]] comdat {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[INPTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[INPTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[INPTR_ADDR]] to ptr
+// CHECK-NEXT:    store ptr [[INPTR]], ptr [[INPTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[INPTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr [[TMP0]], i32 5, metadata [[META4:![0-9]+]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+//.
+// CHECK: [[META4]] = !{!""}
+//.

diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
index f2552d40fa273..8ecd6ba61a03e 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
@@ -7,60 +7,60 @@ typedef int    v4i   __attribute__((ext_vector_type(4)));
 
 // CHECK-GFX1250-LABEL: @test_amdgcn_global_load_monitor_b32(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) [[INPTR:%.*]], i32 1)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) [[INPTR:%.*]], i32 0, metadata [[META8:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret i32 [[TMP0]]
 //
 int test_amdgcn_global_load_monitor_b32(global int* inptr)
 {
-  return __builtin_amdgcn_global_load_monitor_b32(inptr, 1);
+  return __builtin_amdgcn_global_load_monitor_b32(inptr, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
 }
 
 // CHECK-GFX1250-LABEL: @test_amdgcn_global_load_monitor_b64(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) [[INPTR:%.*]], i32 10)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) [[INPTR:%.*]], i32 2, metadata [[META9:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret <2 x i32> [[TMP0]]
 //
 v2i test_amdgcn_global_load_monitor_b64(global v2i* inptr)
 {
-  return __builtin_amdgcn_global_load_monitor_b64(inptr, 10);
+  return __builtin_amdgcn_global_load_monitor_b64(inptr, __ATOMIC_ACQUIRE, __MEMORY_SCOPE_DEVICE);
 }
 
 // CHECK-GFX1250-LABEL: @test_amdgcn_global_load_monitor_b128(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1) [[INPTR:%.*]], i32 22)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1) [[INPTR:%.*]], i32 2, metadata [[META10:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret <4 x i32> [[TMP0]]
 //
 v4i test_amdgcn_global_load_monitor_b128(global v4i* inptr)
 {
-  return __builtin_amdgcn_global_load_monitor_b128(inptr, 22);
+  return __builtin_amdgcn_global_load_monitor_b128(inptr, __ATOMIC_ACQUIRE, __MEMORY_SCOPE_WRKGRP);
 }
 
 // CHECK-GFX1250-LABEL: @test_amdgcn_flat_load_monitor_b32(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr [[INPTR:%.*]], i32 27)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr [[INPTR:%.*]], i32 0, metadata [[META8]])
 // CHECK-GFX1250-NEXT:    ret i32 [[TMP0]]
 //
 int test_amdgcn_flat_load_monitor_b32(int* inptr)
 {
-  return __builtin_amdgcn_flat_load_monitor_b32(inptr, 27);
+  return __builtin_amdgcn_flat_load_monitor_b32(inptr, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
 }
 
 // CHECK-GFX1250-LABEL: @test_amdgcn_flat_load_monitor_b64(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> @llvm.amdgcn.flat.load.monitor.b64.v2i32(ptr [[INPTR:%.*]], i32 1)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> @llvm.amdgcn.flat.load.monitor.b64.v2i32(ptr [[INPTR:%.*]], i32 5, metadata [[META11:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret <2 x i32> [[TMP0]]
 //
 v2i test_amdgcn_flat_load_monitor_b64(v2i* inptr)
 {
-  return __builtin_amdgcn_flat_load_monitor_b64(inptr, 1);
+  return __builtin_amdgcn_flat_load_monitor_b64(inptr, __ATOMIC_SEQ_CST, __MEMORY_SCOPE_CLUSTR);
 }
 
 // CHECK-GFX1250-LABEL: @test_amdgcn_flat_load_monitor_b128(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr [[INPTR:%.*]], i32 0)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr [[INPTR:%.*]], i32 0, metadata [[META8]])
 // CHECK-GFX1250-NEXT:    ret <4 x i32> [[TMP0]]
 //
 v4i test_amdgcn_flat_load_monitor_b128(v4i* inptr)
 {
-  return __builtin_amdgcn_flat_load_monitor_b128(inptr, 0);
+  return __builtin_amdgcn_flat_load_monitor_b128(inptr, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
 }

diff --git a/clang/test/SemaHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip b/clang/test/SemaHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
new file mode 100644
index 0000000000000..b57d059d7767e
--- /dev/null
+++ b/clang/test/SemaHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
@@ -0,0 +1,32 @@
+// REQUIRES: amdgpu-registered-target
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx1250 -fsyntax-only -fcuda-is-device -verify %s
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx1251 -fsyntax-only -fcuda-is-device -verify %s
+
+#define __device__ __attribute__((device))
+
+typedef int    v2i   __attribute__((ext_vector_type(2)));
+typedef int    v4i   __attribute__((ext_vector_type(4)));
+
+template<unsigned AO>
+__device__ void template_cooperative_atomic_store_32x4B(int* gaddr, int val) {
+  // expected-warning@+1 {{memory order argument to atomic operation is invalid}}
+  __builtin_amdgcn_cooperative_atomic_store_32x4B(gaddr, val, AO, "agent");
+}
+
+__device__ void test_amdgcn_cooperative_atomic_store_32x4B_invalid_ao(int* gaddr, int val)
+{
+  // expected-note@+1 {{in instantiation of function template specialization 'template_cooperative_atomic_store_32x4B<42U>' requested here}}
+  template_cooperative_atomic_store_32x4B<42>(gaddr, val);
+}
+
+template<unsigned AO>
+__device__ int template_cooperative_atomic_load_32x4B(int* gaddr) {
+  // expected-warning@+1 {{memory order argument to atomic operation is invalid}}
+  return __builtin_amdgcn_cooperative_atomic_load_32x4B(gaddr, AO, "");
+}
+
+__device__ void test_amdgcn_cooperative_atomic_load_32x4B_invalid_ao(int* addr, int *out)
+{
+  // expected-note@+1 {{in instantiation of function template specialization 'template_cooperative_atomic_load_32x4B<42U>' requested here}}
+  *out = template_cooperative_atomic_load_32x4B<42>(addr);
+}

diff --git a/clang/test/SemaHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip b/clang/test/SemaHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
new file mode 100644
index 0000000000000..a47e9a7ef2822
--- /dev/null
+++ b/clang/test/SemaHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
@@ -0,0 +1,26 @@
+// REQUIRES: amdgpu-registered-target
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx1250 -fsyntax-only -fcuda-is-device -verify %s
+
+#define __device__ __attribute__((device))
+
+typedef int    v4i   __attribute__((ext_vector_type(4)));
+
+template<unsigned AO, unsigned Scope>
+__device__ v4i templated_amdgcn_flat_load_monitor_b128(v4i* inptr)
+{
+  // expected-error@+2 {{synchronization scope argument to atomic operation is invalid}}
+  // expected-warning@+1 {{memory order argument to atomic operation is invalid}}
+  return __builtin_amdgcn_flat_load_monitor_b128(inptr, AO, Scope);
+}
+
+__device__ void test_amdgcn_flat_load_monitor_b128_invalid_ao_from_template(v4i* inptr, v4i *out)
+{
+  // expected-note@+1 {{in instantiation of function template specialization 'templated_amdgcn_flat_load_monitor_b128<42U, 0U>' requested here}}
+  *out = templated_amdgcn_flat_load_monitor_b128<42, __MEMORY_SCOPE_SYSTEM>(inptr);
+}
+
+__device__ void test_amdgcn_flat_load_monitor_b128_invalid_sc_from_template(v4i* inptr, v4i *out)
+{
+  // expected-note@+1 {{in instantiation of function template specialization 'templated_amdgcn_flat_load_monitor_b128<5U, 42U>' requested here}}
+  *out = templated_amdgcn_flat_load_monitor_b128<__ATOMIC_SEQ_CST, 42>(inptr);
+}

diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
index da6a03bc93eeb..8ab4f43d70c40 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
@@ -94,15 +94,48 @@ void test_cvt_scale_pk(global half8 *outh8, global bfloat8 *outy8, uint2 src2,
   *outf16 = __builtin_amdgcn_cvt_scale_pk16_f32_bf6(src3, scale, 16); // expected-error {{argument value 16 is outside the valid range [0, 15]}}
 }
 
-void test_amdgcn_load_monitor(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
-                              global int* b32out, global v2i* b64out, global v4i* b128out, int cpol)
+void test_amdgcn_load_monitor_ao_constant(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
+                              global int* b32out, global v2i* b64out, global v4i* b128out, int ao)
 {
-  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, cpol); // expected-error {{'__builtin_amdgcn_global_load_monitor_b32' must be a constant integer}}
-  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, cpol); // expected-error {{'__builtin_amdgcn_global_load_monitor_b64' must be a constant integer}}
-  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, cpol); // expected-error {{'__builtin_amdgcn_global_load_monitor_b128' must be a constant integer}}
-  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, cpol); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b32' must be a constant integer}}
-  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, cpol); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b64' must be a constant integer}}
-  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, cpol); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b128' must be a constant integer}}
+  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, ao, __MEMORY_SCOPE_SYSTEM); // expected-error {{'__builtin_amdgcn_global_load_monitor_b32' must be a constant integer}}
+  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, ao, __MEMORY_SCOPE_SYSTEM); // expected-error {{'__builtin_amdgcn_global_load_monitor_b64' must be a constant integer}}
+  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, ao, __MEMORY_SCOPE_SYSTEM); // expected-error {{'__builtin_amdgcn_global_load_monitor_b128' must be a constant integer}}
+  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, ao, __MEMORY_SCOPE_SYSTEM); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b32' must be a constant integer}}
+  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, ao, __MEMORY_SCOPE_SYSTEM); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b64' must be a constant integer}}
+  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, ao, __MEMORY_SCOPE_SYSTEM); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b128' must be a constant integer}}
+}
+
+void test_amdgcn_load_monitor_ao_valid(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
+                              global int* b32out, global v2i* b64out, global v4i* b128out)
+{
+  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, __ATOMIC_RELEASE, __MEMORY_SCOPE_SYSTEM); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, __ATOMIC_ACQ_REL, __MEMORY_SCOPE_SYSTEM); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, __ATOMIC_ACQ_REL, __MEMORY_SCOPE_SYSTEM); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, __ATOMIC_RELEASE, __MEMORY_SCOPE_SYSTEM); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, __ATOMIC_ACQ_REL, __MEMORY_SCOPE_SYSTEM); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, __ATOMIC_RELEASE, __MEMORY_SCOPE_SYSTEM); // expected-warning {{memory order argument to atomic operation is invalid}}
+}
+
+void test_amdgcn_load_monitor_scope_constant(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
+                              global int* b32out, global v2i* b64out, global v4i* b128out, int sc)
+{
+  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, __ATOMIC_RELAXED, sc); // expected-error {{'__builtin_amdgcn_global_load_monitor_b32' must be a constant integer}}
+  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, __ATOMIC_RELAXED, sc); // expected-error {{'__builtin_amdgcn_global_load_monitor_b64' must be a constant integer}}
+  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, __ATOMIC_RELAXED, sc); // expected-error {{'__builtin_amdgcn_global_load_monitor_b128' must be a constant integer}}
+  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, __ATOMIC_RELAXED, sc); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b32' must be a constant integer}}
+  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, __ATOMIC_RELAXED, sc); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b64' must be a constant integer}}
+  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, __ATOMIC_RELAXED, sc); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b128' must be a constant integer}}
+}
+
+void test_amdgcn_load_monitor_scope_valid(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
+                              global int* b32out, global v2i* b64out, global v4i* b128out)
+{
+  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
+  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
+  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
+  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
+  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
+  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
 }
 
 void test_amdgcn_cluster_load(global int* addr32, global v2i* addr64, global v4i* addr128, global int* b32out, global v2i* b64out, global v4i* b128out, int cpol, int mask)

diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 1282dcf98727a..c7d66a38a4e11 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -1687,7 +1687,6 @@ The AMDGPU backend implements the following LLVM IR intrinsics.
 
                                                    Instruction cache prefetches are unsafe on invalid address.
 
-
   llvm.amdgcn.s.barrier                            Performs a barrier *signal* operation immediately followed
                                                    by a barrier *wait* operation on the *workgroup barrier* object.
                                                    see :ref:`amdgpu-amdhsa-execution-barriers`.
@@ -1700,6 +1699,32 @@ The AMDGPU backend implements the following LLVM IR intrinsics.
                                                    See :ref:`amdgpu-amdhsa-execution-barriers`.
                                                    Available starting GFX12.
 
+  llvm.amdgcn.flat.load.monitor                    Available on GFX12.5 only.
+                                                   Corresponds to ``flat_load_monitor_b32/64/128`` (``.b32/64/128`` suffixes)
+                                                   instructions.
+                                                   For the purposes of the memory model, this is an atomic load operation in
+                                                   the generic (flat) address space.
+
+                                                   This intrinsic has 3 operands:
+
+                                                   * Flat pointer.
+                                                   * :ref:`Load Atomic Ordering<amdgpu-intrinsics-c-abi-atomic-memory-ordering-operand>`.
+                                                   * :ref:`Synchronization Scope<amdgpu-intrinsics-syncscope-metadata-operand>`.
+                                                     Note that the scope used must ensure that the L2 cache will be hit.
+
+  llvm.amdgcn.global.load.monitor                  Available on GFX12.5 only.
+                                                   Corresponds to ``global_load_monitor_b32/64/128`` (``.b32/64/128`` suffixes)
+                                                   instructions.
+                                                   For the purposes of the memory model, this is an atomic load operation in
+                                                   the global address space.
+
+                                                   This intrinsic has 3 operands:
+
+                                                   * Global pointer.
+                                                   * :ref:`Load Atomic Ordering<amdgpu-intrinsics-c-abi-atomic-memory-ordering-operand>`.
+                                                   * :ref:`Synchronization Scope<amdgpu-intrinsics-syncscope-metadata-operand>`.
+                                                     Note that the scope used must ensure that the L2 cache will be hit.
+
   ==============================================   ==========================================================
 
 .. TODO::
@@ -1765,28 +1790,64 @@ then this intrinsic causes undefined behavior.
 
 The intrinsics are available for the global (``.p1`` suffix) and generic (``.p0`` suffix) address spaces.
 
-The atomic ordering operand (3rd operand for ``.store``, 2nd for ``.load``) is an integer that follows the
-C ABI encoding of atomic memory orderings. The supported values are in
-:ref:`the table below<amdgpu-cooperative-atomic-intrinsics-atomic-memory-orderings-table>`.
+The 3rd operand for ``.store`` or 2nd for ``.load`` intrinsics is the
+:ref:`atomic ordering<amdgpu-intrinsics-c-abi-atomic-memory-ordering-operand>` of the operation.
+
+The last operand of the intrinsic is the
+:ref:`synchronization scope<amdgpu-intrinsics-syncscope-metadata-operand>` of the operation.
+
+Intrinsic Operands
+~~~~~~~~~~~~~~~~~~
+
+.. _amdgpu-intrinsics-c-abi-atomic-memory-ordering-operand:
+
+C ABI Atomic Ordering Operand
++++++++++++++++++++++++++++++
+
+Intrinsic operands in this format are always ``i32`` integer constants whose value is
+determined by the C ABI encoding of atomic memory orderings. The supported values are in
+:ref:`the table below<amdgpu-intrinsics-c-abi-atomic-memory-orderings-table>`.
+
+  .. table:: AMDGPU Intrinsics C ABI Atomic Memory Ordering Values
+    :name: amdgpu-intrinsics-c-abi-atomic-memory-orderings-table
+
+    ========= ================ =================================
+    Value     Atomic Memory    Notes
+              Ordering
+    ========= ================ =================================
+    ``i32 0`` ``relaxed``      The default for unsupported values.
+
+    ``i32 2`` ``acquire``      Only for loads.
 
-  .. table:: AMDGPU Cooperative Atomic Intrinsics Atomic Memory Orderings
-    :name: amdgpu-cooperative-atomic-intrinsics-atomic-memory-orderings-table
+    ``i32 3`` ``release``      Only for stores.
 
-    ====== ================ =================================
-    Value  Atomic Memory    Notes
-           Ordering
-    ====== ================ =================================
-    ``0``  ``relaxed``      The default for unsupported values.
+    ``i32 5`` ``seq_cst``
+    ========= ================ =================================
 
-    ``2``  ``acquire``      Only for ``.load``
+Example:
 
-    ``3``  ``release``      Only for ``.store``
+.. code::
+
+  ; "i32 5" is the atomic ordering operand
+  %0 = tail call i32 @llvm.amdgcn.cooperative.atomic.load.32x4B.p0(ptr %addr, i32 5, metadata !0)
 
-    ``5``  ``seq_cst``
-    ====== ================ =================================
+.. _amdgpu-intrinsics-syncscope-metadata-operand:
+
+Syncscope Metadata Operand
+++++++++++++++++++++++++++
 
-The last argument of the intrinsic is the synchronization scope
-as a metadata string, which must be one of the supported :ref:`memory scopes<amdgpu-memory-scopes>`.
+Intrinsic operands in this format are metadata strings, which must be one of
+the supported :ref:`memory scopes<amdgpu-memory-scopes>`.
+The metadata node must consist of a single ``MDString`` at the top level.
+
+Example:
+
+.. code::
+
+  ; "metadata !0" is the syncscope metadata operand.
+  %0 = tail call i32 @llvm.amdgcn.cooperative.atomic.load.32x4B.p0(ptr %addr, i32 5, metadata !0)
+
+  !0 = !{ !"agent" }
 
 .. _amdgpu_metadata:
 
@@ -17258,7 +17319,7 @@ For GFX125x:
 
   This section is currently incomplete as work on the compiler is still ongoing.
   The following is a non-exhaustive list of unimplemented/undocumented features:
-  non-volatile bit code sequences, monitor and wait, globally accessing scratch atomics,
+  non-volatile bit code sequences, globally accessing scratch atomics,
   multicast loads, barriers (including split barriers) and cooperative atomics.
   Scalar operations memory model needs more elaboration as well.
 
@@ -17359,6 +17420,17 @@ For GFX125x:
     issued to every address at the same time. They are kept in order with other
     memory operations from the same wave.
 
+* ``global_load_monitor_*`` and ``flat_load_monitor_*`` instructions load
+  data and request that the wave be notified (see ``s_monitor_sleep``) if
+  the L2 cache line that holds the data is evicted or written to.
+
+  * In order to monitor a cache line in the L2 cache, these instructions must
+    ensure that the L2 cache is always hit by setting the ``SCOPE`` of the instruction
+    appropriately.
+  * For non-atomic and atomic code sequences, it is valid to replace
+    ``global_load_b32/64/128`` with a ``global_load_monitor_b32/64/128`` and a
+    ``flat_load_b32/64/128`` with a ``flat_load_monitor_b32/64/128``.
+
 Scalar memory operations are only used to access memory that is proven to not
 change during the execution of the kernel dispatch. This includes constant
 address space and global address space for program scope ``const`` variables.

diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index e35376ba404c0..28bd6c3409e4d 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -4219,13 +4219,12 @@ class AMDGPULoadMonitor<LLVMType ptr_ty>:
   Intrinsic<
     [llvm_any_ty],
     [ptr_ty,
-     llvm_i32_ty],  // gfx12+ cachepolicy:
-                    //   bits [0-2] = th
-                    //   bits [3-4] = scope
+     llvm_i32_ty,            // C ABI Atomic Ordering ID
+     llvm_metadata_ty],      // syncscope
     [IntrArgMemOnly, IntrReadMem, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<1>>,
      IntrWillReturn, IntrConvergent, IntrNoCallback, IntrNoFree],
     "",
-    [SDNPMemOperand]
+    [SDNPMemOperand, SDNPMayLoad]
   >;
 
 def int_amdgcn_flat_load_monitor_b32    : AMDGPULoadMonitor<flat_ptr_ty>;

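With the signature change above, a call site migrates as follows (a sketch
assembled from the test updates further down; "i32 22" was a raw th/scope
cachepolicy encoding, while "i32 2" is the C ABI acquire ordering):

  ; before: cachepolicy immediate (bits [0-2] = th, bits [3-4] = scope)
  %old = call i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr %p, i32 22)

  ; after: C ABI atomic ordering + syncscope metadata
  %new = call i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr %p, i32 2, metadata !0)

  !0 = !{ !"agent" }
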
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
index 2781618a17077..cfef04644835c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
@@ -331,6 +331,10 @@ def : GINodeEquiv<G_AMDGPU_WHOLE_WAVE_FUNC_SETUP, AMDGPUwhole_wave_setup>;
 
 def : GINodeEquiv<G_AMDGPU_SPONENTRY, sponentry>;
 
+def : GINodeEquiv<G_AMDGPU_FLAT_LOAD_MONITOR, AMDGPUflat_load_monitor>;
+def : GINodeEquiv<G_AMDGPU_GLOBAL_LOAD_MONITOR, AMDGPUglobal_load_monitor>;
+
+
 class GISelSop2Pat <
   SDPatternOperator node,
   Instruction inst,

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
index 8a43c2da38346..8dc5d45aa73ba 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
@@ -402,6 +402,15 @@ def AMDGPUExportOp : SDTypeProfile<0, 8, [
 
 ]>;
 
+def AMDGPUflat_load_monitor : SDNode<
+  "AMDGPUISD::FLAT_LOAD_MONITOR", SDTLoad,
+  [SDNPHasChain, SDNPMemOperand]
+>;
+
+def AMDGPUglobal_load_monitor : SDNode<
+  "AMDGPUISD::GLOBAL_LOAD_MONITOR", SDTLoad,
+  [SDNPHasChain, SDNPMemOperand]
+>;
 
 //===----------------------------------------------------------------------===//
 // Flow Control Profile Types

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 6d4e4e64280c1..0bdb48cb23ee1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -8209,6 +8209,26 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     B.buildStore(MI.getOperand(2), MI.getOperand(1), **MI.memoperands_begin());
     MI.eraseFromParent();
     return true;
+  case Intrinsic::amdgcn_flat_load_monitor_b32:
+  case Intrinsic::amdgcn_flat_load_monitor_b64:
+  case Intrinsic::amdgcn_flat_load_monitor_b128:
+    assert(MI.hasOneMemOperand() && "Expected IRTranslator to set MemOp!");
+    B.buildInstr(AMDGPU::G_AMDGPU_FLAT_LOAD_MONITOR)
+        .add(MI.getOperand(0))
+        .add(MI.getOperand(2))
+        .addMemOperand(*MI.memoperands_begin());
+    MI.eraseFromParent();
+    return true;
+  case Intrinsic::amdgcn_global_load_monitor_b32:
+  case Intrinsic::amdgcn_global_load_monitor_b64:
+  case Intrinsic::amdgcn_global_load_monitor_b128:
+    assert(MI.hasOneMemOperand() && "Expected IRTranslator to set MemOp!");
+    B.buildInstr(AMDGPU::G_AMDGPU_GLOBAL_LOAD_MONITOR)
+        .add(MI.getOperand(0))
+        .add(MI.getOperand(2))
+        .addMemOperand(*MI.memoperands_begin());
+    MI.eraseFromParent();
+    return true;
   default: {
     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
             AMDGPU::getImageDimIntrinsicInfo(IntrID))

diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 62a252add0091..04dbc0721ebf4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -3499,6 +3499,8 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
     executeInWaterfallLoop(B, make_range(Start, End), SGPROperandRegs);
     break;
   }
+  case AMDGPU::G_AMDGPU_FLAT_LOAD_MONITOR:
+  case AMDGPU::G_AMDGPU_GLOBAL_LOAD_MONITOR:
   case AMDGPU::G_LOAD:
   case AMDGPU::G_ZEXTLOAD:
   case AMDGPU::G_SEXTLOAD: {
@@ -5342,12 +5344,6 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     case Intrinsic::amdgcn_ds_load_tr16_b128:
     case Intrinsic::amdgcn_ds_load_tr4_b64:
     case Intrinsic::amdgcn_ds_load_tr6_b96:
-    case Intrinsic::amdgcn_flat_load_monitor_b32:
-    case Intrinsic::amdgcn_flat_load_monitor_b64:
-    case Intrinsic::amdgcn_flat_load_monitor_b128:
-    case Intrinsic::amdgcn_global_load_monitor_b32:
-    case Intrinsic::amdgcn_global_load_monitor_b64:
-    case Intrinsic::amdgcn_global_load_monitor_b128:
     case Intrinsic::amdgcn_ds_read_tr4_b64:
     case Intrinsic::amdgcn_ds_read_tr6_b96:
     case Intrinsic::amdgcn_ds_read_tr8_b64:
@@ -5762,6 +5758,14 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   case AMDGPU::G_AMDGPU_WHOLE_WAVE_FUNC_RETURN:
     OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1);
     break;
+  case AMDGPU::G_AMDGPU_FLAT_LOAD_MONITOR:
+  case AMDGPU::G_AMDGPU_GLOBAL_LOAD_MONITOR: {
+    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
+    unsigned PtrSize = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI);
+    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
+    OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, PtrSize);
+    break;
+  }
   }
 
   return getInstructionMapping(/*ID*/1, /*Cost*/1,

diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 3ad15ae28e51e..feecd5825ac74 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -2389,13 +2389,13 @@ let WaveSizePredicate = isWave32,  OtherPredicates = [HasTransposeLoadF4F6Insts]
 }
 
 let OtherPredicates = [isGFX125xOnly] in {
-  def  : FlatLoadPat_CPOL <FLAT_LOAD_MONITOR_B32,  int_amdgcn_flat_load_monitor_b32,  i32>;
-  def  : FlatLoadPat_CPOL <FLAT_LOAD_MONITOR_B64,  int_amdgcn_flat_load_monitor_b64,  v2i32>;
-  def  : FlatLoadPat_CPOL <FLAT_LOAD_MONITOR_B128, int_amdgcn_flat_load_monitor_b128, v4i32>;
+  def  : FlatLoadPat <FLAT_LOAD_MONITOR_B32,  AMDGPUflat_load_monitor,  i32>;
+  def  : FlatLoadPat <FLAT_LOAD_MONITOR_B64,  AMDGPUflat_load_monitor,  v2i32>;
+  def  : FlatLoadPat <FLAT_LOAD_MONITOR_B128, AMDGPUflat_load_monitor, v4i32>;
 
-  defm : GlobalFLATLoadPats_CPOL <GLOBAL_LOAD_MONITOR_B32,  int_amdgcn_global_load_monitor_b32,  i32>;
-  defm : GlobalFLATLoadPats_CPOL <GLOBAL_LOAD_MONITOR_B64,  int_amdgcn_global_load_monitor_b64,  v2i32>;
-  defm : GlobalFLATLoadPats_CPOL <GLOBAL_LOAD_MONITOR_B128, int_amdgcn_global_load_monitor_b128, v4i32>;
+  defm : GlobalFLATLoadPats <GLOBAL_LOAD_MONITOR_B32,  AMDGPUglobal_load_monitor,  i32>;
+  defm : GlobalFLATLoadPats <GLOBAL_LOAD_MONITOR_B64,  AMDGPUglobal_load_monitor,  v2i32>;
+  defm : GlobalFLATLoadPats <GLOBAL_LOAD_MONITOR_B128, AMDGPUglobal_load_monitor, v4i32>;
 } // End SubtargetPredicate = isGFX125xOnly
 
 let OtherPredicates = [isGFX1250Plus] in {

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 7d2abfe8f17df..44535f471b70d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1293,51 +1293,54 @@ static unsigned getIntrMemWidth(unsigned IntrID) {
   case Intrinsic::amdgcn_global_store_async_from_lds_b32:
   case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
   case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
+  case Intrinsic::amdgcn_flat_load_monitor_b32:
+  case Intrinsic::amdgcn_global_load_monitor_b32:
     return 32;
   case Intrinsic::amdgcn_global_load_async_to_lds_b64:
   case Intrinsic::amdgcn_cluster_load_async_to_lds_b64:
   case Intrinsic::amdgcn_global_store_async_from_lds_b64:
   case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
   case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
+  case Intrinsic::amdgcn_flat_load_monitor_b64:
+  case Intrinsic::amdgcn_global_load_monitor_b64:
     return 64;
   case Intrinsic::amdgcn_global_load_async_to_lds_b128:
   case Intrinsic::amdgcn_cluster_load_async_to_lds_b128:
   case Intrinsic::amdgcn_global_store_async_from_lds_b128:
   case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
   case Intrinsic::amdgcn_cooperative_atomic_store_8x16B:
+  case Intrinsic::amdgcn_flat_load_monitor_b128:
+  case Intrinsic::amdgcn_global_load_monitor_b128:
     return 128;
   default:
     llvm_unreachable("Unknown width");
   }
 }
 
-static void getCoopAtomicOperandsInfo(const CallBase &CI, bool IsLoad,
-                                      TargetLoweringBase::IntrinsicInfo &Info) {
-  Value *OrderingArg = CI.getArgOperand(IsLoad ? 1 : 2);
+static AtomicOrdering parseAtomicOrderingCABIArg(const CallBase &CI,
+                                                 unsigned ArgIdx) {
+  Value *OrderingArg = CI.getArgOperand(ArgIdx);
   unsigned Ord = cast<ConstantInt>(OrderingArg)->getZExtValue();
   switch (AtomicOrderingCABI(Ord)) {
   case AtomicOrderingCABI::acquire:
-    Info.order = AtomicOrdering::Acquire;
+    return AtomicOrdering::Acquire;
     break;
   case AtomicOrderingCABI::release:
-    Info.order = AtomicOrdering::Release;
+    return AtomicOrdering::Release;
     break;
   case AtomicOrderingCABI::seq_cst:
-    Info.order = AtomicOrdering::SequentiallyConsistent;
+    return AtomicOrdering::SequentiallyConsistent;
     break;
   default:
-    Info.order = AtomicOrdering::Monotonic;
-    break;
+    return AtomicOrdering::Monotonic;
   }
+}
 
-  Info.flags =
-      (IsLoad ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore);
-  Info.flags |= MOCooperative;
-
+static unsigned parseSyncscopeMDArg(const CallBase &CI, unsigned ArgIdx) {
   MDNode *ScopeMD = cast<MDNode>(
-      cast<MetadataAsValue>(CI.getArgOperand(IsLoad ? 2 : 3))->getMetadata());
+      cast<MetadataAsValue>(CI.getArgOperand(ArgIdx))->getMetadata());
   StringRef Scope = cast<MDString>(ScopeMD->getOperand(0))->getString();
-  Info.ssid = CI.getContext().getOrInsertSyncScopeID(Scope);
+  return CI.getContext().getOrInsertSyncScopeID(Scope);
 }
 
 void SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
@@ -1578,12 +1581,6 @@ void SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
     Infos.push_back(Info);
     return;
   }
-  case Intrinsic::amdgcn_flat_load_monitor_b32:
-  case Intrinsic::amdgcn_flat_load_monitor_b64:
-  case Intrinsic::amdgcn_flat_load_monitor_b128:
-  case Intrinsic::amdgcn_global_load_monitor_b32:
-  case Intrinsic::amdgcn_global_load_monitor_b64:
-  case Intrinsic::amdgcn_global_load_monitor_b128:
   case Intrinsic::amdgcn_cluster_load_b32:
   case Intrinsic::amdgcn_cluster_load_b64:
   case Intrinsic::amdgcn_cluster_load_b128:
@@ -1607,6 +1604,22 @@ void SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
     Infos.push_back(Info);
     return;
   }
+  case Intrinsic::amdgcn_flat_load_monitor_b32:
+  case Intrinsic::amdgcn_flat_load_monitor_b64:
+  case Intrinsic::amdgcn_flat_load_monitor_b128:
+  case Intrinsic::amdgcn_global_load_monitor_b32:
+  case Intrinsic::amdgcn_global_load_monitor_b64:
+  case Intrinsic::amdgcn_global_load_monitor_b128: {
+    Info.opc = ISD::INTRINSIC_W_CHAIN;
+    Info.memVT = EVT::getIntegerVT(CI.getContext(), getIntrMemWidth(IntrID));
+    Info.ptrVal = CI.getOperand(0);
+    Info.align.reset();
+    Info.flags = MachineMemOperand::MOLoad;
+    Info.order = parseAtomicOrderingCABIArg(CI, 1);
+    Info.ssid = parseSyncscopeMDArg(CI, 2);
+    Infos.push_back(Info);
+    return;
+  }
   case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
   case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
   case Intrinsic::amdgcn_cooperative_atomic_load_8x16B: {
@@ -1614,7 +1627,9 @@ void SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
     Info.memVT = EVT::getIntegerVT(CI.getContext(), getIntrMemWidth(IntrID));
     Info.ptrVal = CI.getOperand(0);
     Info.align.reset();
-    getCoopAtomicOperandsInfo(CI, /*IsLoad=*/true, Info);
+    Info.flags = (MachineMemOperand::MOLoad | MOCooperative);
+    Info.order = parseAtomicOrderingCABIArg(CI, 1);
+    Info.ssid = parseSyncscopeMDArg(CI, 2);
     Infos.push_back(Info);
     return;
   }
@@ -1625,7 +1640,9 @@ void SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
     Info.memVT = EVT::getIntegerVT(CI.getContext(), getIntrMemWidth(IntrID));
     Info.ptrVal = CI.getArgOperand(0);
     Info.align.reset();
-    getCoopAtomicOperandsInfo(CI, /*IsLoad=*/false, Info);
+    Info.flags = (MachineMemOperand::MOStore | MOCooperative);
+    Info.order = parseAtomicOrderingCABIArg(CI, 2);
+    Info.ssid = parseSyncscopeMDArg(CI, 3);
     Infos.push_back(Info);
     return;
   }
@@ -1801,15 +1818,9 @@ bool SITargetLowering::getAddrModeArguments(const IntrinsicInst *II,
   case Intrinsic::amdgcn_ds_atomic_barrier_arrive_rtn_b64:
   case Intrinsic::amdgcn_flat_atomic_fmax_num:
   case Intrinsic::amdgcn_flat_atomic_fmin_num:
-  case Intrinsic::amdgcn_flat_load_monitor_b128:
-  case Intrinsic::amdgcn_flat_load_monitor_b32:
-  case Intrinsic::amdgcn_flat_load_monitor_b64:
   case Intrinsic::amdgcn_global_atomic_fmax_num:
   case Intrinsic::amdgcn_global_atomic_fmin_num:
   case Intrinsic::amdgcn_global_atomic_ordered_add_b64:
-  case Intrinsic::amdgcn_global_load_monitor_b128:
-  case Intrinsic::amdgcn_global_load_monitor_b32:
-  case Intrinsic::amdgcn_global_load_monitor_b64:
   case Intrinsic::amdgcn_global_load_tr_b64:
   case Intrinsic::amdgcn_global_load_tr_b128:
   case Intrinsic::amdgcn_global_load_tr4_b64:
@@ -11265,6 +11276,26 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
     return DAG.getAtomicLoad(ISD::NON_EXTLOAD, DL, MII->getMemoryVT(), VT,
                              Chain, Ptr, MII->getMemOperand());
   }
+  case Intrinsic::amdgcn_flat_load_monitor_b32:
+  case Intrinsic::amdgcn_flat_load_monitor_b64:
+  case Intrinsic::amdgcn_flat_load_monitor_b128: {
+    MemIntrinsicSDNode *MII = cast<MemIntrinsicSDNode>(Op);
+    SDValue Chain = Op->getOperand(0);
+    SDValue Ptr = Op->getOperand(2);
+    return DAG.getMemIntrinsicNode(AMDGPUISD::FLAT_LOAD_MONITOR, DL,
+                                   Op->getVTList(), {Chain, Ptr},
+                                   MII->getMemoryVT(), MII->getMemOperand());
+  }
+  case Intrinsic::amdgcn_global_load_monitor_b32:
+  case Intrinsic::amdgcn_global_load_monitor_b64:
+  case Intrinsic::amdgcn_global_load_monitor_b128: {
+    MemIntrinsicSDNode *MII = cast<MemIntrinsicSDNode>(Op);
+    SDValue Chain = Op->getOperand(0);
+    SDValue Ptr = Op->getOperand(2);
+    return DAG.getMemIntrinsicNode(AMDGPUISD::GLOBAL_LOAD_MONITOR, DL,
+                                   Op->getVTList(), {Chain, Ptr},
+                                   MII->getMemoryVT(), MII->getMemOperand());
+  }
   default:
 
     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =

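The two helpers factored out above are now shared by the cooperative
atomics and the monitor loads. A sketch of the resulting mapping (the
calls are illustrative; the mapping itself is the switch in
parseAtomicOrderingCABIArg):

  %relaxed = call i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr %p, i32 0, metadata !0) ; -> Monotonic
  %acquire = call i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr %p, i32 2, metadata !0) ; -> Acquire
  %seqcst  = call i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr %p, i32 5, metadata !0) ; -> SequentiallyConsistent

  !0 = !{ !"agent" } ; parseSyncscopeMDArg maps this to the "agent" sync scope ID
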
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index cd1874a1e325a..cde352313f86a 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -4841,6 +4841,16 @@ def G_AMDGPU_SPONENTRY : AMDGPUGenericInstruction {
   let hasSideEffects = 0;
 }
 
+class LoadMonitorInstruction : AMDGPUGenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins ptype1:$ptr);
+  let hasSideEffects = 0;
+  let mayLoad = 1;
+}
+
+def G_AMDGPU_FLAT_LOAD_MONITOR   : LoadMonitorInstruction;
+def G_AMDGPU_GLOBAL_LOAD_MONITOR : LoadMonitorInstruction;
+
 //============================================================================//
 // Dummy Instructions
 //============================================================================//

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll
index 910c55a041ede..f353deab46672 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll
@@ -2,77 +2,82 @@
 ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s
 ; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
 
-declare i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1), i32)
-declare <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1), i32)
-declare <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1), i32)
-declare i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr, i32)
-declare <2 x i32> @llvm.amdgcn.flat.load.monitor.b64.v2i32(ptr, i32)
-declare <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr, i32)
-
-define amdgpu_ps void @global_load_monitor_b32_vaddr(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b32_vaddr:
+declare i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1), i32, metadata)
+declare <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1), i32, metadata)
+declare <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1), i32, metadata)
+declare i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr, i32, metadata)
+declare <2 x i32> @llvm.amdgcn.flat.load.monitor.b64.v2i32(ptr, i32, metadata)
+declare <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr, i32, metadata)
+
+
+define amdgpu_ps void @global_load_monitor_b32_vaddr_relaxed_sys(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_load_monitor_b32_vaddr_relaxed_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    global_load_monitor_b32 v0, v[0:1], off offset:32 th:TH_LOAD_NT
+; GFX1250-NEXT:    global_load_monitor_b32 v0, v[0:1], off offset:32 scope:SCOPE_SYS
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b32 v[2:3], v0, off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 1)
+  %val = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 0, metadata !0)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b32_saddr(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b32_saddr:
+define amdgpu_ps void @global_load_monitor_b32_saddr_relaxed_sys(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_load_monitor_b32_saddr_relaxed_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1250-NEXT:    global_load_monitor_b32 v2, v2, s[0:1] offset:32 th:TH_LOAD_HT scope:SCOPE_SE
+; GFX1250-NEXT:    global_load_monitor_b32 v2, v2, s[0:1] offset:32 scope:SCOPE_SYS
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b32 v[0:1], v2, off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 10)
+  %val = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 0, metadata !0)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b64_vaddr(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b64_vaddr:
+define amdgpu_ps void @global_load_monitor_b64_vaddr_acquire_agent(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_load_monitor_b64_vaddr_acquire_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    global_load_monitor_b64 v[0:1], v[0:1], off offset:32 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+; GFX1250-NEXT:    global_load_monitor_b64 v[0:1], v[0:1], off offset:32 scope:SCOPE_DEV
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 22)
+  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 2, metadata !1)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b64_saddr(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b64_saddr:
+define amdgpu_ps void @global_load_monitor_b64_saddr_acquire_agent(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_load_monitor_b64_saddr_acquire_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v2, s[0:1] offset:32 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v2, s[0:1] offset:32 scope:SCOPE_DEV
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 27)
+  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 2, metadata !1)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b128_vaddr(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b128_vaddr:
+define amdgpu_ps void @global_load_monitor_b128_vaddr_seq_cst_workgroup(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_load_monitor_b128_vaddr_seq_cst_workgroup:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    global_load_monitor_b128 v[4:7], v[0:1], off offset:32
@@ -81,122 +86,137 @@ define amdgpu_ps void @global_load_monitor_b128_vaddr(ptr addrspace(1) %addr, pt
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1) %gep, i32 0)
+  %val = call <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1) %gep, i32 5, metadata !2)
   store <4 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b128_saddr(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b128_saddr:
+define amdgpu_ps void @global_load_monitor_b128_saddr_seq_cst_workgroup(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_load_monitor_b128_saddr_seq_cst_workgroup:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1250-NEXT:    global_load_monitor_b128 v[2:5], v2, s[0:1] offset:32 th:TH_LOAD_NT
+; GFX1250-NEXT:    global_load_monitor_b128 v[2:5], v2, s[0:1] offset:32
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b128 v[0:1], v[2:5], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1) %gep, i32 1)
+  %val = call <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1) %gep, i32 5, metadata !2)
   store <4 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @flat_load_monitor_b32(ptr %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: flat_load_monitor_b32:
+define amdgpu_ps void @flat_load_monitor_b32_seq_cst_sys(ptr %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: flat_load_monitor_b32_seq_cst_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    flat_load_monitor_b32 v0, v[0:1] offset:32 th:TH_LOAD_HT scope:SCOPE_SE
+; GFX1250-NEXT:    flat_load_monitor_b32 v0, v[0:1] offset:32 scope:SCOPE_SYS
 ; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b32 v[2:3], v0, off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr inbounds i64, ptr addrspace(0) %addr, i32 4
-  %val = call i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr addrspace(0) %gep, i32 10)
+  %val = call i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr addrspace(0) %gep, i32 5, metadata !0)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @flat_load_monitor_b64(ptr %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: flat_load_monitor_b64:
+define amdgpu_ps void @flat_load_monitor_b64_seq_cst_agent(ptr %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: flat_load_monitor_b64_seq_cst_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    flat_load_monitor_b64 v[0:1], v[0:1] offset:32 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+; GFX1250-NEXT:    flat_load_monitor_b64 v[0:1], v[0:1] offset:32 scope:SCOPE_DEV
 ; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_DEV
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr inbounds i64, ptr addrspace(0) %addr, i32 4
-  %val = call <2 x i32> @llvm.amdgcn.flat.load.monitor.b64.v2i32(ptr addrspace(0) %gep, i32 22)
+  %val = call <2 x i32> @llvm.amdgcn.flat.load.monitor.b64.v2i32(ptr addrspace(0) %gep, i32 5, metadata !1)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @flat_load_monitor_b128(ptr %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: flat_load_monitor_b128:
+define amdgpu_ps void @flat_load_monitor_b128_acquire_sys(ptr %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: flat_load_monitor_b128_acquire_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    flat_load_monitor_b128 v[4:7], v[0:1] offset:32 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+; GFX1250-NEXT:    flat_load_monitor_b128 v[4:7], v[0:1] offset:32 scope:SCOPE_SYS
 ; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b128 v[2:3], v[4:7], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr inbounds i64, ptr addrspace(0) %addr, i32 4
-  %val = call <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr addrspace(0) %gep, i32 27)
+  %val = call <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr addrspace(0) %gep, i32 2, metadata !0)
   store <4 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b32_saddr_scale_offset(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
-; GFX1250-LABEL: global_load_monitor_b32_saddr_scale_offset:
+define amdgpu_ps void @global_load_monitor_b32_saddr_scale_offset_acquire_agent(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
+; GFX1250-LABEL: global_load_monitor_b32_saddr_scale_offset_acquire_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    global_load_monitor_b32 v2, v2, s[0:1] scale_offset th:TH_LOAD_NT
+; GFX1250-NEXT:    global_load_monitor_b32 v2, v2, s[0:1] scale_offset scope:SCOPE_DEV
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b32 v[0:1], v2, off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %idxprom = sext i32 %idx to i64
   %gep = getelementptr i32, ptr addrspace(1) %addr, i64 %idxprom
-  %val = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 1)
+  %val = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 2, metadata !1)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b64_saddr_scale_offset(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
-; GFX1250-LABEL: global_load_monitor_b64_saddr_scale_offset:
+define amdgpu_ps void @global_load_monitor_b64_saddr_scale_offset_acquire_workgroup(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
+; GFX1250-LABEL: global_load_monitor_b64_saddr_scale_offset_acquire_workgroup:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v2, s[0:1] scale_offset th:TH_LOAD_NT
+; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v2, s[0:1] scale_offset
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %idxprom = sext i32 %idx to i64
   %gep = getelementptr i64, ptr addrspace(1) %addr, i64 %idxprom
-  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 1)
+  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 2, metadata !2)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b64_saddr_no_scale_offset(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
-; GFX1250-LABEL: global_load_monitor_b64_saddr_no_scale_offset:
+define amdgpu_ps void @global_load_monitor_b64_saddr_no_scale_offset_seq_cst_sys(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
+; GFX1250-LABEL: global_load_monitor_b64_saddr_no_scale_offset_seq_cst_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
 ; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX1250-NEXT:    v_lshl_add_u64 v[2:3], v[2:3], 2, s[0:1]
-; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v[2:3], off th:TH_LOAD_NT
+; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v[2:3], off scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_SYS
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %idxprom = sext i32 %idx to i64
   %gep = getelementptr i32, ptr addrspace(1) %addr, i64 %idxprom
-  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 1)
+  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 5, metadata !0)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
+
+!0 = !{ !"" }
+!1 = !{ !"agent" }
+!2 = !{ !"workgroup" }
+
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GFX1250-GISEL: {{.*}}
 ; GFX1250-SDAG: {{.*}}
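
Read together, the checks above show how the new operands drive code
generation: a relaxed system-scope load only carries scope:SCOPE_SYS;
acquire and seq_cst at agent or system scope additionally emit a
global_inv at the matching scope once the load completes; workgroup scope
needs neither a scope modifier nor an invalidate. A minimal sketch of the
acquire/agent case (hypothetical function name; expected ISA paraphrased
from the checks above):

  declare i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1), i32, metadata)

  define amdgpu_ps void @acquire_agent_sketch(ptr addrspace(1) %p, ptr addrspace(1) %out) {
    ; expect: global_load_monitor_b32 ... scope:SCOPE_DEV
    ;         s_wait_loadcnt 0x0
    ;         global_inv scope:SCOPE_DEV
    %v = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) %p, i32 2, metadata !1)
    store i32 %v, ptr addrspace(1) %out
    ret void
  }

  !1 = !{ !"agent" }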


        

