[clang] [llvm] [AMDGPU][GFX12.5] Reimplement monitor load as an atomic operation (PR #177343)

Pierre van Houtryve via cfe-commits cfe-commits at lists.llvm.org
Fri Feb 6 03:21:27 PST 2026


https://github.com/Pierre-vh updated https://github.com/llvm/llvm-project/pull/177343

>From 397c902296cf095e127478c860598f7fea06ee74 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Wed, 21 Jan 2026 14:54:22 +0100
Subject: [PATCH 01/10] [AMDGPU][GFX12.5] Reimplement monitor load as an atomic
 operation

Load monitor operations make more sense as atomic operations, since
non-atomic operations cannot be used for inter-thread communication
without additional synchronization.
The previous builtins only worked because one could override the CPol
bits directly, but that bypasses the memory model and forces the user to
learn the ISA encoding of those bits.

Making load monitor an atomic operation has a couple of advantages. First,
the memory model foundation for it is stronger. We just lean on the existing
rules for atomic operations. Second, the CPol bits are abstracted away from
the user, which avoids leaking ISA details into the API.
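
For illustration, roughly what this looks like at a call site (adapted from
the updated OpenCL tests in this patch, where v2i is a 2 x int ext_vector
type and "agent" is one of the supported memory scopes):

  // Before: a raw CPol immediate (th/scope bits) controlled cache behavior.
  v2i v = __builtin_amdgcn_global_load_monitor_b64(inptr, 10);
  // After: a C ABI atomic ordering plus an explicit syncscope string.
  v2i v = __builtin_amdgcn_global_atomic_load_monitor_b64(inptr, __ATOMIC_ACQUIRE, "agent");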

This patch also adds supporting memory model and intrinsics documentation to
AMDGPUUsage.

Solves SWDEV-516398.
---
 clang/include/clang/Basic/BuiltinsAMDGPU.td   |  12 +-
 clang/include/clang/Sema/SemaAMDGPU.h         |  12 ++
 clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp   |  55 ++++----
 clang/lib/Sema/SemaAMDGPU.cpp                 |  69 +++++++---
 .../builtins-amdgcn-gfx1250-load-monitor.cl   |  48 +++----
 .../builtins-amdgcn-error-gfx1250-param.cl    |  38 ++++--
 llvm/docs/AMDGPUUsage.rst                     | 108 ++++++++++++---
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td      |  21 ++-
 llvm/lib/Target/AMDGPU/AMDGPUGISel.td         |   4 +
 llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td     |   9 ++
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp |  20 +++
 .../Target/AMDGPU/AMDGPURegisterBankInfo.cpp  |  22 ++-
 llvm/lib/Target/AMDGPU/FLATInstructions.td    |  12 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  88 ++++++++----
 llvm/lib/Target/AMDGPU/SIInstructions.td      |  10 ++
 .../llvm.amdgcn.load.monitor.gfx1250.ll       | 128 ++++++++++--------
 16 files changed, 448 insertions(+), 208 deletions(-)

diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.td b/clang/include/clang/Basic/BuiltinsAMDGPU.td
index 740d136f465c1..1902b35d83f07 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.td
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.td
@@ -711,12 +711,12 @@ def __builtin_amdgcn_s_cluster_barrier : AMDGPUBuiltin<"void()", [], "gfx1250-in
 def __builtin_amdgcn_flat_prefetch : AMDGPUBuiltin<"void(void const address_space<0> *, _Constant int)", [Const], "vmem-pref-insts">;
 def __builtin_amdgcn_global_prefetch : AMDGPUBuiltin<"void(void const address_space<1> *, _Constant int)", [Const], "vmem-pref-insts">;
 
-def __builtin_amdgcn_global_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<1> *, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_global_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<1> *, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_global_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<1> *, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_flat_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<0> *, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_flat_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<0> *, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_flat_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<0> *, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_atomic_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<1> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_atomic_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<1> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_atomic_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<1> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_atomic_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<0> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_atomic_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<0> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_atomic_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<0> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
 def __builtin_amdgcn_cluster_load_b32 : AMDGPUBuiltin<"int(int address_space<1> *, _Constant int, int)", [Const], "mcast-load-insts,wavefrontsize32">;
 def __builtin_amdgcn_cluster_load_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<1> *, _Constant int, int)", [Const], "mcast-load-insts,wavefrontsize32">;
 def __builtin_amdgcn_cluster_load_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<1> *, _Constant int, int)", [Const], "mcast-load-insts,wavefrontsize32">;
diff --git a/clang/include/clang/Sema/SemaAMDGPU.h b/clang/include/clang/Sema/SemaAMDGPU.h
index bac812a9d4fcf..eb6e73dd7322f 100644
--- a/clang/include/clang/Sema/SemaAMDGPU.h
+++ b/clang/include/clang/Sema/SemaAMDGPU.h
@@ -26,7 +26,19 @@ class SemaAMDGPU : public SemaBase {
 
   bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
 
+  /// Emits a diagnostic if the arg at \p ArgIdx of \p TheCall is not a string
+  /// literal. \returns true if a diagnostic was emitted.
+  bool checkStringLiteralArg(CallExpr *TheCall, unsigned ArgIdx);
+
+  /// Emits a diagnostic if the arg at \p ArgIdx of \p TheCall is not an
+  /// atomic ordering encoded in the C ABI format, or if the ordering is not
+  /// valid for the operation type as defined by \p MayLoad and \p MayStore.
+  /// \returns true if a diagnostic was emitted.
+  bool checkAtomicOrderingCABIArg(CallExpr *TheCall, unsigned ArgIdx,
+                                  bool MayLoad, bool MayStore);
+
   bool checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore);
+  bool checkAtomicMonitorLoad(CallExpr *TheCall);
 
   bool checkMovDPPFunctionCall(CallExpr *TheCall, unsigned NumArgs,
                                unsigned NumDataArgs);
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index a096ed27a788e..07ce81d56a1a4 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -366,6 +366,14 @@ void CodeGenFunction::AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
   Inst->setMetadata(LLVMContext::MD_mmra, MMRAMetadata::getMD(Ctx, MMRAs));
 }
 
+static llvm::MetadataAsValue *getStringAsMDValue(llvm::LLVMContext &Ctx,
+                                                 const clang::Expr *E) {
+  StringRef Arg =
+      cast<clang::StringLiteral>(E->IgnoreParenCasts())->getString();
+  llvm::MDNode *MD = llvm::MDNode::get(Ctx, {llvm::MDString::get(Ctx, Arg)});
+  return llvm::MetadataAsValue::get(Ctx, MD);
+}
+
 static Intrinsic::ID getIntrinsicIDforWaveReduction(unsigned BuiltinID) {
   switch (BuiltinID) {
   default:
@@ -789,40 +797,42 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
     llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
     return Builder.CreateCall(F, {Addr});
   }
-  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
-  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
-  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
-  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
-  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
-  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128: {
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b32:
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b64:
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b128:
+  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b32:
+  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b64:
+  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b128: {
 
     Intrinsic::ID IID;
     switch (BuiltinID) {
-    case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
-      IID = Intrinsic::amdgcn_global_load_monitor_b32;
+    case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b32:
+      IID = Intrinsic::amdgcn_global_atomic_load_monitor_b32;
       break;
-    case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
-      IID = Intrinsic::amdgcn_global_load_monitor_b64;
+    case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b64:
+      IID = Intrinsic::amdgcn_global_atomic_load_monitor_b64;
       break;
-    case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
-      IID = Intrinsic::amdgcn_global_load_monitor_b128;
+    case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b128:
+      IID = Intrinsic::amdgcn_global_atomic_load_monitor_b128;
       break;
-    case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
-      IID = Intrinsic::amdgcn_flat_load_monitor_b32;
+    case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b32:
+      IID = Intrinsic::amdgcn_flat_atomic_load_monitor_b32;
       break;
-    case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
-      IID = Intrinsic::amdgcn_flat_load_monitor_b64;
+    case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b64:
+      IID = Intrinsic::amdgcn_flat_atomic_load_monitor_b64;
       break;
-    case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128:
-      IID = Intrinsic::amdgcn_flat_load_monitor_b128;
+    case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b128:
+      IID = Intrinsic::amdgcn_flat_atomic_load_monitor_b128;
       break;
     }
 
+    LLVMContext &Ctx = CGM.getLLVMContext();
     llvm::Type *LoadTy = ConvertType(E->getType());
     llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
-    llvm::Value *Val = EmitScalarExpr(E->getArg(1));
+    llvm::Value *AO = EmitScalarExpr(E->getArg(1));
+    llvm::Value *Scope = getStringAsMDValue(Ctx, E->getArg(2));
     llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
-    return Builder.CreateCall(F, {Addr, Val});
+    return Builder.CreateCall(F, {Addr, AO, Scope});
   }
   case AMDGPU::BI__builtin_amdgcn_cluster_load_b32:
   case AMDGPU::BI__builtin_amdgcn_cluster_load_b64:
@@ -884,10 +894,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
     const unsigned ScopeArg = E->getNumArgs() - 1;
     for (unsigned i = 0; i != ScopeArg; ++i)
       Args.push_back(EmitScalarExpr(E->getArg(i)));
-    StringRef Arg = cast<StringLiteral>(E->getArg(ScopeArg)->IgnoreParenCasts())
-                        ->getString();
-    llvm::MDNode *MD = llvm::MDNode::get(Ctx, {llvm::MDString::get(Ctx, Arg)});
-    Args.push_back(llvm::MetadataAsValue::get(Ctx, MD));
+    Args.push_back(getStringAsMDValue(Ctx, E->getArg(ScopeArg)));
     // Intrinsic is typed based on the pointer AS. Pointer is always the first
     // argument.
     llvm::Function *F = CGM.getIntrinsic(IID, {Args[0]->getType()});
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index 4261e1849133f..a53cadd27a184 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -119,6 +119,13 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
   case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_16x8B:
   case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_8x16B:
     return checkCoopAtomicFunctionCall(TheCall, /*IsStore=*/true);
+  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b32:
+  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b64:
+  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b128:
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b32:
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b64:
+  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b128:
+    return checkAtomicMonitorLoad(TheCall);
   case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f32_i32:
   case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f32_i32:
   case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f16_i32:
@@ -341,22 +348,9 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
   return false;
 }
 
-bool SemaAMDGPU::checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore) {
-  bool Fail = false;
-
-  // First argument is a global or generic pointer.
-  Expr *PtrArg = TheCall->getArg(0);
-  QualType PtrTy = PtrArg->getType()->getPointeeType();
-  unsigned AS = getASTContext().getTargetAddressSpace(PtrTy.getAddressSpace());
-  if (AS != llvm::AMDGPUAS::FLAT_ADDRESS &&
-      AS != llvm::AMDGPUAS::GLOBAL_ADDRESS) {
-    Fail = true;
-    Diag(TheCall->getBeginLoc(), diag::err_amdgcn_coop_atomic_invalid_as)
-        << PtrArg->getSourceRange();
-  }
-
-  // Check atomic ordering
-  Expr *AtomicOrdArg = TheCall->getArg(IsStore ? 2 : 1);
+bool SemaAMDGPU::checkAtomicOrderingCABIArg(CallExpr *TheCall, unsigned ArgIdx,
+                                            bool MayLoad, bool MayStore) {
+  Expr *AtomicOrdArg = TheCall->getArg(ArgIdx);
   Expr::EvalResult AtomicOrdArgRes;
   if (!AtomicOrdArg->EvaluateAsInt(AtomicOrdArgRes, getASTContext()))
     llvm_unreachable("Intrinsic requires imm for atomic ordering argument!");
@@ -366,22 +360,55 @@ bool SemaAMDGPU::checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore) {
   // Atomic ordering cannot be acq_rel in any case, acquire for stores or
   // release for loads.
   if (!llvm::isValidAtomicOrderingCABI((unsigned)Ord) ||
-      (Ord == llvm::AtomicOrderingCABI::acq_rel) ||
-      Ord == (IsStore ? llvm::AtomicOrderingCABI::acquire
-                      : llvm::AtomicOrderingCABI::release)) {
+      (!(MayLoad && MayStore) && (Ord == llvm::AtomicOrderingCABI::acq_rel)) ||
+      (!MayLoad && Ord == llvm::AtomicOrderingCABI::acquire) ||
+      (!MayStore && Ord == llvm::AtomicOrderingCABI::release)) {
     return Diag(AtomicOrdArg->getBeginLoc(),
                 diag::warn_atomic_op_has_invalid_memory_order)
            << 0 << AtomicOrdArg->getSourceRange();
   }
 
-  // Last argument is a string literal
+  return false;
+}
+
+bool SemaAMDGPU::checkStringLiteralArg(CallExpr *TheCall, unsigned ArgIdx) {
   Expr *Arg = TheCall->getArg(TheCall->getNumArgs() - 1);
   if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) {
-    Fail = true;
     Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
         << Arg->getSourceRange();
+    return true;
+  }
+  return false;
+}
+
+bool SemaAMDGPU::checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore) {
+  bool Fail = false;
+
+  // First argument is a global or generic pointer.
+  Expr *PtrArg = TheCall->getArg(0);
+  QualType PtrTy = PtrArg->getType()->getPointeeType();
+  unsigned AS = getASTContext().getTargetAddressSpace(PtrTy.getAddressSpace());
+  if (AS != llvm::AMDGPUAS::FLAT_ADDRESS &&
+      AS != llvm::AMDGPUAS::GLOBAL_ADDRESS) {
+    Fail = true;
+    Diag(TheCall->getBeginLoc(), diag::err_amdgcn_coop_atomic_invalid_as)
+        << PtrArg->getSourceRange();
   }
 
+  // Check atomic ordering
+  Fail |= checkAtomicOrderingCABIArg(
+      TheCall, IsStore ? 2 : 1, /*MayLoad=*/!IsStore, /*MayStore=*/IsStore);
+  // Last argument is the syncscope as a string literal.
+  Fail |= checkStringLiteralArg(TheCall, TheCall->getNumArgs() - 1);
+
+  return Fail;
+}
+
+bool SemaAMDGPU::checkAtomicMonitorLoad(CallExpr *TheCall) {
+  bool Fail = false;
+  Fail |= checkAtomicOrderingCABIArg(TheCall, 1, /*MayLoad=*/true,
+                                     /*MayStore=*/false);
+  Fail |= checkStringLiteralArg(TheCall, 2);
   return Fail;
 }
 
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
index f2552d40fa273..efdbfc25714fb 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
@@ -5,62 +5,62 @@
 typedef int    v2i   __attribute__((ext_vector_type(2)));
 typedef int    v4i   __attribute__((ext_vector_type(4)));
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_global_load_monitor_b32(
+// CHECK-GFX1250-LABEL: @test_amdgcn_global_atomic_load_monitor_b32(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) [[INPTR:%.*]], i32 1)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr addrspace(1) [[INPTR:%.*]], i32 0, metadata [[META8:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret i32 [[TMP0]]
 //
-int test_amdgcn_global_load_monitor_b32(global int* inptr)
+int test_amdgcn_global_atomic_load_monitor_b32(global int* inptr)
 {
-  return __builtin_amdgcn_global_load_monitor_b32(inptr, 1);
+  return __builtin_amdgcn_global_atomic_load_monitor_b32(inptr, __ATOMIC_RELAXED, "");
 }
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_global_load_monitor_b64(
+// CHECK-GFX1250-LABEL: @test_amdgcn_global_atomic_load_monitor_b64(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) [[INPTR:%.*]], i32 10)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr addrspace(1) [[INPTR:%.*]], i32 2, metadata [[META9:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret <2 x i32> [[TMP0]]
 //
-v2i test_amdgcn_global_load_monitor_b64(global v2i* inptr)
+v2i test_amdgcn_global_atomic_load_monitor_b64(global v2i* inptr)
 {
-  return __builtin_amdgcn_global_load_monitor_b64(inptr, 10);
+  return __builtin_amdgcn_global_atomic_load_monitor_b64(inptr, __ATOMIC_ACQUIRE, "agent");
 }
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_global_load_monitor_b128(
+// CHECK-GFX1250-LABEL: @test_amdgcn_global_atomic_load_monitor_b128(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1) [[INPTR:%.*]], i32 22)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> @llvm.amdgcn.global.atomic.load.monitor.b128.v4i32(ptr addrspace(1) [[INPTR:%.*]], i32 2, metadata [[META10:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret <4 x i32> [[TMP0]]
 //
-v4i test_amdgcn_global_load_monitor_b128(global v4i* inptr)
+v4i test_amdgcn_global_atomic_load_monitor_b128(global v4i* inptr)
 {
-  return __builtin_amdgcn_global_load_monitor_b128(inptr, 22);
+  return __builtin_amdgcn_global_atomic_load_monitor_b128(inptr, __ATOMIC_ACQUIRE, "workgroup");
 }
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_flat_load_monitor_b32(
+// CHECK-GFX1250-LABEL: @test_amdgcn_flat_atomic_load_monitor_b32(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr [[INPTR:%.*]], i32 27)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.flat.atomic.load.monitor.b32.i32(ptr [[INPTR:%.*]], i32 0, metadata [[META8]])
 // CHECK-GFX1250-NEXT:    ret i32 [[TMP0]]
 //
-int test_amdgcn_flat_load_monitor_b32(int* inptr)
+int test_amdgcn_flat_atomic_load_monitor_b32(int* inptr)
 {
-  return __builtin_amdgcn_flat_load_monitor_b32(inptr, 27);
+  return __builtin_amdgcn_flat_atomic_load_monitor_b32(inptr, __ATOMIC_RELAXED, "");
 }
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_flat_load_monitor_b64(
+// CHECK-GFX1250-LABEL: @test_amdgcn_flat_atomic_load_monitor_b64(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> @llvm.amdgcn.flat.load.monitor.b64.v2i32(ptr [[INPTR:%.*]], i32 1)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b64.v2i32(ptr [[INPTR:%.*]], i32 5, metadata [[META11:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret <2 x i32> [[TMP0]]
 //
-v2i test_amdgcn_flat_load_monitor_b64(v2i* inptr)
+v2i test_amdgcn_flat_atomic_load_monitor_b64(v2i* inptr)
 {
-  return __builtin_amdgcn_flat_load_monitor_b64(inptr, 1);
+  return __builtin_amdgcn_flat_atomic_load_monitor_b64(inptr, __ATOMIC_SEQ_CST, "cluster");
 }
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_flat_load_monitor_b128(
+// CHECK-GFX1250-LABEL: @test_amdgcn_flat_atomic_load_monitor_b128(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr [[INPTR:%.*]], i32 0)
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b128.v4i32(ptr [[INPTR:%.*]], i32 0, metadata [[META8]])
 // CHECK-GFX1250-NEXT:    ret <4 x i32> [[TMP0]]
 //
-v4i test_amdgcn_flat_load_monitor_b128(v4i* inptr)
+v4i test_amdgcn_flat_atomic_load_monitor_b128(v4i* inptr)
 {
-  return __builtin_amdgcn_flat_load_monitor_b128(inptr, 0);
+  return __builtin_amdgcn_flat_atomic_load_monitor_b128(inptr, __ATOMIC_RELAXED, "");
 }
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
index da6a03bc93eeb..dfbe5e3b30396 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
@@ -94,15 +94,37 @@ void test_cvt_scale_pk(global half8 *outh8, global bfloat8 *outy8, uint2 src2,
   *outf16 = __builtin_amdgcn_cvt_scale_pk16_f32_bf6(src3, scale, 16); // expected-error {{argument value 16 is outside the valid range [0, 15]}}
 }
 
-void test_amdgcn_load_monitor(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
-                              global int* b32out, global v2i* b64out, global v4i* b128out, int cpol)
+void test_amdgcn_atomic_load_monitor_ao_constant(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
+                              global int* b32out, global v2i* b64out, global v4i* b128out, int ao)
 {
-  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, cpol); // expected-error {{'__builtin_amdgcn_global_load_monitor_b32' must be a constant integer}}
-  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, cpol); // expected-error {{'__builtin_amdgcn_global_load_monitor_b64' must be a constant integer}}
-  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, cpol); // expected-error {{'__builtin_amdgcn_global_load_monitor_b128' must be a constant integer}}
-  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, cpol); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b32' must be a constant integer}}
-  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, cpol); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b64' must be a constant integer}}
-  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, cpol); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b128' must be a constant integer}}
+  *b32out  = __builtin_amdgcn_global_atomic_load_monitor_b32(b32gaddr, ao, ""); // expected-error {{'__builtin_amdgcn_global_atomic_load_monitor_b32' must be a constant integer}}
+  *b64out  = __builtin_amdgcn_global_atomic_load_monitor_b64(b64gaddr, ao, ""); // expected-error {{'__builtin_amdgcn_global_atomic_load_monitor_b64' must be a constant integer}}
+  *b128out = __builtin_amdgcn_global_atomic_load_monitor_b128(b128gaddr, ao, ""); // expected-error {{'__builtin_amdgcn_global_atomic_load_monitor_b128' must be a constant integer}}
+  *b32out  = __builtin_amdgcn_flat_atomic_load_monitor_b32(b32faddr, ao, ""); // expected-error {{'__builtin_amdgcn_flat_atomic_load_monitor_b32' must be a constant integer}}
+  *b64out  = __builtin_amdgcn_flat_atomic_load_monitor_b64(b64faddr, ao, ""); // expected-error {{'__builtin_amdgcn_flat_atomic_load_monitor_b64' must be a constant integer}}
+  *b128out = __builtin_amdgcn_flat_atomic_load_monitor_b128(b128faddr, ao, ""); // expected-error {{'__builtin_amdgcn_flat_atomic_load_monitor_b128' must be a constant integer}}
+}
+
+void test_amdgcn_atomic_load_monitor_ao_valid(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
+                              global int* b32out, global v2i* b64out, global v4i* b128out)
+{
+  *b32out  = __builtin_amdgcn_global_atomic_load_monitor_b32(b32gaddr, __ATOMIC_RELEASE, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b64out  = __builtin_amdgcn_global_atomic_load_monitor_b64(b64gaddr, __ATOMIC_ACQ_REL, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b128out = __builtin_amdgcn_global_atomic_load_monitor_b128(b128gaddr, __ATOMIC_ACQ_REL, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b32out  = __builtin_amdgcn_flat_atomic_load_monitor_b32(b32faddr, __ATOMIC_RELEASE, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b64out  = __builtin_amdgcn_flat_atomic_load_monitor_b64(b64faddr, __ATOMIC_ACQ_REL, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b128out = __builtin_amdgcn_flat_atomic_load_monitor_b128(b128faddr, __ATOMIC_RELEASE, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
+}
+
+void test_amdgcn_atomic_load_monitor_scope_literal(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
+                              global int* b32out, global v2i* b64out, global v4i* b128out, const char* scope)
+{
+  *b32out  = __builtin_amdgcn_global_atomic_load_monitor_b32(b32gaddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
+  *b64out  = __builtin_amdgcn_global_atomic_load_monitor_b64(b64gaddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
+  *b128out = __builtin_amdgcn_global_atomic_load_monitor_b128(b128gaddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
+  *b32out  = __builtin_amdgcn_flat_atomic_load_monitor_b32(b32faddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
+  *b64out  = __builtin_amdgcn_flat_atomic_load_monitor_b64(b64faddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
+  *b128out = __builtin_amdgcn_flat_atomic_load_monitor_b128(b128faddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
 }
 
 void test_amdgcn_cluster_load(global int* addr32, global v2i* addr64, global v4i* addr128, global int* b32out, global v2i* b64out, global v4i* b128out, int cpol, int mask)
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index cd5410a31b98f..14fa218c25d02 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -1678,7 +1678,6 @@ The AMDGPU backend implements the following LLVM IR intrinsics.
 
                                                    Instruction cache prefetches are unsafe on invalid address.
 
-
   llvm.amdgcn.s.barrier                            Performs a barrier *signal* operation immediately followed
                                                    by a barrier *wait* operation on the *workgroup barrier* object.
                                                    see :ref:`amdgpu-amdhsa-execution-barriers`.
@@ -1691,6 +1690,32 @@ The AMDGPU backend implements the following LLVM IR intrinsics.
                                                    See :ref:`amdgpu-amdhsa-execution-barriers`.
                                                    Available starting GFX12.
 
+  llvm.amdgcn.flat.atomic.load.monitor             Available on GFX12.5 only.
+                                                   Corresponds to ``flat_load_monitor_b32/64/128`` (``.b32/64/128`` suffixes)
+                                                   instructions.
+                                                   For the purposes of the memory model, this is an atomic load operation in
+                                                   the generic (flat) address space.
+
+                                                   This intrinsic has 3 operands:
+
+                                                   * Flat pointer.
+                                                   * :ref:`Load Atomic Ordering<amdgpu-intrinsics-c-abi-atomic-memory-ordering-operand>`.
+                                                   * :ref:`Synchronization Scope<amdgpu-intrinsics-syncscope-metadata-operand>`.
+                                                     Note that the scope used must ensure that the L2 cache will be hit.
+
+  llvm.amdgcn.global.atomic.load.monitor           Available on GFX12.5 only.
+                                                   Corresponds to ``global_load_monitor_b32/64/128`` (``.b32/64/128`` suffixes)
+                                                   instructions.
+                                                   For the purposes of the memory model, this is an atomic load operation in
+                                                   the global address space.
+
+                                                   This intrinsic has 3 operands:
+
+                                                   * Global pointer.
+                                                   * :ref:`Load Atomic Ordering<amdgpu-intrinsics-c-abi-atomic-memory-ordering-operand>`.
+                                                   * :ref:`Synchronization Scope<amdgpu-intrinsics-syncscope-metadata-operand>`.
+                                                     Note that the scope used must ensure that the L2 cache will be hit.
+
   ==============================================   ==========================================================
 
 .. TODO::
@@ -1756,28 +1781,64 @@ then this intrinsic causes undefined behavior.
 
 The intrinsics are available for the global (``.p1`` suffix) and generic (``.p0`` suffix) address spaces.
 
-The atomic ordering operand (3rd operand for ``.store``, 2nd for ``.load``) is an integer that follows the
-C ABI encoding of atomic memory orderings. The supported values are in
-:ref:`the table below<amdgpu-cooperative-atomic-intrinsics-atomic-memory-orderings-table>`.
+The 3rd operand for ``.store`` or 2nd for ``.load`` intrinsics is the
+:ref:`atomic ordering<amdgpu-intrinsics-c-abi-atomic-memory-ordering-operand>` of the operation.
+
+The last operand of the intrinsic is the
+:ref:`synchronization scope<amdgpu-intrinsics-syncscope-metadata-operand>` of the operation.
+
+Intrinsic Operands
+~~~~~~~~~~~~~~~~~~
+
+.. _amdgpu-intrinsics-c-abi-atomic-memory-ordering-operand:
+
+C ABI Atomic Ordering Operand
++++++++++++++++++++++++++++++
+
+Intrinsic operands in this format are always ``i32`` integer constants whose value is
+determined by the C ABI encoding of atomic memory orderings. The supported values are in
+:ref:`the table below<amdgpu-intrinsics-c-abi-atomic-memory-orderings-table>`.
+
+  .. table:: AMDGPU Intrinsics C ABI Atomic Memory Ordering Values
+    :name: amdgpu-intrinsics-c-abi-atomic-memory-orderings-table
+
+    ========= ================ =================================
+    Value     Atomic Memory    Notes
+              Ordering
+    ========= ================ =================================
+    ``i32 0`` ``relaxed``      The default for unsupported values.
+
+    ``i32 2`` ``acquire``      Only for loads.
 
-  .. table:: AMDGPU Cooperative Atomic Intrinsics Atomic Memory Orderings
-    :name: amdgpu-cooperative-atomic-intrinsics-atomic-memory-orderings-table
+    ``i32 3`` ``release``      Only for stores.
 
-    ====== ================ =================================
-    Value  Atomic Memory    Notes
-           Ordering
-    ====== ================ =================================
-    ``0``  ``relaxed``      The default for unsupported values.
+    ``i32 5`` ``seq_cst``
+    ========= ================ =================================
 
-    ``2``  ``acquire``      Only for ``.load``
+Example:
 
-    ``3``  ``release``      Only for ``.store``
+.. code::
+
+  ; "i32 5" is the atomic ordering operand
+  %0 = tail call i32 @llvm.amdgcn.cooperative.atomic.load.32x4B.p0(ptr %addr, i32 5, metadata !0)
 
-    ``5``  ``seq_cst``
-    ====== ================ =================================
+.. _amdgpu-intrinsics-syncscope-metadata-operand:
+
+Syncscope Metadata Operand
+++++++++++++++++++++++++++
 
-The last argument of the intrinsic is the synchronization scope
-as a metadata string, which must be one of the supported :ref:`memory scopes<amdgpu-memory-scopes>`.
+Intrinsic operands in this format are metadata strings; each must be one of the supported
+:ref:`memory scopes<amdgpu-memory-scopes>`.
+The metadata node must consist of a single ``MDString`` at the top level.
+
+Example:
+
+.. code::
+
+  ; "metadata !0" is the syncscope metadata operand.
+  %0 = tail call i32 @llvm.amdgcn.cooperative.atomic.load.32x4B.p0(ptr %addr, i32 5, metadata !0)
+
+  !0 = !{ !"agent" }
 
 .. _amdgpu_metadata:
 
@@ -17246,7 +17307,7 @@ For GFX125x:
 
   This section is currently incomplete as work on the compiler is still ongoing.
   The following is a non-exhaustive list of unimplemented/undocumented features:
-  non-volatile bit code sequences, monitor and wait, globally accessing scratch atomics,
+  non-volatile bit code sequences, globally accessing scratch atomics,
   multicast loads, barriers (including split barriers) and cooperative atomics.
   Scalar operations memory model needs more elaboration as well.
 
@@ -17347,6 +17408,17 @@ For GFX125x:
     issued to every address at the same time. They are kept in order with other
     memory operations from the same wave.
 
+* ``global_load_monitor_*`` and ``flat_load_monitor_*`` instructions load
+  data and request that the wave be notified (see ``s_monitor_sleep``) if
+  the L2 cache line that holds the data is evicted or written to.
+
+  * In order to monitor a cache line in the L2 cache, these instructions must
+    ensure that the L2 cache is always hit by setting the ``SCOPE`` of the instruction
+    appropriately.
+  * For non-atomic and atomic code sequences, it is valid to replace
+    ``global_load_b32/64/128`` with a ``global_load_monitor_b32/64/128`` and a
+    ``flat_load_b32/64/128`` with a ``flat_load_monitor_b32/64/128``.
+
 Scalar memory operations are only used to access memory that is proven to not
 change during the execution of the kernel dispatch. This includes constant
 address space and global address space for program scope ``const`` variables.
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index a8eba9ed126b7..4e1bbe477d810 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -4204,25 +4204,24 @@ def int_amdgcn_cluster_load_b32         : AMDGPUClusterLoad<global_ptr_ty>;
 def int_amdgcn_cluster_load_b64         : AMDGPUClusterLoad<global_ptr_ty>;
 def int_amdgcn_cluster_load_b128        : AMDGPUClusterLoad<global_ptr_ty>;
 
-class AMDGPULoadMonitor<LLVMType ptr_ty>:
+class AMDGPUAtomicLoadMonitor<LLVMType ptr_ty>:
   Intrinsic<
     [llvm_any_ty],
     [ptr_ty,
-     llvm_i32_ty],  // gfx12+ cachepolicy:
-                    //   bits [0-2] = th
-                    //   bits [3-4] = scope
+     llvm_i32_ty,            // C ABI Atomic Ordering ID
+     llvm_metadata_ty],      // syncscope
     [IntrArgMemOnly, IntrReadMem, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<1>>,
      IntrWillReturn, IntrConvergent, IntrNoCallback, IntrNoFree],
     "",
-    [SDNPMemOperand]
+    [SDNPMemOperand, SDNPMayLoad]
   >;
 
-def int_amdgcn_flat_load_monitor_b32    : AMDGPULoadMonitor<flat_ptr_ty>;
-def int_amdgcn_flat_load_monitor_b64    : AMDGPULoadMonitor<flat_ptr_ty>;
-def int_amdgcn_flat_load_monitor_b128   : AMDGPULoadMonitor<flat_ptr_ty>;
-def int_amdgcn_global_load_monitor_b32  : AMDGPULoadMonitor<global_ptr_ty>;
-def int_amdgcn_global_load_monitor_b64  : AMDGPULoadMonitor<global_ptr_ty>;
-def int_amdgcn_global_load_monitor_b128 : AMDGPULoadMonitor<global_ptr_ty>;
+def int_amdgcn_flat_atomic_load_monitor_b32    : AMDGPUAtomicLoadMonitor<flat_ptr_ty>;
+def int_amdgcn_flat_atomic_load_monitor_b64    : AMDGPUAtomicLoadMonitor<flat_ptr_ty>;
+def int_amdgcn_flat_atomic_load_monitor_b128   : AMDGPUAtomicLoadMonitor<flat_ptr_ty>;
+def int_amdgcn_global_atomic_load_monitor_b32  : AMDGPUAtomicLoadMonitor<global_ptr_ty>;
+def int_amdgcn_global_atomic_load_monitor_b64  : AMDGPUAtomicLoadMonitor<global_ptr_ty>;
+def int_amdgcn_global_atomic_load_monitor_b128 : AMDGPUAtomicLoadMonitor<global_ptr_ty>;
 
 /// Emit an addrspacecast without null pointer checking.
 /// Should only be inserted by a pass based on analysis of an addrspacecast's src.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
index 76c1be8690e23..fd26af4f31197 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
@@ -335,6 +335,10 @@ def : GINodeEquiv<G_AMDGPU_WHOLE_WAVE_FUNC_SETUP, AMDGPUwhole_wave_setup>;
 
 def : GINodeEquiv<G_AMDGPU_SPONENTRY, sponentry>;
 
+def : GINodeEquiv<G_AMDGPU_FLAT_ATOMIC_LOAD_MONITOR, AMDGPUflat_atomic_load_monitor>;
+def : GINodeEquiv<G_AMDGPU_GLOBAL_ATOMIC_LOAD_MONITOR, AMDGPUglobal_atomic_load_monitor>;
+
+
 class GISelSop2Pat <
   SDPatternOperator node,
   Instruction inst,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
index 8a43c2da38346..1d109bce030e0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
@@ -402,6 +402,15 @@ def AMDGPUExportOp : SDTypeProfile<0, 8, [
 
 ]>;
 
+def AMDGPUflat_atomic_load_monitor : SDNode<
+  "AMDGPUISD::FLAT_ATOMIC_LOAD_MONITOR", SDTLoad,
+  [SDNPHasChain, SDNPMemOperand]
+>;
+
+def AMDGPUglobal_atomic_load_monitor : SDNode<
+  "AMDGPUISD::GLOBAL_ATOMIC_LOAD_MONITOR", SDTLoad,
+  [SDNPHasChain, SDNPMemOperand]
+>;
 
 //===----------------------------------------------------------------------===//
 // Flow Control Profile Types
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 857dace8d36d3..711fdccf1397a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -8199,6 +8199,26 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     B.buildStore(MI.getOperand(2), MI.getOperand(1), **MI.memoperands_begin());
     MI.eraseFromParent();
     return true;
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b32:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b64:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b128:
+    assert(MI.hasOneMemOperand() && "Expected IRTranslator to set MemOp!");
+    B.buildInstr(AMDGPU::G_AMDGPU_FLAT_ATOMIC_LOAD_MONITOR)
+        .add(MI.getOperand(0))
+        .add(MI.getOperand(2))
+        .addMemOperand(*MI.memoperands_begin());
+    MI.eraseFromParent();
+    return true;
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b32:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b64:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b128:
+    assert(MI.hasOneMemOperand() && "Expected IRTranslator to set MemOp!");
+    B.buildInstr(AMDGPU::G_AMDGPU_GLOBAL_ATOMIC_LOAD_MONITOR)
+        .add(MI.getOperand(0))
+        .add(MI.getOperand(2))
+        .addMemOperand(*MI.memoperands_begin());
+    MI.eraseFromParent();
+    return true;
   default: {
     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
             AMDGPU::getImageDimIntrinsicInfo(IntrID))
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 62a252add0091..9143ef9b700e1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -3499,6 +3499,8 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
     executeInWaterfallLoop(B, make_range(Start, End), SGPROperandRegs);
     break;
   }
+  case AMDGPU::G_AMDGPU_FLAT_ATOMIC_LOAD_MONITOR:
+  case AMDGPU::G_AMDGPU_GLOBAL_ATOMIC_LOAD_MONITOR:
   case AMDGPU::G_LOAD:
   case AMDGPU::G_ZEXTLOAD:
   case AMDGPU::G_SEXTLOAD: {
@@ -5342,12 +5344,12 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     case Intrinsic::amdgcn_ds_load_tr16_b128:
     case Intrinsic::amdgcn_ds_load_tr4_b64:
     case Intrinsic::amdgcn_ds_load_tr6_b96:
-    case Intrinsic::amdgcn_flat_load_monitor_b32:
-    case Intrinsic::amdgcn_flat_load_monitor_b64:
-    case Intrinsic::amdgcn_flat_load_monitor_b128:
-    case Intrinsic::amdgcn_global_load_monitor_b32:
-    case Intrinsic::amdgcn_global_load_monitor_b64:
-    case Intrinsic::amdgcn_global_load_monitor_b128:
+    // case Intrinsic::amdgcn_flat_load_monitor_b32:
+    // case Intrinsic::amdgcn_flat_load_monitor_b64:
+    // case Intrinsic::amdgcn_flat_load_monitor_b128:
+    // case Intrinsic::amdgcn_global_load_monitor_b32:
+    // case Intrinsic::amdgcn_global_load_monitor_b64:
+    // case Intrinsic::amdgcn_global_load_monitor_b128:
     case Intrinsic::amdgcn_ds_read_tr4_b64:
     case Intrinsic::amdgcn_ds_read_tr6_b96:
     case Intrinsic::amdgcn_ds_read_tr8_b64:
@@ -5762,6 +5764,14 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   case AMDGPU::G_AMDGPU_WHOLE_WAVE_FUNC_RETURN:
     OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1);
     break;
+  case AMDGPU::G_AMDGPU_FLAT_ATOMIC_LOAD_MONITOR:
+  case AMDGPU::G_AMDGPU_GLOBAL_ATOMIC_LOAD_MONITOR: {
+    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
+    unsigned PtrSize = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI);
+    OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
+    OpdsMapping[1] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, PtrSize);
+    break;
+  }
   }
 
   return getInstructionMapping(/*ID*/1, /*Cost*/1,
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 3ad15ae28e51e..19089b1cf002b 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -2389,13 +2389,13 @@ let WaveSizePredicate = isWave32,  OtherPredicates = [HasTransposeLoadF4F6Insts]
 }
 
 let OtherPredicates = [isGFX125xOnly] in {
-  def  : FlatLoadPat_CPOL <FLAT_LOAD_MONITOR_B32,  int_amdgcn_flat_load_monitor_b32,  i32>;
-  def  : FlatLoadPat_CPOL <FLAT_LOAD_MONITOR_B64,  int_amdgcn_flat_load_monitor_b64,  v2i32>;
-  def  : FlatLoadPat_CPOL <FLAT_LOAD_MONITOR_B128, int_amdgcn_flat_load_monitor_b128, v4i32>;
+  def  : FlatLoadPat <FLAT_LOAD_MONITOR_B32,  AMDGPUflat_atomic_load_monitor,  i32>;
+  def  : FlatLoadPat <FLAT_LOAD_MONITOR_B64,  AMDGPUflat_atomic_load_monitor,  v2i32>;
+  def  : FlatLoadPat <FLAT_LOAD_MONITOR_B128, AMDGPUflat_atomic_load_monitor, v4i32>;
 
-  defm : GlobalFLATLoadPats_CPOL <GLOBAL_LOAD_MONITOR_B32,  int_amdgcn_global_load_monitor_b32,  i32>;
-  defm : GlobalFLATLoadPats_CPOL <GLOBAL_LOAD_MONITOR_B64,  int_amdgcn_global_load_monitor_b64,  v2i32>;
-  defm : GlobalFLATLoadPats_CPOL <GLOBAL_LOAD_MONITOR_B128, int_amdgcn_global_load_monitor_b128, v4i32>;
+  defm : GlobalFLATLoadPats <GLOBAL_LOAD_MONITOR_B32,  AMDGPUglobal_atomic_load_monitor,  i32>;
+  defm : GlobalFLATLoadPats <GLOBAL_LOAD_MONITOR_B64,  AMDGPUglobal_atomic_load_monitor,  v2i32>;
+  defm : GlobalFLATLoadPats <GLOBAL_LOAD_MONITOR_B128, AMDGPUglobal_atomic_load_monitor, v4i32>;
 } // End SubtargetPredicate = isGFX125xOnly
 
 let OtherPredicates = [isGFX1250Plus] in {
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index cdf6fb97d0b3b..8a4e0cce09a88 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1293,51 +1293,54 @@ static unsigned getIntrMemWidth(unsigned IntrID) {
   case Intrinsic::amdgcn_global_store_async_from_lds_b32:
   case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
   case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b32:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b32:
     return 32;
   case Intrinsic::amdgcn_global_load_async_to_lds_b64:
   case Intrinsic::amdgcn_cluster_load_async_to_lds_b64:
   case Intrinsic::amdgcn_global_store_async_from_lds_b64:
   case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
   case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b64:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b64:
     return 64;
   case Intrinsic::amdgcn_global_load_async_to_lds_b128:
   case Intrinsic::amdgcn_cluster_load_async_to_lds_b128:
   case Intrinsic::amdgcn_global_store_async_from_lds_b128:
   case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
   case Intrinsic::amdgcn_cooperative_atomic_store_8x16B:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b128:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b128:
     return 128;
   default:
     llvm_unreachable("Unknown width");
   }
 }
 
-static void getCoopAtomicOperandsInfo(const CallBase &CI, bool IsLoad,
-                                      TargetLoweringBase::IntrinsicInfo &Info) {
-  Value *OrderingArg = CI.getArgOperand(IsLoad ? 1 : 2);
+static AtomicOrdering parseAtomicOrderingCABIArg(const CallBase &CI,
+                                                 unsigned ArgIdx) {
+  Value *OrderingArg = CI.getArgOperand(ArgIdx);
   unsigned Ord = cast<ConstantInt>(OrderingArg)->getZExtValue();
   switch (AtomicOrderingCABI(Ord)) {
   case AtomicOrderingCABI::acquire:
-    Info.order = AtomicOrdering::Acquire;
+    return AtomicOrdering::Acquire;
     break;
   case AtomicOrderingCABI::release:
-    Info.order = AtomicOrdering::Release;
+    return AtomicOrdering::Release;
     break;
   case AtomicOrderingCABI::seq_cst:
-    Info.order = AtomicOrdering::SequentiallyConsistent;
+    return AtomicOrdering::SequentiallyConsistent;
     break;
   default:
-    Info.order = AtomicOrdering::Monotonic;
-    break;
+    return AtomicOrdering::Monotonic;
   }
+}
 
-  Info.flags =
-      (IsLoad ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore);
-  Info.flags |= MOCooperative;
-
+static unsigned parseSyncscopeMDArg(const CallBase &CI, unsigned ArgIdx) {
   MDNode *ScopeMD = cast<MDNode>(
-      cast<MetadataAsValue>(CI.getArgOperand(IsLoad ? 2 : 3))->getMetadata());
+      cast<MetadataAsValue>(CI.getArgOperand(ArgIdx))->getMetadata());
   StringRef Scope = cast<MDString>(ScopeMD->getOperand(0))->getString();
-  Info.ssid = CI.getContext().getOrInsertSyncScopeID(Scope);
+  return CI.getContext().getOrInsertSyncScopeID(Scope);
 }
 
 void SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
@@ -1578,12 +1581,6 @@ void SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
     Infos.push_back(Info);
     return;
   }
-  case Intrinsic::amdgcn_flat_load_monitor_b32:
-  case Intrinsic::amdgcn_flat_load_monitor_b64:
-  case Intrinsic::amdgcn_flat_load_monitor_b128:
-  case Intrinsic::amdgcn_global_load_monitor_b32:
-  case Intrinsic::amdgcn_global_load_monitor_b64:
-  case Intrinsic::amdgcn_global_load_monitor_b128:
   case Intrinsic::amdgcn_cluster_load_b32:
   case Intrinsic::amdgcn_cluster_load_b64:
   case Intrinsic::amdgcn_cluster_load_b128:
@@ -1607,6 +1604,21 @@ void SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
     Infos.push_back(Info);
     return;
   }
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b32:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b64:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b128:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b32:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b64:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b128: {
+    Info.opc = ISD::INTRINSIC_W_CHAIN;
+    Info.memVT = EVT::getIntegerVT(CI.getContext(), getIntrMemWidth(IntrID));
+    Info.ptrVal = CI.getOperand(0);
+    Info.align.reset();
+    Info.flags = MachineMemOperand::MOLoad;
+    Info.order = parseAtomicOrderingCABIArg(CI, 1);
+    Info.ssid = parseSyncscopeMDArg(CI, 2);
+    Infos.push_back(Info);
+    return;
+  }
   case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
   case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
   case Intrinsic::amdgcn_cooperative_atomic_load_8x16B: {
@@ -1614,8 +1626,9 @@ void SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
     Info.memVT = EVT::getIntegerVT(CI.getContext(), getIntrMemWidth(IntrID));
     Info.ptrVal = CI.getOperand(0);
     Info.align.reset();
-    getCoopAtomicOperandsInfo(CI, /*IsLoad=*/true, Info);
-    Infos.push_back(Info);
+    Info.flags = (MachineMemOperand::MOLoad | MOCooperative);
+    Info.order = parseAtomicOrderingCABIArg(CI, 1);
+    Info.ssid = parseSyncscopeMDArg(CI, 2);
+    Infos.push_back(Info);
     return;
   }
   case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
@@ -1625,8 +1638,9 @@ void SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
     Info.memVT = EVT::getIntegerVT(CI.getContext(), getIntrMemWidth(IntrID));
     Info.ptrVal = CI.getArgOperand(0);
     Info.align.reset();
-    getCoopAtomicOperandsInfo(CI, /*IsLoad=*/false, Info);
-    Infos.push_back(Info);
+    Info.flags = (MachineMemOperand::MOStore | MOCooperative);
+    Info.order = parseAtomicOrderingCABIArg(CI, 2);
+    Info.ssid = parseSyncscopeMDArg(CI, 3);
+    Infos.push_back(Info);
     return;
   }
   case Intrinsic::amdgcn_ds_gws_init:
@@ -1801,15 +1815,9 @@ bool SITargetLowering::getAddrModeArguments(const IntrinsicInst *II,
   case Intrinsic::amdgcn_ds_atomic_barrier_arrive_rtn_b64:
   case Intrinsic::amdgcn_flat_atomic_fmax_num:
   case Intrinsic::amdgcn_flat_atomic_fmin_num:
-  case Intrinsic::amdgcn_flat_load_monitor_b128:
-  case Intrinsic::amdgcn_flat_load_monitor_b32:
-  case Intrinsic::amdgcn_flat_load_monitor_b64:
   case Intrinsic::amdgcn_global_atomic_fmax_num:
   case Intrinsic::amdgcn_global_atomic_fmin_num:
   case Intrinsic::amdgcn_global_atomic_ordered_add_b64:
-  case Intrinsic::amdgcn_global_load_monitor_b128:
-  case Intrinsic::amdgcn_global_load_monitor_b32:
-  case Intrinsic::amdgcn_global_load_monitor_b64:
   case Intrinsic::amdgcn_global_load_tr_b64:
   case Intrinsic::amdgcn_global_load_tr_b128:
   case Intrinsic::amdgcn_global_load_tr4_b64:
@@ -11265,6 +11273,26 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
     return DAG.getAtomicLoad(ISD::NON_EXTLOAD, DL, MII->getMemoryVT(), VT,
                              Chain, Ptr, MII->getMemOperand());
   }
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b32:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b64:
+  case Intrinsic::amdgcn_flat_atomic_load_monitor_b128: {
+    MemIntrinsicSDNode *MII = cast<MemIntrinsicSDNode>(Op);
+    SDValue Chain = Op->getOperand(0);
+    SDValue Ptr = Op->getOperand(2);
+    return DAG.getMemIntrinsicNode(AMDGPUISD::FLAT_ATOMIC_LOAD_MONITOR, DL,
+                                   Op->getVTList(), {Chain, Ptr},
+                                   MII->getMemoryVT(), MII->getMemOperand());
+  }
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b32:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b64:
+  case Intrinsic::amdgcn_global_atomic_load_monitor_b128: {
+    MemIntrinsicSDNode *MII = cast<MemIntrinsicSDNode>(Op);
+    SDValue Chain = Op->getOperand(0);
+    SDValue Ptr = Op->getOperand(2);
+    return DAG.getMemIntrinsicNode(AMDGPUISD::GLOBAL_ATOMIC_LOAD_MONITOR, DL,
+                                   Op->getVTList(), {Chain, Ptr},
+                                   MII->getMemoryVT(), MII->getMemOperand());
+  }
   default:
 
     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 9ce1cfac8efd7..d9262335737d8 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -4844,6 +4844,16 @@ def G_AMDGPU_SPONENTRY : AMDGPUGenericInstruction {
   let hasSideEffects = 0;
 }
 
+class AtomicLoadMonitorInstruction : AMDGPUGenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins ptype1:$ptr);
+  let hasSideEffects = 0;
+  let mayLoad = 1;
+}
+
+def G_AMDGPU_FLAT_ATOMIC_LOAD_MONITOR : AtomicLoadMonitorInstruction;
+def G_AMDGPU_GLOBAL_ATOMIC_LOAD_MONITOR : AtomicLoadMonitorInstruction;
+
 //============================================================================//
 // Dummy Instructions
 //============================================================================//
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll
index 910c55a041ede..7c2a78a4676db 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll
@@ -2,77 +2,82 @@
 ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s
 ; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
 
-declare i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1), i32)
-declare <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1), i32)
-declare <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1), i32)
-declare i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr, i32)
-declare <2 x i32> @llvm.amdgcn.flat.load.monitor.b64.v2i32(ptr, i32)
-declare <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr, i32)
-
-define amdgpu_ps void @global_load_monitor_b32_vaddr(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b32_vaddr:
+declare i32 @llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr addrspace(1), i32, metadata)
+declare <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr addrspace(1), i32, metadata)
+declare <4 x i32> @llvm.amdgcn.global.atomic.load.monitor.b128.v4i32(ptr addrspace(1), i32, metadata)
+declare i32 @llvm.amdgcn.flat.atomic.load.monitor.b32.i32(ptr, i32, metadata)
+declare <2 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b64.v2i32(ptr, i32, metadata)
+declare <4 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b128.v4i32(ptr, i32, metadata)
+
+
+define amdgpu_ps void @global_atomic_load_monitor_b32_vaddr_relaxed_sys(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_atomic_load_monitor_b32_vaddr_relaxed_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    global_load_monitor_b32 v0, v[0:1], off offset:32 th:TH_LOAD_NT
+; GFX1250-NEXT:    global_load_monitor_b32 v0, v[0:1], off offset:32 scope:SCOPE_SYS
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b32 v[2:3], v0, off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 1)
+  %val = call i32 @llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 0, metadata !0)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b32_saddr(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b32_saddr:
+define amdgpu_ps void @global_atomic_load_monitor_b32_saddr_relaxed_sys(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_atomic_load_monitor_b32_saddr_relaxed_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1250-NEXT:    global_load_monitor_b32 v2, v2, s[0:1] offset:32 th:TH_LOAD_HT scope:SCOPE_SE
+; GFX1250-NEXT:    global_load_monitor_b32 v2, v2, s[0:1] offset:32 scope:SCOPE_SYS
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b32 v[0:1], v2, off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 10)
+  %val = call i32 @llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 0, metadata !0)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b64_vaddr(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b64_vaddr:
+define amdgpu_ps void @global_atomic_load_monitor_b64_vaddr_acquire_agent(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_atomic_load_monitor_b64_vaddr_acquire_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    global_load_monitor_b64 v[0:1], v[0:1], off offset:32 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+; GFX1250-NEXT:    global_load_monitor_b64 v[0:1], v[0:1], off offset:32 scope:SCOPE_DEV
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 22)
+  %val = call <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 2, metadata !1)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b64_saddr(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b64_saddr:
+define amdgpu_ps void @global_atomic_load_monitor_b64_saddr_acquire_agent(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_atomic_load_monitor_b64_saddr_acquire_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v2, s[0:1] offset:32 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v2, s[0:1] offset:32 scope:SCOPE_DEV
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 27)
+  %val = call <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 2, metadata !1)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b128_vaddr(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b128_vaddr:
+define amdgpu_ps void @global_atomic_load_monitor_b128_vaddr_seq_cst_workgroup(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_atomic_load_monitor_b128_vaddr_seq_cst_workgroup:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    global_load_monitor_b128 v[4:7], v[0:1], off offset:32
@@ -81,122 +86,137 @@ define amdgpu_ps void @global_load_monitor_b128_vaddr(ptr addrspace(1) %addr, pt
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1) %gep, i32 0)
+  %val = call <4 x i32> @llvm.amdgcn.global.atomic.load.monitor.b128.v4i32(ptr addrspace(1) %gep, i32 5, metadata !2)
   store <4 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b128_saddr(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_load_monitor_b128_saddr:
+define amdgpu_ps void @global_atomic_load_monitor_b128_saddr_seq_cst_workgroup(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_atomic_load_monitor_b128_saddr_seq_cst_workgroup:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1250-NEXT:    global_load_monitor_b128 v[2:5], v2, s[0:1] offset:32 th:TH_LOAD_NT
+; GFX1250-NEXT:    global_load_monitor_b128 v[2:5], v2, s[0:1] offset:32
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b128 v[0:1], v[2:5], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1) %gep, i32 1)
+  %val = call <4 x i32> @llvm.amdgcn.global.atomic.load.monitor.b128.v4i32(ptr addrspace(1) %gep, i32 5, metadata !2)
   store <4 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @flat_load_monitor_b32(ptr %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: flat_load_monitor_b32:
+define amdgpu_ps void @flat_atomic_load_monitor_b32_seq_cst_sys(ptr %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: flat_atomic_load_monitor_b32_seq_cst_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    flat_load_monitor_b32 v0, v[0:1] offset:32 th:TH_LOAD_HT scope:SCOPE_SE
+; GFX1250-NEXT:    flat_load_monitor_b32 v0, v[0:1] offset:32 scope:SCOPE_SYS
 ; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b32 v[2:3], v0, off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr inbounds i64, ptr addrspace(0) %addr, i32 4
-  %val = call i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr addrspace(0) %gep, i32 10)
+  %val = call i32 @llvm.amdgcn.flat.atomic.load.monitor.b32.i32(ptr addrspace(0) %gep, i32 5, metadata !0)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @flat_load_monitor_b64(ptr %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: flat_load_monitor_b64:
+define amdgpu_ps void @flat_atomic_load_monitor_b64_seq_cst_agent(ptr %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: flat_atomic_load_monitor_b64_seq_cst_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    flat_load_monitor_b64 v[0:1], v[0:1] offset:32 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+; GFX1250-NEXT:    flat_load_monitor_b64 v[0:1], v[0:1] offset:32 scope:SCOPE_DEV
 ; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_DEV
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b64 v[2:3], v[0:1], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr inbounds i64, ptr addrspace(0) %addr, i32 4
-  %val = call <2 x i32> @llvm.amdgcn.flat.load.monitor.b64.v2i32(ptr addrspace(0) %gep, i32 22)
+  %val = call <2 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b64.v2i32(ptr addrspace(0) %gep, i32 5, metadata !1)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @flat_load_monitor_b128(ptr %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: flat_load_monitor_b128:
+define amdgpu_ps void @flat_atomic_load_monitor_b128_acquire_sys(ptr %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: flat_atomic_load_monitor_b128_acquire_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    flat_load_monitor_b128 v[4:7], v[0:1] offset:32 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+; GFX1250-NEXT:    flat_load_monitor_b128 v[4:7], v[0:1] offset:32 scope:SCOPE_SYS
 ; GFX1250-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b128 v[2:3], v[4:7], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr inbounds i64, ptr addrspace(0) %addr, i32 4
-  %val = call <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr addrspace(0) %gep, i32 27)
+  %val = call <4 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b128.v4i32(ptr addrspace(0) %gep, i32 2, metadata !0)
   store <4 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b32_saddr_scale_offset(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
-; GFX1250-LABEL: global_load_monitor_b32_saddr_scale_offset:
+define amdgpu_ps void @global_atomic_load_monitor_b32_saddr_scale_offset_acquire_agent(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
+; GFX1250-LABEL: global_atomic_load_monitor_b32_saddr_scale_offset_acquire_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    global_load_monitor_b32 v2, v2, s[0:1] scale_offset th:TH_LOAD_NT
+; GFX1250-NEXT:    global_load_monitor_b32 v2, v2, s[0:1] scale_offset scope:SCOPE_DEV
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_DEV
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b32 v[0:1], v2, off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %idxprom = sext i32 %idx to i64
   %gep = getelementptr i32, ptr addrspace(1) %addr, i64 %idxprom
-  %val = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 1)
+  %val = call i32 @llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 2, metadata !1)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b64_saddr_scale_offset(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
-; GFX1250-LABEL: global_load_monitor_b64_saddr_scale_offset:
+define amdgpu_ps void @global_atomic_load_monitor_b64_saddr_scale_offset_acquire_workgroup(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
+; GFX1250-LABEL: global_atomic_load_monitor_b64_saddr_scale_offset_acquire_workgroup:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v2, s[0:1] scale_offset th:TH_LOAD_NT
+; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v2, s[0:1] scale_offset
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %idxprom = sext i32 %idx to i64
   %gep = getelementptr i64, ptr addrspace(1) %addr, i64 %idxprom
-  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 1)
+  %val = call <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 2, metadata !2)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_load_monitor_b64_saddr_no_scale_offset(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
-; GFX1250-LABEL: global_load_monitor_b64_saddr_no_scale_offset:
+define amdgpu_ps void @global_atomic_load_monitor_b64_saddr_no_scale_offset_seq_cst_sys(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
+; GFX1250-LABEL: global_atomic_load_monitor_b64_saddr_no_scale_offset_seq_cst_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
 ; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX1250-NEXT:    v_lshl_add_u64 v[2:3], v[2:3], 2, s[0:1]
-; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v[2:3], off th:TH_LOAD_NT
+; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v[2:3], off scope:SCOPE_SYS
+; GFX1250-NEXT:    s_wait_loadcnt 0x0
+; GFX1250-NEXT:    global_inv scope:SCOPE_SYS
 ; GFX1250-NEXT:    s_wait_loadcnt 0x0
 ; GFX1250-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %idxprom = sext i32 %idx to i64
   %gep = getelementptr i32, ptr addrspace(1) %addr, i64 %idxprom
-  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 1)
+  %val = call <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 5, metadata !0)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
+
+!0 = !{ !"" }
+!1 = !{ !"agent" }
+!2 = !{ !"workgroup" }
+
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GFX1250-GISEL: {{.*}}
 ; GFX1250-SDAG: {{.*}}

>From 6384a76397e1cde69c77ed1f0fd68b11d4de6e19 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Fri, 23 Jan 2026 10:07:00 +0100
Subject: [PATCH 02/10] Revert to old name

---
 clang/include/clang/Basic/BuiltinsAMDGPU.td   | 12 +--
 clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp   | 36 ++++----
 clang/lib/Sema/SemaAMDGPU.cpp                 | 12 +--
 .../builtins-amdgcn-gfx1250-load-monitor.cl   | 48 +++++------
 .../builtins-amdgcn-error-gfx1250-param.cl    | 42 +++++-----
 llvm/docs/AMDGPUUsage.rst                     |  4 +-
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td      | 14 ++--
 llvm/lib/Target/AMDGPU/AMDGPUGISel.td         |  4 +-
 llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td     |  8 +-
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 16 ++--
 .../Target/AMDGPU/AMDGPURegisterBankInfo.cpp  | 14 +---
 llvm/lib/Target/AMDGPU/FLATInstructions.td    | 12 +--
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     | 40 ++++-----
 llvm/lib/Target/AMDGPU/SIInstructions.td      |  6 +-
 .../llvm.amdgcn.load.monitor.gfx1250.ll       | 84 +++++++++----------
 15 files changed, 173 insertions(+), 179 deletions(-)

diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.td b/clang/include/clang/Basic/BuiltinsAMDGPU.td
index 1902b35d83f07..d087d0745fb42 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.td
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.td
@@ -711,12 +711,12 @@ def __builtin_amdgcn_s_cluster_barrier : AMDGPUBuiltin<"void()", [], "gfx1250-in
 def __builtin_amdgcn_flat_prefetch : AMDGPUBuiltin<"void(void const address_space<0> *, _Constant int)", [Const], "vmem-pref-insts">;
 def __builtin_amdgcn_global_prefetch : AMDGPUBuiltin<"void(void const address_space<1> *, _Constant int)", [Const], "vmem-pref-insts">;
 
-def __builtin_amdgcn_global_atomic_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<1> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_global_atomic_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<1> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_global_atomic_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<1> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_flat_atomic_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<0> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_flat_atomic_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<0> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_flat_atomic_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<0> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<1> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<1> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<1> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<0> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<0> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<0> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
 def __builtin_amdgcn_cluster_load_b32 : AMDGPUBuiltin<"int(int address_space<1> *, _Constant int, int)", [Const], "mcast-load-insts,wavefrontsize32">;
 def __builtin_amdgcn_cluster_load_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<1> *, _Constant int, int)", [Const], "mcast-load-insts,wavefrontsize32">;
 def __builtin_amdgcn_cluster_load_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<1> *, _Constant int, int)", [Const], "mcast-load-insts,wavefrontsize32">;
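
The CodeGen and Sema tests further down in this patch exercise the three-argument form restored above: a pointer, a compile-time __ATOMIC_* memory-order constant, and a scope string literal. A minimal, illustrative OpenCL sketch of the argument shapes only (the helper names below are placeholders, not part of the patch):

typedef int v2i __attribute__((ext_vector_type(2)));

// Global monitor load: constant memory order, scope given as a string literal.
int monitor_load_one(global int *p) {
  return __builtin_amdgcn_global_load_monitor_b32(p, __ATOMIC_ACQUIRE, "agent");
}

// Flat monitor load of a 64-bit value at system scope (empty scope string).
v2i monitor_load_two(v2i *p) {
  return __builtin_amdgcn_flat_load_monitor_b64(p, __ATOMIC_RELAXED, "");
}
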
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index 07ce81d56a1a4..f81b432a0510c 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -797,32 +797,32 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
     llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
     return Builder.CreateCall(F, {Addr});
   }
-  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b32:
-  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b64:
-  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b128:
-  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b32:
-  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b64:
-  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b128: {
+  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
+  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
+  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
+  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
+  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
+  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128: {
 
     Intrinsic::ID IID;
     switch (BuiltinID) {
-    case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b32:
-      IID = Intrinsic::amdgcn_global_atomic_load_monitor_b32;
+    case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
+      IID = Intrinsic::amdgcn_global_load_monitor_b32;
       break;
-    case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b64:
-      IID = Intrinsic::amdgcn_global_atomic_load_monitor_b64;
+    case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
+      IID = Intrinsic::amdgcn_global_load_monitor_b64;
       break;
-    case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b128:
-      IID = Intrinsic::amdgcn_global_atomic_load_monitor_b128;
+    case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
+      IID = Intrinsic::amdgcn_global_load_monitor_b128;
       break;
-    case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b32:
-      IID = Intrinsic::amdgcn_flat_atomic_load_monitor_b32;
+    case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
+      IID = Intrinsic::amdgcn_flat_load_monitor_b32;
       break;
-    case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b64:
-      IID = Intrinsic::amdgcn_flat_atomic_load_monitor_b64;
+    case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
+      IID = Intrinsic::amdgcn_flat_load_monitor_b64;
       break;
-    case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b128:
-      IID = Intrinsic::amdgcn_flat_atomic_load_monitor_b128;
+    case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128:
+      IID = Intrinsic::amdgcn_flat_load_monitor_b128;
       break;
     }
 
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index a53cadd27a184..21b4bbfc5b162 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -119,12 +119,12 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
   case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_16x8B:
   case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_8x16B:
     return checkCoopAtomicFunctionCall(TheCall, /*IsStore=*/true);
-  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b32:
-  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b64:
-  case AMDGPU::BI__builtin_amdgcn_flat_atomic_load_monitor_b128:
-  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b32:
-  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b64:
-  case AMDGPU::BI__builtin_amdgcn_global_atomic_load_monitor_b128:
+  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
+  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
+  case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128:
+  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
+  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
+  case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
     return checkAtomicMonitorLoad(TheCall);
   case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f32_i32:
   case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f32_i32:
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
index efdbfc25714fb..49a74a8e16c6f 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
@@ -5,62 +5,62 @@
 typedef int    v2i   __attribute__((ext_vector_type(2)));
 typedef int    v4i   __attribute__((ext_vector_type(4)));
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_global_atomic_load_monitor_b32(
+// CHECK-GFX1250-LABEL: @test_amdgcn_global_load_monitor_b32(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr addrspace(1) [[INPTR:%.*]], i32 0, metadata [[META8:![0-9]+]])
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) [[INPTR:%.*]], i32 0, metadata [[META8:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret i32 [[TMP0]]
 //
-int test_amdgcn_global_atomic_load_monitor_b32(global int* inptr)
+int test_amdgcn_global_load_monitor_b32(global int* inptr)
 {
-  return __builtin_amdgcn_global_atomic_load_monitor_b32(inptr, __ATOMIC_RELAXED, "");
+  return __builtin_amdgcn_global_load_monitor_b32(inptr, __ATOMIC_RELAXED, "");
 }
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_global_atomic_load_monitor_b64(
+// CHECK-GFX1250-LABEL: @test_amdgcn_global_load_monitor_b64(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr addrspace(1) [[INPTR:%.*]], i32 2, metadata [[META9:![0-9]+]])
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) [[INPTR:%.*]], i32 2, metadata [[META9:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret <2 x i32> [[TMP0]]
 //
-v2i test_amdgcn_global_atomic_load_monitor_b64(global v2i* inptr)
+v2i test_amdgcn_global_load_monitor_b64(global v2i* inptr)
 {
-  return __builtin_amdgcn_global_atomic_load_monitor_b64(inptr, __ATOMIC_ACQUIRE, "agent");
+  return __builtin_amdgcn_global_load_monitor_b64(inptr, __ATOMIC_ACQUIRE, "agent");
 }
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_global_atomic_load_monitor_b128(
+// CHECK-GFX1250-LABEL: @test_amdgcn_global_load_monitor_b128(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> @llvm.amdgcn.global.atomic.load.monitor.b128.v4i32(ptr addrspace(1) [[INPTR:%.*]], i32 2, metadata [[META10:![0-9]+]])
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1) [[INPTR:%.*]], i32 2, metadata [[META10:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret <4 x i32> [[TMP0]]
 //
-v4i test_amdgcn_global_atomic_load_monitor_b128(global v4i* inptr)
+v4i test_amdgcn_global_load_monitor_b128(global v4i* inptr)
 {
-  return __builtin_amdgcn_global_atomic_load_monitor_b128(inptr, __ATOMIC_ACQUIRE, "workgroup");
+  return __builtin_amdgcn_global_load_monitor_b128(inptr, __ATOMIC_ACQUIRE, "workgroup");
 }
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_flat_atomic_load_monitor_b32(
+// CHECK-GFX1250-LABEL: @test_amdgcn_flat_load_monitor_b32(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.flat.atomic.load.monitor.b32.i32(ptr [[INPTR:%.*]], i32 0, metadata [[META8]])
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr [[INPTR:%.*]], i32 0, metadata [[META8]])
 // CHECK-GFX1250-NEXT:    ret i32 [[TMP0]]
 //
-int test_amdgcn_flat_atomic_load_monitor_b32(int* inptr)
+int test_amdgcn_flat_load_monitor_b32(int* inptr)
 {
-  return __builtin_amdgcn_flat_atomic_load_monitor_b32(inptr, __ATOMIC_RELAXED, "");
+  return __builtin_amdgcn_flat_load_monitor_b32(inptr, __ATOMIC_RELAXED, "");
 }
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_flat_atomic_load_monitor_b64(
+// CHECK-GFX1250-LABEL: @test_amdgcn_flat_load_monitor_b64(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b64.v2i32(ptr [[INPTR:%.*]], i32 5, metadata [[META11:![0-9]+]])
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <2 x i32> @llvm.amdgcn.flat.load.monitor.b64.v2i32(ptr [[INPTR:%.*]], i32 5, metadata [[META11:![0-9]+]])
 // CHECK-GFX1250-NEXT:    ret <2 x i32> [[TMP0]]
 //
-v2i test_amdgcn_flat_atomic_load_monitor_b64(v2i* inptr)
+v2i test_amdgcn_flat_load_monitor_b64(v2i* inptr)
 {
-  return __builtin_amdgcn_flat_atomic_load_monitor_b64(inptr, __ATOMIC_SEQ_CST, "cluster");
+  return __builtin_amdgcn_flat_load_monitor_b64(inptr, __ATOMIC_SEQ_CST, "cluster");
 }
 
-// CHECK-GFX1250-LABEL: @test_amdgcn_flat_atomic_load_monitor_b128(
+// CHECK-GFX1250-LABEL: @test_amdgcn_flat_load_monitor_b128(
 // CHECK-GFX1250-NEXT:  entry:
-// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b128.v4i32(ptr [[INPTR:%.*]], i32 0, metadata [[META8]])
+// CHECK-GFX1250-NEXT:    [[TMP0:%.*]] = tail call <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr [[INPTR:%.*]], i32 0, metadata [[META8]])
 // CHECK-GFX1250-NEXT:    ret <4 x i32> [[TMP0]]
 //
-v4i test_amdgcn_flat_atomic_load_monitor_b128(v4i* inptr)
+v4i test_amdgcn_flat_load_monitor_b128(v4i* inptr)
 {
-  return __builtin_amdgcn_flat_atomic_load_monitor_b128(inptr, __ATOMIC_RELAXED, "");
+  return __builtin_amdgcn_flat_load_monitor_b128(inptr, __ATOMIC_RELAXED, "");
 }
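
As the CHECK lines above show, the constant order is carried through as the intrinsic's i32 immediate (the usual __ATOMIC_* values: RELAXED = 0, ACQUIRE = 2, SEQ_CST = 5) and the scope string becomes the metadata operand. A compact, illustrative restatement of one of the pairs tested above:

typedef int v2i __attribute__((ext_vector_type(2)));

v2i acquire_agent_example(global v2i *p) {
  // Lowers to:
  //   call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(
  //            ptr addrspace(1) %p, i32 2, metadata !{!"agent"})
  return __builtin_amdgcn_global_load_monitor_b64(p, __ATOMIC_ACQUIRE, "agent");
}
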
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
index dfbe5e3b30396..5903060797bd5 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
@@ -94,37 +94,37 @@ void test_cvt_scale_pk(global half8 *outh8, global bfloat8 *outy8, uint2 src2,
   *outf16 = __builtin_amdgcn_cvt_scale_pk16_f32_bf6(src3, scale, 16); // expected-error {{argument value 16 is outside the valid range [0, 15]}}
 }
 
-void test_amdgcn_atomic_load_monitor_ao_constant(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
+void test_amdgcn_load_monitor_ao_constant(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
                               global int* b32out, global v2i* b64out, global v4i* b128out, int ao)
 {
-  *b32out  = __builtin_amdgcn_global_atomic_load_monitor_b32(b32gaddr, ao, ""); // expected-error {{'__builtin_amdgcn_global_atomic_load_monitor_b32' must be a constant integer}}
-  *b64out  = __builtin_amdgcn_global_atomic_load_monitor_b64(b64gaddr, ao, ""); // expected-error {{'__builtin_amdgcn_global_atomic_load_monitor_b64' must be a constant integer}}
-  *b128out = __builtin_amdgcn_global_atomic_load_monitor_b128(b128gaddr, ao, ""); // expected-error {{'__builtin_amdgcn_global_atomic_load_monitor_b128' must be a constant integer}}
-  *b32out  = __builtin_amdgcn_flat_atomic_load_monitor_b32(b32faddr, ao, ""); // expected-error {{'__builtin_amdgcn_flat_atomic_load_monitor_b32' must be a constant integer}}
-  *b64out  = __builtin_amdgcn_flat_atomic_load_monitor_b64(b64faddr, ao, ""); // expected-error {{'__builtin_amdgcn_flat_atomic_load_monitor_b64' must be a constant integer}}
-  *b128out = __builtin_amdgcn_flat_atomic_load_monitor_b128(b128faddr, ao, ""); // expected-error {{'__builtin_amdgcn_flat_atomic_load_monitor_b128' must be a constant integer}}
+  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, ao, ""); // expected-error {{'__builtin_amdgcn_global_load_monitor_b32' must be a constant integer}}
+  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, ao, ""); // expected-error {{'__builtin_amdgcn_global_load_monitor_b64' must be a constant integer}}
+  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, ao, ""); // expected-error {{'__builtin_amdgcn_global_load_monitor_b128' must be a constant integer}}
+  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, ao, ""); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b32' must be a constant integer}}
+  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, ao, ""); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b64' must be a constant integer}}
+  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, ao, ""); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b128' must be a constant integer}}
 }
 
-void test_amdgcn_atomic_load_monitor_ao_valid(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
+void test_amdgcn_load_monitor_ao_valid(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
                               global int* b32out, global v2i* b64out, global v4i* b128out)
 {
-  *b32out  = __builtin_amdgcn_global_atomic_load_monitor_b32(b32gaddr, __ATOMIC_RELEASE, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
-  *b64out  = __builtin_amdgcn_global_atomic_load_monitor_b64(b64gaddr, __ATOMIC_ACQ_REL, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
-  *b128out = __builtin_amdgcn_global_atomic_load_monitor_b128(b128gaddr, __ATOMIC_ACQ_REL, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
-  *b32out  = __builtin_amdgcn_flat_atomic_load_monitor_b32(b32faddr, __ATOMIC_RELEASE, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
-  *b64out  = __builtin_amdgcn_flat_atomic_load_monitor_b64(b64faddr, __ATOMIC_ACQ_REL, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
-  *b128out = __builtin_amdgcn_flat_atomic_load_monitor_b128(b128faddr, __ATOMIC_RELEASE, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, __ATOMIC_RELEASE, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, __ATOMIC_ACQ_REL, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, __ATOMIC_ACQ_REL, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, __ATOMIC_RELEASE, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, __ATOMIC_ACQ_REL, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, __ATOMIC_RELEASE, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
 }
 
-void test_amdgcn_atomic_load_monitor_scope_literal(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
+void test_amdgcn_load_monitor_scope_literal(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
                               global int* b32out, global v2i* b64out, global v4i* b128out, const char* scope)
 {
-  *b32out  = __builtin_amdgcn_global_atomic_load_monitor_b32(b32gaddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
-  *b64out  = __builtin_amdgcn_global_atomic_load_monitor_b64(b64gaddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
-  *b128out = __builtin_amdgcn_global_atomic_load_monitor_b128(b128gaddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
-  *b32out  = __builtin_amdgcn_flat_atomic_load_monitor_b32(b32faddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
-  *b64out  = __builtin_amdgcn_flat_atomic_load_monitor_b64(b64faddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
-  *b128out = __builtin_amdgcn_flat_atomic_load_monitor_b128(b128faddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
+  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
+  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
+  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
+  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
+  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
+  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
 }
 
 void test_amdgcn_cluster_load(global int* addr32, global v2i* addr64, global v4i* addr128, global int* b32out, global v2i* b64out, global v4i* b128out, int cpol, int mask)
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 14fa218c25d02..edf2922ebc527 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -1690,7 +1690,7 @@ The AMDGPU backend implements the following LLVM IR intrinsics.
                                                    See :ref:`amdgpu-amdhsa-execution-barriers`.
                                                    Available starting GFX12.
 
-  llvm.amdgcn.flat.atomic.load.monitor             Available on GFX12.5 only.
+  llvm.amdgcn.flat.load.monitor                    Available on GFX12.5 only.
                                                    Corresponds to ``flat_load_monitor_b32/64/128`` (``.b32/64/128`` suffixes)
                                                    instructions.
                                                    For the purposes of the memory model, this is an atomic load operation in
@@ -1703,7 +1703,7 @@ The AMDGPU backend implements the following LLVM IR intrinsics.
                                                    * :ref:`Synchronization Scope<amdgpu-intrinsics-syncscope-metadata-operand>`.
                                                      Note that the scope used must ensure that the L2 cache will be hit.
 
-  llvm.amdgcn.global.atomic.load.monitor           Available on GFX12.5 only.
+  llvm.amdgcn.global.load.monitor                  Available on GFX12.5 only.
                                                    Corresponds to ``global_load_monitor_b32/64/128`` (``.b32/64/128`` suffixes)
                                                    instructions.
                                                    For the purposes of the memory model, this is an atomic load operation in
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 4e1bbe477d810..3545dbf4bc5aa 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -4204,7 +4204,7 @@ def int_amdgcn_cluster_load_b32         : AMDGPUClusterLoad<global_ptr_ty>;
 def int_amdgcn_cluster_load_b64         : AMDGPUClusterLoad<global_ptr_ty>;
 def int_amdgcn_cluster_load_b128        : AMDGPUClusterLoad<global_ptr_ty>;
 
-class AMDGPUAtomicLoadMonitor<LLVMType ptr_ty>:
+class AMDGPULoadMonitor<LLVMType ptr_ty>:
   Intrinsic<
     [llvm_any_ty],
     [ptr_ty,
@@ -4216,12 +4216,12 @@ class AMDGPUAtomicLoadMonitor<LLVMType ptr_ty>:
     [SDNPMemOperand, SDNPMayLoad]
   >;
 
-def int_amdgcn_flat_atomic_load_monitor_b32    : AMDGPUAtomicLoadMonitor<flat_ptr_ty>;
-def int_amdgcn_flat_atomic_load_monitor_b64    : AMDGPUAtomicLoadMonitor<flat_ptr_ty>;
-def int_amdgcn_flat_atomic_load_monitor_b128   : AMDGPUAtomicLoadMonitor<flat_ptr_ty>;
-def int_amdgcn_global_atomic_load_monitor_b32  : AMDGPUAtomicLoadMonitor<global_ptr_ty>;
-def int_amdgcn_global_atomic_load_monitor_b64  : AMDGPUAtomicLoadMonitor<global_ptr_ty>;
-def int_amdgcn_global_atomic_load_monitor_b128 : AMDGPUAtomicLoadMonitor<global_ptr_ty>;
+def int_amdgcn_flat_load_monitor_b32    : AMDGPULoadMonitor<flat_ptr_ty>;
+def int_amdgcn_flat_load_monitor_b64    : AMDGPULoadMonitor<flat_ptr_ty>;
+def int_amdgcn_flat_load_monitor_b128   : AMDGPULoadMonitor<flat_ptr_ty>;
+def int_amdgcn_global_load_monitor_b32  : AMDGPULoadMonitor<global_ptr_ty>;
+def int_amdgcn_global_load_monitor_b64  : AMDGPULoadMonitor<global_ptr_ty>;
+def int_amdgcn_global_load_monitor_b128 : AMDGPULoadMonitor<global_ptr_ty>;
 
 /// Emit an addrspacecast without null pointer checking.
 /// Should only be inserted by a pass based on analysis of an addrspacecast's src.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
index fd26af4f31197..48ade591ca5f4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
@@ -335,8 +335,8 @@ def : GINodeEquiv<G_AMDGPU_WHOLE_WAVE_FUNC_SETUP, AMDGPUwhole_wave_setup>;
 
 def : GINodeEquiv<G_AMDGPU_SPONENTRY, sponentry>;
 
-def : GINodeEquiv<G_AMDGPU_FLAT_ATOMIC_LOAD_MONITOR, AMDGPUflat_atomic_load_monitor>;
-def : GINodeEquiv<G_AMDGPU_GLOBAL_ATOMIC_LOAD_MONITOR, AMDGPUglobal_atomic_load_monitor>;
+def : GINodeEquiv<G_AMDGPU_FLAT_LOAD_MONITOR, AMDGPUflat_load_monitor>;
+def : GINodeEquiv<G_AMDGPU_GLOBAL_LOAD_MONITOR, AMDGPUglobal_load_monitor>;
 
 
 class GISelSop2Pat <
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
index 1d109bce030e0..8dc5d45aa73ba 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
@@ -402,13 +402,13 @@ def AMDGPUExportOp : SDTypeProfile<0, 8, [
 
 ]>;
 
-def AMDGPUflat_atomic_load_monitor : SDNode<
-  "AMDGPUISD::FLAT_ATOMIC_LOAD_MONITOR", SDTLoad,
+def AMDGPUflat_load_monitor : SDNode<
+  "AMDGPUISD::FLAT_LOAD_MONITOR", SDTLoad,
   [SDNPHasChain, SDNPMemOperand]
 >;
 
-def AMDGPUglobal_atomic_load_monitor : SDNode<
-  "AMDGPUISD::GLOBAL_ATOMIC_LOAD_MONITOR", SDTLoad,
+def AMDGPUglobal_load_monitor : SDNode<
+  "AMDGPUISD::GLOBAL_LOAD_MONITOR", SDTLoad,
   [SDNPHasChain, SDNPMemOperand]
 >;
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 711fdccf1397a..ada26b5202097 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -8199,21 +8199,21 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     B.buildStore(MI.getOperand(2), MI.getOperand(1), **MI.memoperands_begin());
     MI.eraseFromParent();
     return true;
-  case Intrinsic::amdgcn_flat_atomic_load_monitor_b32:
-  case Intrinsic::amdgcn_flat_atomic_load_monitor_b64:
-  case Intrinsic::amdgcn_flat_atomic_load_monitor_b128:
+  case Intrinsic::amdgcn_flat_load_monitor_b32:
+  case Intrinsic::amdgcn_flat_load_monitor_b64:
+  case Intrinsic::amdgcn_flat_load_monitor_b128:
     assert(MI.hasOneMemOperand() && "Expected IRTranslator to set MemOp!");
-    B.buildInstr(AMDGPU::G_AMDGPU_FLAT_ATOMIC_LOAD_MONITOR)
+    B.buildInstr(AMDGPU::G_AMDGPU_FLAT_LOAD_MONITOR)
         .add(MI.getOperand(0))
         .add(MI.getOperand(2))
         .addMemOperand(*MI.memoperands_begin());
     MI.eraseFromParent();
     return true;
-  case Intrinsic::amdgcn_global_atomic_load_monitor_b32:
-  case Intrinsic::amdgcn_global_atomic_load_monitor_b64:
-  case Intrinsic::amdgcn_global_atomic_load_monitor_b128:
+  case Intrinsic::amdgcn_global_load_monitor_b32:
+  case Intrinsic::amdgcn_global_load_monitor_b64:
+  case Intrinsic::amdgcn_global_load_monitor_b128:
     assert(MI.hasOneMemOperand() && "Expected IRTranslator to set MemOp!");
-    B.buildInstr(AMDGPU::G_AMDGPU_GLOBAL_ATOMIC_LOAD_MONITOR)
+    B.buildInstr(AMDGPU::G_AMDGPU_GLOBAL_LOAD_MONITOR)
         .add(MI.getOperand(0))
         .add(MI.getOperand(2))
         .addMemOperand(*MI.memoperands_begin());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 9143ef9b700e1..04dbc0721ebf4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -3499,8 +3499,8 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
     executeInWaterfallLoop(B, make_range(Start, End), SGPROperandRegs);
     break;
   }
-  case AMDGPU::G_AMDGPU_FLAT_ATOMIC_LOAD_MONITOR:
-  case AMDGPU::G_AMDGPU_GLOBAL_ATOMIC_LOAD_MONITOR:
+  case AMDGPU::G_AMDGPU_FLAT_LOAD_MONITOR:
+  case AMDGPU::G_AMDGPU_GLOBAL_LOAD_MONITOR:
   case AMDGPU::G_LOAD:
   case AMDGPU::G_ZEXTLOAD:
   case AMDGPU::G_SEXTLOAD: {
@@ -5344,12 +5344,6 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     case Intrinsic::amdgcn_ds_load_tr16_b128:
     case Intrinsic::amdgcn_ds_load_tr4_b64:
     case Intrinsic::amdgcn_ds_load_tr6_b96:
-    // case Intrinsic::amdgcn_flat_load_monitor_b32:
-    // case Intrinsic::amdgcn_flat_load_monitor_b64:
-    // case Intrinsic::amdgcn_flat_load_monitor_b128:
-    // case Intrinsic::amdgcn_global_load_monitor_b32:
-    // case Intrinsic::amdgcn_global_load_monitor_b64:
-    // case Intrinsic::amdgcn_global_load_monitor_b128:
     case Intrinsic::amdgcn_ds_read_tr4_b64:
     case Intrinsic::amdgcn_ds_read_tr6_b96:
     case Intrinsic::amdgcn_ds_read_tr8_b64:
@@ -5764,8 +5758,8 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   case AMDGPU::G_AMDGPU_WHOLE_WAVE_FUNC_RETURN:
     OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VCCRegBankID, 1);
     break;
-  case AMDGPU::G_AMDGPU_FLAT_ATOMIC_LOAD_MONITOR:
-  case AMDGPU::G_AMDGPU_GLOBAL_ATOMIC_LOAD_MONITOR: {
+  case AMDGPU::G_AMDGPU_FLAT_LOAD_MONITOR:
+  case AMDGPU::G_AMDGPU_GLOBAL_LOAD_MONITOR: {
     unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, *TRI);
     unsigned PtrSize = getSizeInBits(MI.getOperand(1).getReg(), MRI, *TRI);
     OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::VGPRRegBankID, Size);
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 19089b1cf002b..feecd5825ac74 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -2389,13 +2389,13 @@ let WaveSizePredicate = isWave32,  OtherPredicates = [HasTransposeLoadF4F6Insts]
 }
 
 let OtherPredicates = [isGFX125xOnly] in {
-  def  : FlatLoadPat <FLAT_LOAD_MONITOR_B32,  AMDGPUflat_atomic_load_monitor,  i32>;
-  def  : FlatLoadPat <FLAT_LOAD_MONITOR_B64,  AMDGPUflat_atomic_load_monitor,  v2i32>;
-  def  : FlatLoadPat <FLAT_LOAD_MONITOR_B128, AMDGPUflat_atomic_load_monitor, v4i32>;
+  def  : FlatLoadPat <FLAT_LOAD_MONITOR_B32,  AMDGPUflat_load_monitor,  i32>;
+  def  : FlatLoadPat <FLAT_LOAD_MONITOR_B64,  AMDGPUflat_load_monitor,  v2i32>;
+  def  : FlatLoadPat <FLAT_LOAD_MONITOR_B128, AMDGPUflat_load_monitor, v4i32>;
 
-  defm : GlobalFLATLoadPats <GLOBAL_LOAD_MONITOR_B32,  AMDGPUglobal_atomic_load_monitor,  i32>;
-  defm : GlobalFLATLoadPats <GLOBAL_LOAD_MONITOR_B64,  AMDGPUglobal_atomic_load_monitor,  v2i32>;
-  defm : GlobalFLATLoadPats <GLOBAL_LOAD_MONITOR_B128, AMDGPUglobal_atomic_load_monitor, v4i32>;
+  defm : GlobalFLATLoadPats <GLOBAL_LOAD_MONITOR_B32,  AMDGPUglobal_load_monitor,  i32>;
+  defm : GlobalFLATLoadPats <GLOBAL_LOAD_MONITOR_B64,  AMDGPUglobal_load_monitor,  v2i32>;
+  defm : GlobalFLATLoadPats <GLOBAL_LOAD_MONITOR_B128, AMDGPUglobal_load_monitor, v4i32>;
 } // End SubtargetPredicate = isGFX125xOnly
 
 let OtherPredicates = [isGFX1250Plus] in {
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 8a4e0cce09a88..710d29ae949c3 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1293,24 +1293,24 @@ static unsigned getIntrMemWidth(unsigned IntrID) {
   case Intrinsic::amdgcn_global_store_async_from_lds_b32:
   case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
   case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
-  case Intrinsic::amdgcn_flat_atomic_load_monitor_b32:
-  case Intrinsic::amdgcn_global_atomic_load_monitor_b32:
+  case Intrinsic::amdgcn_flat_load_monitor_b32:
+  case Intrinsic::amdgcn_global_load_monitor_b32:
     return 32;
   case Intrinsic::amdgcn_global_load_async_to_lds_b64:
   case Intrinsic::amdgcn_cluster_load_async_to_lds_b64:
   case Intrinsic::amdgcn_global_store_async_from_lds_b64:
   case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
   case Intrinsic::amdgcn_cooperative_atomic_store_16x8B:
-  case Intrinsic::amdgcn_flat_atomic_load_monitor_b64:
-  case Intrinsic::amdgcn_global_atomic_load_monitor_b64:
+  case Intrinsic::amdgcn_flat_load_monitor_b64:
+  case Intrinsic::amdgcn_global_load_monitor_b64:
     return 64;
   case Intrinsic::amdgcn_global_load_async_to_lds_b128:
   case Intrinsic::amdgcn_cluster_load_async_to_lds_b128:
   case Intrinsic::amdgcn_global_store_async_from_lds_b128:
   case Intrinsic::amdgcn_cooperative_atomic_load_8x16B:
   case Intrinsic::amdgcn_cooperative_atomic_store_8x16B:
-  case Intrinsic::amdgcn_flat_atomic_load_monitor_b128:
-  case Intrinsic::amdgcn_global_atomic_load_monitor_b128:
+  case Intrinsic::amdgcn_flat_load_monitor_b128:
+  case Intrinsic::amdgcn_global_load_monitor_b128:
     return 128;
   default:
     llvm_unreachable("Unknown width");
@@ -1604,12 +1604,12 @@ void SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
     Infos.push_back(Info);
     return;
   }
-  case Intrinsic::amdgcn_flat_atomic_load_monitor_b32:
-  case Intrinsic::amdgcn_flat_atomic_load_monitor_b64:
-  case Intrinsic::amdgcn_flat_atomic_load_monitor_b128:
-  case Intrinsic::amdgcn_global_atomic_load_monitor_b32:
-  case Intrinsic::amdgcn_global_atomic_load_monitor_b64:
-  case Intrinsic::amdgcn_global_atomic_load_monitor_b128: {
+  case Intrinsic::amdgcn_flat_load_monitor_b32:
+  case Intrinsic::amdgcn_flat_load_monitor_b64:
+  case Intrinsic::amdgcn_flat_load_monitor_b128:
+  case Intrinsic::amdgcn_global_load_monitor_b32:
+  case Intrinsic::amdgcn_global_load_monitor_b64:
+  case Intrinsic::amdgcn_global_load_monitor_b128: {
     Info.opc = ISD::INTRINSIC_W_CHAIN;
     Info.memVT = EVT::getIntegerVT(CI.getContext(), getIntrMemWidth(IntrID));
     Info.ptrVal = CI.getOperand(0);
@@ -11273,23 +11273,23 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
     return DAG.getAtomicLoad(ISD::NON_EXTLOAD, DL, MII->getMemoryVT(), VT,
                              Chain, Ptr, MII->getMemOperand());
   }
-  case Intrinsic::amdgcn_flat_atomic_load_monitor_b32:
-  case Intrinsic::amdgcn_flat_atomic_load_monitor_b64:
-  case Intrinsic::amdgcn_flat_atomic_load_monitor_b128: {
+  case Intrinsic::amdgcn_flat_load_monitor_b32:
+  case Intrinsic::amdgcn_flat_load_monitor_b64:
+  case Intrinsic::amdgcn_flat_load_monitor_b128: {
     MemIntrinsicSDNode *MII = cast<MemIntrinsicSDNode>(Op);
     SDValue Chain = Op->getOperand(0);
     SDValue Ptr = Op->getOperand(2);
-    return DAG.getMemIntrinsicNode(AMDGPUISD::FLAT_ATOMIC_LOAD_MONITOR, DL,
+    return DAG.getMemIntrinsicNode(AMDGPUISD::FLAT_LOAD_MONITOR, DL,
                                    Op->getVTList(), {Chain, Ptr},
                                    MII->getMemoryVT(), MII->getMemOperand());
   }
-  case Intrinsic::amdgcn_global_atomic_load_monitor_b32:
-  case Intrinsic::amdgcn_global_atomic_load_monitor_b64:
-  case Intrinsic::amdgcn_global_atomic_load_monitor_b128: {
+  case Intrinsic::amdgcn_global_load_monitor_b32:
+  case Intrinsic::amdgcn_global_load_monitor_b64:
+  case Intrinsic::amdgcn_global_load_monitor_b128: {
     MemIntrinsicSDNode *MII = cast<MemIntrinsicSDNode>(Op);
     SDValue Chain = Op->getOperand(0);
     SDValue Ptr = Op->getOperand(2);
-    return DAG.getMemIntrinsicNode(AMDGPUISD::GLOBAL_ATOMIC_LOAD_MONITOR, DL,
+    return DAG.getMemIntrinsicNode(AMDGPUISD::GLOBAL_LOAD_MONITOR, DL,
                                    Op->getVTList(), {Chain, Ptr},
                                    MII->getMemoryVT(), MII->getMemOperand());
   }
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index d9262335737d8..7d4f72ecd414a 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -4844,15 +4844,15 @@ def G_AMDGPU_SPONENTRY : AMDGPUGenericInstruction {
   let hasSideEffects = 0;
 }
 
-class AtomicLoadMonitorInstruction : AMDGPUGenericInstruction {
+class LoadMonitorInstruction : AMDGPUGenericInstruction {
   let OutOperandList = (outs type0:$dst);
   let InOperandList = (ins ptype1:$ptr);
   let hasSideEffects = 0;
   let mayLoad = 1;
 }
 
-def G_AMDGPU_FLAT_ATOMIC_LOAD_MONITOR : AtomicLoadMonitorInstruction;
-def G_AMDGPU_GLOBAL_ATOMIC_LOAD_MONITOR : AtomicLoadMonitorInstruction;
+def G_AMDGPU_FLAT_LOAD_MONITOR   : LoadMonitorInstruction;
+def G_AMDGPU_GLOBAL_LOAD_MONITOR : LoadMonitorInstruction;
 
 //============================================================================//
 // Dummy Instructions
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll
index 7c2a78a4676db..f353deab46672 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.monitor.gfx1250.ll
@@ -2,16 +2,16 @@
 ; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s
 ; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
 
-declare i32 @llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr addrspace(1), i32, metadata)
-declare <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr addrspace(1), i32, metadata)
-declare <4 x i32> @llvm.amdgcn.global.atomic.load.monitor.b128.v4i32(ptr addrspace(1), i32, metadata)
-declare i32 @llvm.amdgcn.flat.atomic.load.monitor.b32.i32(ptr, i32, metadata)
-declare <2 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b64.v2i32(ptr, i32, metadata)
-declare <4 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b128.v4i32(ptr, i32, metadata)
+declare i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1), i32, metadata)
+declare <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1), i32, metadata)
+declare <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1), i32, metadata)
+declare i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr, i32, metadata)
+declare <2 x i32> @llvm.amdgcn.flat.load.monitor.b64.v2i32(ptr, i32, metadata)
+declare <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr, i32, metadata)
 
 
-define amdgpu_ps void @global_atomic_load_monitor_b32_vaddr_relaxed_sys(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_atomic_load_monitor_b32_vaddr_relaxed_sys:
+define amdgpu_ps void @global_load_monitor_b32_vaddr_relaxed_sys(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_load_monitor_b32_vaddr_relaxed_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    global_load_monitor_b32 v0, v[0:1], off offset:32 scope:SCOPE_SYS
@@ -20,13 +20,13 @@ define amdgpu_ps void @global_atomic_load_monitor_b32_vaddr_relaxed_sys(ptr addr
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call i32 @llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 0, metadata !0)
+  %val = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 0, metadata !0)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_atomic_load_monitor_b32_saddr_relaxed_sys(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_atomic_load_monitor_b32_saddr_relaxed_sys:
+define amdgpu_ps void @global_load_monitor_b32_saddr_relaxed_sys(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_load_monitor_b32_saddr_relaxed_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
@@ -36,13 +36,13 @@ define amdgpu_ps void @global_atomic_load_monitor_b32_saddr_relaxed_sys(ptr addr
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call i32 @llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 0, metadata !0)
+  %val = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 0, metadata !0)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_atomic_load_monitor_b64_vaddr_acquire_agent(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_atomic_load_monitor_b64_vaddr_acquire_agent:
+define amdgpu_ps void @global_load_monitor_b64_vaddr_acquire_agent(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_load_monitor_b64_vaddr_acquire_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    global_load_monitor_b64 v[0:1], v[0:1], off offset:32 scope:SCOPE_DEV
@@ -53,13 +53,13 @@ define amdgpu_ps void @global_atomic_load_monitor_b64_vaddr_acquire_agent(ptr ad
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 2, metadata !1)
+  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 2, metadata !1)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_atomic_load_monitor_b64_saddr_acquire_agent(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_atomic_load_monitor_b64_saddr_acquire_agent:
+define amdgpu_ps void @global_load_monitor_b64_saddr_acquire_agent(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_load_monitor_b64_saddr_acquire_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
@@ -71,13 +71,13 @@ define amdgpu_ps void @global_atomic_load_monitor_b64_saddr_acquire_agent(ptr ad
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 2, metadata !1)
+  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 2, metadata !1)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_atomic_load_monitor_b128_vaddr_seq_cst_workgroup(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_atomic_load_monitor_b128_vaddr_seq_cst_workgroup:
+define amdgpu_ps void @global_load_monitor_b128_vaddr_seq_cst_workgroup(ptr addrspace(1) %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_load_monitor_b128_vaddr_seq_cst_workgroup:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    global_load_monitor_b128 v[4:7], v[0:1], off offset:32
@@ -86,13 +86,13 @@ define amdgpu_ps void @global_atomic_load_monitor_b128_vaddr_seq_cst_workgroup(p
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <4 x i32> @llvm.amdgcn.global.atomic.load.monitor.b128.v4i32(ptr addrspace(1) %gep, i32 5, metadata !2)
+  %val = call <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1) %gep, i32 5, metadata !2)
   store <4 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_atomic_load_monitor_b128_saddr_seq_cst_workgroup(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: global_atomic_load_monitor_b128_saddr_seq_cst_workgroup:
+define amdgpu_ps void @global_load_monitor_b128_saddr_seq_cst_workgroup(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: global_load_monitor_b128_saddr_seq_cst_workgroup:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
@@ -102,13 +102,13 @@ define amdgpu_ps void @global_atomic_load_monitor_b128_saddr_seq_cst_workgroup(p
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, ptr addrspace(1) %addr, i32 4
-  %val = call <4 x i32> @llvm.amdgcn.global.atomic.load.monitor.b128.v4i32(ptr addrspace(1) %gep, i32 5, metadata !2)
+  %val = call <4 x i32> @llvm.amdgcn.global.load.monitor.b128.v4i32(ptr addrspace(1) %gep, i32 5, metadata !2)
   store <4 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @flat_atomic_load_monitor_b32_seq_cst_sys(ptr %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: flat_atomic_load_monitor_b32_seq_cst_sys:
+define amdgpu_ps void @flat_load_monitor_b32_seq_cst_sys(ptr %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: flat_load_monitor_b32_seq_cst_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    flat_load_monitor_b32 v0, v[0:1] offset:32 scope:SCOPE_SYS
@@ -119,13 +119,13 @@ define amdgpu_ps void @flat_atomic_load_monitor_b32_seq_cst_sys(ptr %addr, ptr a
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr inbounds i64, ptr addrspace(0) %addr, i32 4
-  %val = call i32 @llvm.amdgcn.flat.atomic.load.monitor.b32.i32(ptr addrspace(0) %gep, i32 5, metadata !0)
+  %val = call i32 @llvm.amdgcn.flat.load.monitor.b32.i32(ptr addrspace(0) %gep, i32 5, metadata !0)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @flat_atomic_load_monitor_b64_seq_cst_agent(ptr %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: flat_atomic_load_monitor_b64_seq_cst_agent:
+define amdgpu_ps void @flat_load_monitor_b64_seq_cst_agent(ptr %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: flat_load_monitor_b64_seq_cst_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    flat_load_monitor_b64 v[0:1], v[0:1] offset:32 scope:SCOPE_DEV
@@ -136,13 +136,13 @@ define amdgpu_ps void @flat_atomic_load_monitor_b64_seq_cst_agent(ptr %addr, ptr
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr inbounds i64, ptr addrspace(0) %addr, i32 4
-  %val = call <2 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b64.v2i32(ptr addrspace(0) %gep, i32 5, metadata !1)
+  %val = call <2 x i32> @llvm.amdgcn.flat.load.monitor.b64.v2i32(ptr addrspace(0) %gep, i32 5, metadata !1)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @flat_atomic_load_monitor_b128_acquire_sys(ptr %addr, ptr addrspace(1) %use) {
-; GFX1250-LABEL: flat_atomic_load_monitor_b128_acquire_sys:
+define amdgpu_ps void @flat_load_monitor_b128_acquire_sys(ptr %addr, ptr addrspace(1) %use) {
+; GFX1250-LABEL: flat_load_monitor_b128_acquire_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    flat_load_monitor_b128 v[4:7], v[0:1] offset:32 scope:SCOPE_SYS
@@ -153,13 +153,13 @@ define amdgpu_ps void @flat_atomic_load_monitor_b128_acquire_sys(ptr %addr, ptr
 ; GFX1250-NEXT:    s_endpgm
 entry:
   %gep = getelementptr inbounds i64, ptr addrspace(0) %addr, i32 4
-  %val = call <4 x i32> @llvm.amdgcn.flat.atomic.load.monitor.b128.v4i32(ptr addrspace(0) %gep, i32 2, metadata !0)
+  %val = call <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr addrspace(0) %gep, i32 2, metadata !0)
   store <4 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_atomic_load_monitor_b32_saddr_scale_offset_acquire_agent(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
-; GFX1250-LABEL: global_atomic_load_monitor_b32_saddr_scale_offset_acquire_agent:
+define amdgpu_ps void @global_load_monitor_b32_saddr_scale_offset_acquire_agent(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
+; GFX1250-LABEL: global_load_monitor_b32_saddr_scale_offset_acquire_agent:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    global_load_monitor_b32 v2, v2, s[0:1] scale_offset scope:SCOPE_DEV
@@ -171,13 +171,13 @@ define amdgpu_ps void @global_atomic_load_monitor_b32_saddr_scale_offset_acquire
 entry:
   %idxprom = sext i32 %idx to i64
   %gep = getelementptr i32, ptr addrspace(1) %addr, i64 %idxprom
-  %val = call i32 @llvm.amdgcn.global.atomic.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 2, metadata !1)
+  %val = call i32 @llvm.amdgcn.global.load.monitor.b32.i32(ptr addrspace(1) %gep, i32 2, metadata !1)
   store i32 %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_atomic_load_monitor_b64_saddr_scale_offset_acquire_workgroup(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
-; GFX1250-LABEL: global_atomic_load_monitor_b64_saddr_scale_offset_acquire_workgroup:
+define amdgpu_ps void @global_load_monitor_b64_saddr_scale_offset_acquire_workgroup(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
+; GFX1250-LABEL: global_load_monitor_b64_saddr_scale_offset_acquire_workgroup:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    global_load_monitor_b64 v[2:3], v2, s[0:1] scale_offset
@@ -187,13 +187,13 @@ define amdgpu_ps void @global_atomic_load_monitor_b64_saddr_scale_offset_acquire
 entry:
   %idxprom = sext i32 %idx to i64
   %gep = getelementptr i64, ptr addrspace(1) %addr, i64 %idxprom
-  %val = call <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 2, metadata !2)
+  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 2, metadata !2)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }
 
-define amdgpu_ps void @global_atomic_load_monitor_b64_saddr_no_scale_offset_seq_cst_sys(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
-; GFX1250-LABEL: global_atomic_load_monitor_b64_saddr_no_scale_offset_seq_cst_sys:
+define amdgpu_ps void @global_load_monitor_b64_saddr_no_scale_offset_seq_cst_sys(ptr addrspace(1) inreg %addr, ptr addrspace(1) %use, i32 %idx) {
+; GFX1250-LABEL: global_load_monitor_b64_saddr_no_scale_offset_seq_cst_sys:
 ; GFX1250:       ; %bb.0: ; %entry
 ; GFX1250-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
 ; GFX1250-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
@@ -208,7 +208,7 @@ define amdgpu_ps void @global_atomic_load_monitor_b64_saddr_no_scale_offset_seq_
 entry:
   %idxprom = sext i32 %idx to i64
   %gep = getelementptr i32, ptr addrspace(1) %addr, i64 %idxprom
-  %val = call <2 x i32> @llvm.amdgcn.global.atomic.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 5, metadata !0)
+  %val = call <2 x i32> @llvm.amdgcn.global.load.monitor.b64.v2i32(ptr addrspace(1) %gep, i32 5, metadata !0)
   store <2 x i32> %val, ptr addrspace(1) %use
   ret void
 }

>From 087495fa8bd74f082a6239b2c5792b55a916ae75 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Mon, 26 Jan 2026 11:05:54 +0100
Subject: [PATCH 03/10] Use macros for syncscope instead

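The syncscope argument becomes a constant integer taking the predefined
__MEMORY_SCOPE_* macros instead of a string literal. A minimal OpenCL usage
sketch (function and pointer names assumed, mirroring the updated tests below):

  typedef int v2i __attribute__((ext_vector_type(2)));

  v2i load_acquire_agent(global v2i* inptr) {
    // was: __builtin_amdgcn_global_load_monitor_b64(inptr, __ATOMIC_ACQUIRE, "agent");
    return __builtin_amdgcn_global_load_monitor_b64(inptr, __ATOMIC_ACQUIRE,
                                                    __MEMORY_SCOPE_DEVICE);
  }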
---
 clang/include/clang/Basic/BuiltinsAMDGPU.td   | 12 ++---
 clang/include/clang/Sema/SemaAMDGPU.h         |  4 --
 clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp   | 49 +++++++++++++++---
 clang/lib/Sema/SemaAMDGPU.cpp                 | 31 ++++++-----
 .../builtins-amdgcn-gfx1250-load-monitor.cl   | 12 ++---
 .../builtins-amdgcn-error-gfx1250-param.cl    | 51 +++++++++++--------
 6 files changed, 103 insertions(+), 56 deletions(-)

diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.td b/clang/include/clang/Basic/BuiltinsAMDGPU.td
index d087d0745fb42..47b545ee4aac4 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.td
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.td
@@ -711,12 +711,12 @@ def __builtin_amdgcn_s_cluster_barrier : AMDGPUBuiltin<"void()", [], "gfx1250-in
 def __builtin_amdgcn_flat_prefetch : AMDGPUBuiltin<"void(void const address_space<0> *, _Constant int)", [Const], "vmem-pref-insts">;
 def __builtin_amdgcn_global_prefetch : AMDGPUBuiltin<"void(void const address_space<1> *, _Constant int)", [Const], "vmem-pref-insts">;
 
-def __builtin_amdgcn_global_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<1> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_global_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int address_space<1> *>, _Constant int, char const *)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_global_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int address_space<1> *>, _Constant int, char const *)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_flat_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<0> *, _Constant int, char const *)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_flat_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int address_space<0> *>, _Constant int, char const *)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_flat_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int address_space<0> *>, _Constant int, char const *)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<1> *, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int address_space<1> *>, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int address_space<1> *>, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<0> *, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int address_space<0> *>, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int address_space<0> *>, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
 def __builtin_amdgcn_cluster_load_b32 : AMDGPUBuiltin<"int(int address_space<1> *, _Constant int, int)", [Const], "mcast-load-insts,wavefrontsize32">;
 def __builtin_amdgcn_cluster_load_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<1> *, _Constant int, int)", [Const], "mcast-load-insts,wavefrontsize32">;
 def __builtin_amdgcn_cluster_load_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<1> *, _Constant int, int)", [Const], "mcast-load-insts,wavefrontsize32">;
diff --git a/clang/include/clang/Sema/SemaAMDGPU.h b/clang/include/clang/Sema/SemaAMDGPU.h
index eb6e73dd7322f..2b0761936daf9 100644
--- a/clang/include/clang/Sema/SemaAMDGPU.h
+++ b/clang/include/clang/Sema/SemaAMDGPU.h
@@ -26,10 +26,6 @@ class SemaAMDGPU : public SemaBase {
 
   bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
 
-  /// Emits a diagnostic if the arg at \p ArgIdx of \p TheCall is not a string
-  /// literal. \returns true if a diagnostic was emitted.
-  bool checkStringLiteralArg(CallExpr *TheCall, unsigned ArgIdx);
-
   /// Emits a diagnostic if the arg at \p ArgIdx of \p TheCall is not atomic
   /// ordering encoded in the C ABI format, or if the atomic ordering is not
   /// valid for the operation type as defined by \p MayLoad and \p MayStore.
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index f81b432a0510c..fb498ac139fcf 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -366,11 +366,38 @@ void CodeGenFunction::AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
   Inst->setMetadata(LLVMContext::MD_mmra, MMRAMetadata::getMD(Ctx, MMRAs));
 }
 
-static llvm::MetadataAsValue *getStringAsMDValue(llvm::LLVMContext &Ctx,
-                                                 const clang::Expr *E) {
-  StringRef Arg =
-      cast<clang::StringLiteral>(E->IgnoreParenCasts())->getString();
-  llvm::MDNode *MD = llvm::MDNode::get(Ctx, {llvm::MDString::get(Ctx, Arg)});
+static llvm::MetadataAsValue *getSyncscopeIDAsMDString(llvm::LLVMContext &Ctx,
+                                                       clang::SyncScope Scope) {
+  StringRef Name;
+  switch (Scope) {
+  case clang::SyncScope::HIPSingleThread:
+  case clang::SyncScope::SingleScope:
+    Name = "singlethread";
+    break;
+  case clang::SyncScope::HIPWavefront:
+  case clang::SyncScope::OpenCLSubGroup:
+  case clang::SyncScope::WavefrontScope:
+    Name = "wavefront";
+    break;
+  case clang::SyncScope::HIPCluster:
+  case clang::SyncScope::ClusterScope:
+  case clang::SyncScope::HIPWorkgroup:
+  case clang::SyncScope::OpenCLWorkGroup:
+  case clang::SyncScope::WorkgroupScope:
+    Name = "workgroup";
+    break;
+  case clang::SyncScope::HIPAgent:
+  case clang::SyncScope::OpenCLDevice:
+  case clang::SyncScope::DeviceScope:
+    Name = "agent";
+    break;
+  case clang::SyncScope::SystemScope:
+  case clang::SyncScope::HIPSystem:
+  case clang::SyncScope::OpenCLAllSVMDevices:
+    Name = "";
+    break;
+  }
+  llvm::MDNode *MD = llvm::MDNode::get(Ctx, {llvm::MDString::get(Ctx, Name)});
   return llvm::MetadataAsValue::get(Ctx, MD);
 }
 
@@ -830,9 +857,12 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
     llvm::Type *LoadTy = ConvertType(E->getType());
     llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
     llvm::Value *AO = EmitScalarExpr(E->getArg(1));
-    llvm::Value *Scope = getStringAsMDValue(Ctx, E->getArg(2));
+
+    auto Scope = dyn_cast<llvm::ConstantInt>(EmitScalarExpr(E->getArg(2)));
+    llvm::Value *ScopeMD = getSyncscopeIDAsMDString(
+        Ctx, static_cast<clang::SyncScope>(Scope->getZExtValue()));
     llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
-    return Builder.CreateCall(F, {Addr, AO, Scope});
+    return Builder.CreateCall(F, {Addr, AO, ScopeMD});
   }
   case AMDGPU::BI__builtin_amdgcn_cluster_load_b32:
   case AMDGPU::BI__builtin_amdgcn_cluster_load_b64:
@@ -894,7 +924,10 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
     const unsigned ScopeArg = E->getNumArgs() - 1;
     for (unsigned i = 0; i != ScopeArg; ++i)
       Args.push_back(EmitScalarExpr(E->getArg(i)));
-    Args.push_back(getStringAsMDValue(Ctx, E->getArg(ScopeArg)));
+    StringRef Arg = cast<StringLiteral>(E->getArg(ScopeArg)->IgnoreParenCasts())
+                        ->getString();
+    llvm::MDNode *MD = llvm::MDNode::get(Ctx, {llvm::MDString::get(Ctx, Arg)});
+    Args.push_back(llvm::MetadataAsValue::get(Ctx, MD));
     // Intrinsic is typed based on the pointer AS. Pointer is always the first
     // argument.
     llvm::Function *F = CGM.getIntrinsic(IID, {Args[0]->getType()});
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index 21b4bbfc5b162..9e12f19860dbc 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -371,16 +371,6 @@ bool SemaAMDGPU::checkAtomicOrderingCABIArg(CallExpr *TheCall, unsigned ArgIdx,
   return false;
 }
 
-bool SemaAMDGPU::checkStringLiteralArg(CallExpr *TheCall, unsigned ArgIdx) {
-  Expr *Arg = TheCall->getArg(TheCall->getNumArgs() - 1);
-  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) {
-    Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
-        << Arg->getSourceRange();
-    return true;
-  }
-  return false;
-}
-
 bool SemaAMDGPU::checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore) {
   bool Fail = false;
 
@@ -398,8 +388,14 @@ bool SemaAMDGPU::checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore) {
   // Check atomic ordering
   Fail |= checkAtomicOrderingCABIArg(
       TheCall, IsStore ? 2 : 1, /*MayLoad=*/!IsStore, /*MayStore=*/IsStore);
+
   // Last argument is the syncscope as a string literal.
-  Fail |= checkStringLiteralArg(TheCall, TheCall->getNumArgs() - 1);
+  Expr *Arg = TheCall->getArg(TheCall->getNumArgs() - 1);
+  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) {
+    Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
+        << Arg->getSourceRange();
+    Fail = true;
+  }
 
   return Fail;
 }
@@ -408,7 +404,18 @@ bool SemaAMDGPU::checkAtomicMonitorLoad(CallExpr *TheCall) {
   bool Fail = false;
   Fail |= checkAtomicOrderingCABIArg(TheCall, 1, /*MayLoad=*/true,
                                      /*MayStore=*/false);
-  Fail |= checkStringLiteralArg(TheCall, 2);
+
+  auto ScopeModel = AtomicScopeModel::create(AtomicScopeModelKind::Generic);
+  auto *Scope = TheCall->getArg(TheCall->getNumArgs() - 1);
+  if (std::optional<llvm::APSInt> Result =
+          Scope->getIntegerConstantExpr(SemaRef.Context)) {
+    if (!ScopeModel->isValid(Result->getZExtValue())) {
+      Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_sync_scope)
+          << Scope->getSourceRange();
+      Fail = true;
+    }
+  }
+
   return Fail;
 }
 
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
index 49a74a8e16c6f..8ecd6ba61a03e 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250-load-monitor.cl
@@ -12,7 +12,7 @@ typedef int    v4i   __attribute__((ext_vector_type(4)));
 //
 int test_amdgcn_global_load_monitor_b32(global int* inptr)
 {
-  return __builtin_amdgcn_global_load_monitor_b32(inptr, __ATOMIC_RELAXED, "");
+  return __builtin_amdgcn_global_load_monitor_b32(inptr, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
 }
 
 // CHECK-GFX1250-LABEL: @test_amdgcn_global_load_monitor_b64(
@@ -22,7 +22,7 @@ int test_amdgcn_global_load_monitor_b32(global int* inptr)
 //
 v2i test_amdgcn_global_load_monitor_b64(global v2i* inptr)
 {
-  return __builtin_amdgcn_global_load_monitor_b64(inptr, __ATOMIC_ACQUIRE, "agent");
+  return __builtin_amdgcn_global_load_monitor_b64(inptr, __ATOMIC_ACQUIRE, __MEMORY_SCOPE_DEVICE);
 }
 
 // CHECK-GFX1250-LABEL: @test_amdgcn_global_load_monitor_b128(
@@ -32,7 +32,7 @@ v2i test_amdgcn_global_load_monitor_b64(global v2i* inptr)
 //
 v4i test_amdgcn_global_load_monitor_b128(global v4i* inptr)
 {
-  return __builtin_amdgcn_global_load_monitor_b128(inptr, __ATOMIC_ACQUIRE, "workgroup");
+  return __builtin_amdgcn_global_load_monitor_b128(inptr, __ATOMIC_ACQUIRE, __MEMORY_SCOPE_WRKGRP);
 }
 
 // CHECK-GFX1250-LABEL: @test_amdgcn_flat_load_monitor_b32(
@@ -42,7 +42,7 @@ v4i test_amdgcn_global_load_monitor_b128(global v4i* inptr)
 //
 int test_amdgcn_flat_load_monitor_b32(int* inptr)
 {
-  return __builtin_amdgcn_flat_load_monitor_b32(inptr, __ATOMIC_RELAXED, "");
+  return __builtin_amdgcn_flat_load_monitor_b32(inptr, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
 }
 
 // CHECK-GFX1250-LABEL: @test_amdgcn_flat_load_monitor_b64(
@@ -52,7 +52,7 @@ int test_amdgcn_flat_load_monitor_b32(int* inptr)
 //
 v2i test_amdgcn_flat_load_monitor_b64(v2i* inptr)
 {
-  return __builtin_amdgcn_flat_load_monitor_b64(inptr, __ATOMIC_SEQ_CST, "cluster");
+  return __builtin_amdgcn_flat_load_monitor_b64(inptr, __ATOMIC_SEQ_CST, __MEMORY_SCOPE_CLUSTR);
 }
 
 // CHECK-GFX1250-LABEL: @test_amdgcn_flat_load_monitor_b128(
@@ -62,5 +62,5 @@ v2i test_amdgcn_flat_load_monitor_b64(v2i* inptr)
 //
 v4i test_amdgcn_flat_load_monitor_b128(v4i* inptr)
 {
-  return __builtin_amdgcn_flat_load_monitor_b128(inptr, __ATOMIC_RELAXED, "");
+  return __builtin_amdgcn_flat_load_monitor_b128(inptr, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
 }
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
index 5903060797bd5..8ab4f43d70c40 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
@@ -97,34 +97,45 @@ void test_cvt_scale_pk(global half8 *outh8, global bfloat8 *outy8, uint2 src2,
 void test_amdgcn_load_monitor_ao_constant(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
                               global int* b32out, global v2i* b64out, global v4i* b128out, int ao)
 {
-  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, ao, ""); // expected-error {{'__builtin_amdgcn_global_load_monitor_b32' must be a constant integer}}
-  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, ao, ""); // expected-error {{'__builtin_amdgcn_global_load_monitor_b64' must be a constant integer}}
-  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, ao, ""); // expected-error {{'__builtin_amdgcn_global_load_monitor_b128' must be a constant integer}}
-  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, ao, ""); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b32' must be a constant integer}}
-  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, ao, ""); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b64' must be a constant integer}}
-  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, ao, ""); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b128' must be a constant integer}}
+  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, ao, __MEMORY_SCOPE_SYSTEM); // expected-error {{'__builtin_amdgcn_global_load_monitor_b32' must be a constant integer}}
+  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, ao, __MEMORY_SCOPE_SYSTEM); // expected-error {{'__builtin_amdgcn_global_load_monitor_b64' must be a constant integer}}
+  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, ao, __MEMORY_SCOPE_SYSTEM); // expected-error {{'__builtin_amdgcn_global_load_monitor_b128' must be a constant integer}}
+  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, ao, __MEMORY_SCOPE_SYSTEM); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b32' must be a constant integer}}
+  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, ao, __MEMORY_SCOPE_SYSTEM); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b64' must be a constant integer}}
+  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, ao, __MEMORY_SCOPE_SYSTEM); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b128' must be a constant integer}}
 }
 
 void test_amdgcn_load_monitor_ao_valid(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
                               global int* b32out, global v2i* b64out, global v4i* b128out)
 {
-  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, __ATOMIC_RELEASE, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
-  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, __ATOMIC_ACQ_REL, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
-  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, __ATOMIC_ACQ_REL, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
-  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, __ATOMIC_RELEASE, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
-  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, __ATOMIC_ACQ_REL, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
-  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, __ATOMIC_RELEASE, ""); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, __ATOMIC_RELEASE, __MEMORY_SCOPE_SYSTEM); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, __ATOMIC_ACQ_REL, __MEMORY_SCOPE_SYSTEM); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, __ATOMIC_ACQ_REL, __MEMORY_SCOPE_SYSTEM); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, __ATOMIC_RELEASE, __MEMORY_SCOPE_SYSTEM); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, __ATOMIC_ACQ_REL, __MEMORY_SCOPE_SYSTEM); // expected-warning {{memory order argument to atomic operation is invalid}}
+  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, __ATOMIC_RELEASE, __MEMORY_SCOPE_SYSTEM); // expected-warning {{memory order argument to atomic operation is invalid}}
 }
 
-void test_amdgcn_load_monitor_scope_literal(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
-                              global int* b32out, global v2i* b64out, global v4i* b128out, const char* scope)
+void test_amdgcn_load_monitor_scope_constant(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
+                              global int* b32out, global v2i* b64out, global v4i* b128out, int sc)
 {
-  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
-  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
-  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
-  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
-  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
-  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, __ATOMIC_RELAXED, scope); // expected-error {{expression is not a string literal}}
+  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, __ATOMIC_RELAXED, sc); // expected-error {{'__builtin_amdgcn_global_load_monitor_b32' must be a constant integer}}
+  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, __ATOMIC_RELAXED, sc); // expected-error {{'__builtin_amdgcn_global_load_monitor_b64' must be a constant integer}}
+  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, __ATOMIC_RELAXED, sc); // expected-error {{'__builtin_amdgcn_global_load_monitor_b128' must be a constant integer}}
+  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, __ATOMIC_RELAXED, sc); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b32' must be a constant integer}}
+  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, __ATOMIC_RELAXED, sc); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b64' must be a constant integer}}
+  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, __ATOMIC_RELAXED, sc); // expected-error {{'__builtin_amdgcn_flat_load_monitor_b128' must be a constant integer}}
+}
+
+void test_amdgcn_load_monitor_scope_valid(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
+                              global int* b32out, global v2i* b64out, global v4i* b128out)
+{
+  *b32out  = __builtin_amdgcn_global_load_monitor_b32(b32gaddr, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
+  *b64out  = __builtin_amdgcn_global_load_monitor_b64(b64gaddr, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
+  *b128out = __builtin_amdgcn_global_load_monitor_b128(b128gaddr, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
+  *b32out  = __builtin_amdgcn_flat_load_monitor_b32(b32faddr, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
+  *b64out  = __builtin_amdgcn_flat_load_monitor_b64(b64faddr, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
+  *b128out = __builtin_amdgcn_flat_load_monitor_b128(b128faddr, __ATOMIC_RELAXED, 42); // expected-error {{synchronization scope argument to atomic operation is invalid}}
 }
 
 void test_amdgcn_cluster_load(global int* addr32, global v2i* addr64, global v4i* addr128, global int* b32out, global v2i* b64out, global v4i* b128out, int cpol, int mask)

>From 96cf7d99d79f9faf300ca36663ea524270e497df Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Fri, 30 Jan 2026 11:15:03 +0100
Subject: [PATCH 04/10] Address comments

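The ordering/scope checks now skip value-dependent arguments, so the builtins
can be wrapped in templates that forward both as template parameters; the
diagnostics fire at instantiation time. A minimal HIP sketch (wrapper names
assumed, mirroring the new templated tests added below):

  #define __device__ __attribute__((device))
  typedef int v4i __attribute__((ext_vector_type(4)));

  template <unsigned AO, unsigned Scope>
  __device__ v4i load_monitor_b128(v4i* p) {
    // checked only once AO and Scope are no longer value-dependent
    return __builtin_amdgcn_flat_load_monitor_b128(p, AO, Scope);
  }

  __device__ void use(v4i* p, v4i* out) {
    *out = load_monitor_b128<__ATOMIC_SEQ_CST, __MEMORY_SCOPE_SYSTEM>(p);
  }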
---
 clang/include/clang/Sema/SemaAMDGPU.h         | 11 ++--
 clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp   | 29 +++++------
 clang/lib/Sema/SemaAMDGPU.cpp                 | 38 +++++++++-----
 ...-gfx1250-cooperative-atomics-templated.hip | 51 +++++++++++++++++++
 ...-amdgcn-gfx1250-load-monitor-templated.hip | 25 +++++++++
 ...-gfx1250-cooperative-atomics-templated.hip | 32 ++++++++++++
 ...-amdgcn-gfx1250-load-monitor-templated.hip | 26 ++++++++++
 7 files changed, 175 insertions(+), 37 deletions(-)
 create mode 100644 clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
 create mode 100644 clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
 create mode 100644 clang/test/SemaHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
 create mode 100644 clang/test/SemaHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip

diff --git a/clang/include/clang/Sema/SemaAMDGPU.h b/clang/include/clang/Sema/SemaAMDGPU.h
index 2b0761936daf9..e080ccd008863 100644
--- a/clang/include/clang/Sema/SemaAMDGPU.h
+++ b/clang/include/clang/Sema/SemaAMDGPU.h
@@ -26,12 +26,11 @@ class SemaAMDGPU : public SemaBase {
 
   bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
 
-  /// Emits a diagnostic if the arg at \p ArgIdx of \p TheCall is not atomic
-  /// ordering encoded in the C ABI format, or if the atomic ordering is not
-  /// valid for the operation type as defined by \p MayLoad and \p MayStore.
-  /// \returns true if a diagnostic was emitted.
-  bool checkAtomicOrderingCABIArg(CallExpr *TheCall, unsigned ArgIdx,
-                                  bool MayLoad, bool MayStore);
+  /// Emits a diagnostic if the \p E is not an atomic ordering encoded in the C
+  /// ABI format, or if the atomic ordering is not valid for the operation type
+  /// as defined by \p MayLoad and \p MayStore. \returns true if a diagnostic
+  /// was emitted.
+  bool checkAtomicOrderingCABIArg(Expr *E, bool MayLoad, bool MayStore);
 
   bool checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore);
   bool checkAtomicMonitorLoad(CallExpr *TheCall);
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index fb498ac139fcf..d9e16bdede6bf 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -366,39 +366,31 @@ void CodeGenFunction::AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
   Inst->setMetadata(LLVMContext::MD_mmra, MMRAMetadata::getMD(Ctx, MMRAs));
 }
 
-static llvm::MetadataAsValue *getSyncscopeIDAsMDString(llvm::LLVMContext &Ctx,
-                                                       clang::SyncScope Scope) {
-  StringRef Name;
+static StringRef getSyncscopeIDAsString(llvm::LLVMContext &Ctx,
+                                        clang::SyncScope Scope) {
   switch (Scope) {
   case clang::SyncScope::HIPSingleThread:
   case clang::SyncScope::SingleScope:
-    Name = "singlethread";
-    break;
+    return "singlethread";
   case clang::SyncScope::HIPWavefront:
   case clang::SyncScope::OpenCLSubGroup:
   case clang::SyncScope::WavefrontScope:
-    Name = "wavefront";
-    break;
+    return "wavefront";
   case clang::SyncScope::HIPCluster:
   case clang::SyncScope::ClusterScope:
   case clang::SyncScope::HIPWorkgroup:
   case clang::SyncScope::OpenCLWorkGroup:
   case clang::SyncScope::WorkgroupScope:
-    Name = "workgroup";
-    break;
+    return "workgroup";
   case clang::SyncScope::HIPAgent:
   case clang::SyncScope::OpenCLDevice:
   case clang::SyncScope::DeviceScope:
-    Name = "agent";
-    break;
+    return "agent";
   case clang::SyncScope::SystemScope:
   case clang::SyncScope::HIPSystem:
   case clang::SyncScope::OpenCLAllSVMDevices:
-    Name = "";
-    break;
+    return "";
   }
-  llvm::MDNode *MD = llvm::MDNode::get(Ctx, {llvm::MDString::get(Ctx, Name)});
-  return llvm::MetadataAsValue::get(Ctx, MD);
 }
 
 static Intrinsic::ID getIntrinsicIDforWaveReduction(unsigned BuiltinID) {
@@ -858,9 +850,12 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
     llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
     llvm::Value *AO = EmitScalarExpr(E->getArg(1));
 
-    auto Scope = dyn_cast<llvm::ConstantInt>(EmitScalarExpr(E->getArg(2)));
-    llvm::Value *ScopeMD = getSyncscopeIDAsMDString(
+    auto *Scope = dyn_cast<llvm::ConstantInt>(EmitScalarExpr(E->getArg(2)));
+    StringRef ScopeStr = getSyncscopeIDAsString(
         Ctx, static_cast<clang::SyncScope>(Scope->getZExtValue()));
+    llvm::MDNode *MD =
+        llvm::MDNode::get(Ctx, {llvm::MDString::get(Ctx, ScopeStr)});
+    llvm::Value *ScopeMD = llvm::MetadataAsValue::get(Ctx, MD);
     llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
     return Builder.CreateCall(F, {Addr, AO, ScopeMD});
   }
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index 9e12f19860dbc..e1ab5c1d71801 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -348,11 +348,10 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
   return false;
 }
 
-bool SemaAMDGPU::checkAtomicOrderingCABIArg(CallExpr *TheCall, unsigned ArgIdx,
-                                            bool MayLoad, bool MayStore) {
-  Expr *AtomicOrdArg = TheCall->getArg(ArgIdx);
+bool SemaAMDGPU::checkAtomicOrderingCABIArg(Expr *E, bool MayLoad,
+                                            bool MayStore) {
   Expr::EvalResult AtomicOrdArgRes;
-  if (!AtomicOrdArg->EvaluateAsInt(AtomicOrdArgRes, getASTContext()))
+  if (!E->EvaluateAsInt(AtomicOrdArgRes, getASTContext()))
     llvm_unreachable("Intrinsic requires imm for atomic ordering argument!");
   auto Ord =
       llvm::AtomicOrderingCABI(AtomicOrdArgRes.Val.getInt().getZExtValue());
@@ -363,9 +362,8 @@ bool SemaAMDGPU::checkAtomicOrderingCABIArg(CallExpr *TheCall, unsigned ArgIdx,
       (!(MayLoad && MayStore) && (Ord == llvm::AtomicOrderingCABI::acq_rel)) ||
       (!MayLoad && Ord == llvm::AtomicOrderingCABI::acquire) ||
       (!MayStore && Ord == llvm::AtomicOrderingCABI::release)) {
-    return Diag(AtomicOrdArg->getBeginLoc(),
-                diag::warn_atomic_op_has_invalid_memory_order)
-           << 0 << AtomicOrdArg->getSourceRange();
+    return Diag(E->getBeginLoc(), diag::warn_atomic_op_has_invalid_memory_order)
+           << 0 << E->getSourceRange();
   }
 
   return false;
@@ -385,15 +383,21 @@ bool SemaAMDGPU::checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore) {
         << PtrArg->getSourceRange();
   }
 
+  Expr *AO = TheCall->getArg(IsStore ? 2 : 1);
+  Expr *Scope = TheCall->getArg(TheCall->getNumArgs() - 1);
+
+  if (AO->isValueDependent() || Scope->isValueDependent())
+    return false;
+
   // Check atomic ordering
-  Fail |= checkAtomicOrderingCABIArg(
-      TheCall, IsStore ? 2 : 1, /*MayLoad=*/!IsStore, /*MayStore=*/IsStore);
+  Fail |=
+      checkAtomicOrderingCABIArg(TheCall->getArg(IsStore ? 2 : 1),
+                                 /*MayLoad=*/!IsStore, /*MayStore=*/IsStore);
 
   // Last argument is the syncscope as a string literal.
-  Expr *Arg = TheCall->getArg(TheCall->getNumArgs() - 1);
-  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) {
+  if (!isa<StringLiteral>(Scope->IgnoreParenImpCasts())) {
     Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
-        << Arg->getSourceRange();
+        << Scope->getSourceRange();
     Fail = true;
   }
 
@@ -402,11 +406,17 @@ bool SemaAMDGPU::checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore) {
 
 bool SemaAMDGPU::checkAtomicMonitorLoad(CallExpr *TheCall) {
   bool Fail = false;
-  Fail |= checkAtomicOrderingCABIArg(TheCall, 1, /*MayLoad=*/true,
+
+  Expr *AO = TheCall->getArg(1);
+  auto *Scope = TheCall->getArg(TheCall->getNumArgs() - 1);
+
+  if (AO->isValueDependent() || Scope->isValueDependent())
+    return false;
+
+  Fail |= checkAtomicOrderingCABIArg(TheCall->getArg(1), /*MayLoad=*/true,
                                      /*MayStore=*/false);
 
   auto ScopeModel = AtomicScopeModel::create(AtomicScopeModelKind::Generic);
-  auto *Scope = TheCall->getArg(TheCall->getNumArgs() - 1);
   if (std::optional<llvm::APSInt> Result =
           Scope->getIntegerConstantExpr(SemaRef.Context)) {
     if (!ScopeModel->isValid(Result->getZExtValue())) {
diff --git a/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
new file mode 100644
index 0000000000000..e2d41f86c2ff6
--- /dev/null
+++ b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
@@ -0,0 +1,51 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+
+// REQUIRES: amdgpu-registered-target
+// RUN: %clang_cc1 -O3 -triple amdgcn-unknown-unknown -target-cpu gfx1250 -fcuda-is-device -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -O3 -triple amdgcn-unknown-unknown -target-cpu gfx1251 -fcuda-is-device -emit-llvm -o - %s | FileCheck %s
+
+#define __device__ __attribute__((device))
+
+typedef int    v2i   __attribute__((ext_vector_type(2)));
+typedef int    v4i   __attribute__((ext_vector_type(4)));
+
+template<unsigned AO>
+__device__ void template_cooperative_atomic_store_32x4B(int* gaddr, int val) {
+  __builtin_amdgcn_cooperative_atomic_store_32x4B(gaddr, val, AO, "agent");
+}
+
+// CHECK-LABEL: define dso_local void @_Z42test_amdgcn_cooperative_atomic_store_32x4BPii(
+// CHECK-SAME: ptr noundef writeonly captures(none) [[GADDR:%.*]], i32 noundef [[VAL:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    tail call void @llvm.amdgcn.cooperative.atomic.store.32x4B.p0(ptr [[GADDR]], i32 [[VAL]], i32 5, metadata [[META8:![0-9]+]])
+// CHECK-NEXT:    ret void
+//
+__device__ void test_amdgcn_cooperative_atomic_store_32x4B(int* gaddr, int val)
+{
+  template_cooperative_atomic_store_32x4B<__ATOMIC_SEQ_CST>(gaddr, val);
+}
+
+template<unsigned AO>
+__device__ int template_cooperative_atomic_load_32x4B(int* gaddr) {
+  return __builtin_amdgcn_cooperative_atomic_load_32x4B(gaddr, AO, "");
+}
+
+// CHECK-LABEL: define dso_local void @_Z41test_amdgcn_cooperative_atomic_load_32x4BPiS_(
+// CHECK-SAME: ptr noundef readonly captures(none) [[ADDR:%.*]], ptr noundef writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call noundef i32 @llvm.amdgcn.cooperative.atomic.load.32x4B.p0(ptr [[ADDR]], i32 5, metadata [[META9:![0-9]+]])
+// CHECK-NEXT:    store i32 [[TMP0]], ptr [[OUT]], align 4, !tbaa [[TBAA4:![0-9]+]]
+// CHECK-NEXT:    ret void
+//
+__device__ void test_amdgcn_cooperative_atomic_load_32x4B(int* addr, int *out)
+{
+  *out = template_cooperative_atomic_load_32x4B<__ATOMIC_SEQ_CST>(addr);
+}
+//.
+// CHECK: [[TBAA4]] = !{[[META5:![0-9]+]], [[META5]], i64 0}
+// CHECK: [[META5]] = !{!"int", [[META6:![0-9]+]], i64 0}
+// CHECK: [[META6]] = !{!"omnipotent char", [[META7:![0-9]+]], i64 0}
+// CHECK: [[META7]] = !{!"Simple C++ TBAA"}
+// CHECK: [[META8]] = !{!"agent"}
+// CHECK: [[META9]] = !{!""}
+//.
diff --git a/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
new file mode 100644
index 0000000000000..86a3d0a39cab2
--- /dev/null
+++ b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
@@ -0,0 +1,25 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: amdgpu-registered-target
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -O3 -target-cpu gfx1250 -emit-llvm -fcuda-is-device -o - %s | FileCheck %s
+
+#define __device__ __attribute__((device))
+
+typedef int    v4i   __attribute__((ext_vector_type(4)));
+
+template<unsigned AO, unsigned Scope>
+__device__ v4i templated_amdgcn_flat_load_monitor_b128(v4i* inptr)
+{
+  return __builtin_amdgcn_flat_load_monitor_b128(inptr, AO, Scope);
+}
+
+
+// CHECK-LABEL: @_Z48test_amdgcn_flat_load_monitor_b128_from_templatePDv4_iS0_(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call noundef <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr [[INPTR:%.*]], i32 5, metadata [[META8:![0-9]+]])
+// CHECK-NEXT:    store <4 x i32> [[TMP0]], ptr [[OUT:%.*]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-NEXT:    ret void
+//
+__device__ void test_amdgcn_flat_load_monitor_b128_from_template(v4i* inptr, v4i *out)
+{
+  *out = templated_amdgcn_flat_load_monitor_b128<__ATOMIC_SEQ_CST, __MEMORY_SCOPE_SYSTEM>(inptr);
+}
diff --git a/clang/test/SemaHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip b/clang/test/SemaHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
new file mode 100644
index 0000000000000..b57d059d7767e
--- /dev/null
+++ b/clang/test/SemaHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
@@ -0,0 +1,32 @@
+// REQUIRES: amdgpu-registered-target
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx1250 -fsyntax-only -fcuda-is-device -verify %s
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx1251 -fsyntax-only -fcuda-is-device -verify %s
+
+#define __device__ __attribute__((device))
+
+typedef int    v2i   __attribute__((ext_vector_type(2)));
+typedef int    v4i   __attribute__((ext_vector_type(4)));
+
+template<unsigned AO>
+__device__ void template_cooperative_atomic_store_32x4B(int* gaddr, int val) {
+  // expected-warning at +1 {{memory order argument to atomic operation is invalid}}
+  __builtin_amdgcn_cooperative_atomic_store_32x4B(gaddr, val, AO, "agent");
+}
+
+__device__ void test_amdgcn_cooperative_atomic_store_32x4B_invalid_ao(int* gaddr, int val)
+{
+  // expected-note at +1 {{in instantiation of function template specialization 'template_cooperative_atomic_store_32x4B<42U>' requested here}}
+  template_cooperative_atomic_store_32x4B<42>(gaddr, val);
+}
+
+template<unsigned AO>
+__device__ int template_cooperative_atomic_load_32x4B(int* gaddr) {
+  // expected-warning at +1 {{memory order argument to atomic operation is invalid}}
+  return __builtin_amdgcn_cooperative_atomic_load_32x4B(gaddr, AO, "");
+}
+
+__device__ void test_amdgcn_cooperative_atomic_load_32x4B_invalid_ao(int* addr, int *out)
+{
+  // expected-note at +1 {{in instantiation of function template specialization 'template_cooperative_atomic_load_32x4B<42U>' requested here}}
+  *out = template_cooperative_atomic_load_32x4B<42>(addr);
+}
diff --git a/clang/test/SemaHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip b/clang/test/SemaHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
new file mode 100644
index 0000000000000..a47e9a7ef2822
--- /dev/null
+++ b/clang/test/SemaHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
@@ -0,0 +1,26 @@
+// REQUIRES: amdgpu-registered-target
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx1250 -fsyntax-only -fcuda-is-device -verify %s
+
+#define __device__ __attribute__((device))
+
+typedef int    v4i   __attribute__((ext_vector_type(4)));
+
+template<unsigned AO, unsigned Scope>
+__device__ v4i templated_amdgcn_flat_load_monitor_b128(v4i* inptr)
+{
+  // expected-error at +2 {{synchronization scope argument to atomic operation is invalid}}
+  // expected-warning at +1 {{memory order argument to atomic operation is invalid}}
+  return __builtin_amdgcn_flat_load_monitor_b128(inptr, AO, Scope);
+}
+
+__device__ void test_amdgcn_flat_load_monitor_b128_invalid_ao_from_template(v4i* inptr, v4i *out)
+{
+  // expected-note at +1 {{in instantiation of function template specialization 'templated_amdgcn_flat_load_monitor_b128<42U, 0U>' requested here}}
+  *out = templated_amdgcn_flat_load_monitor_b128<42, __MEMORY_SCOPE_SYSTEM>(inptr);
+}
+
+__device__ void test_amdgcn_flat_load_monitor_b128_invalid_sc_from_template(v4i* inptr, v4i *out)
+{
+  // expected-note at +1 {{in instantiation of function template specialization 'templated_amdgcn_flat_load_monitor_b128<5U, 42U>' requested here}}
+  *out = templated_amdgcn_flat_load_monitor_b128<__ATOMIC_SEQ_CST, 42>(inptr);
+}

>From 68747f012d3b8c2a6ad67d86f33faceccb6ab6e1 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Mon, 2 Feb 2026 10:12:56 +0100
Subject: [PATCH 05/10] Address comments

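The Clang SyncScope -> LLVM syncscope-string mapping moves into a
TargetCodeGenInfo::getLLVMSyncScopeStr hook so the monitor-load codegen and
regular atomic codegen share one mapping, and getLLVMSyncScopeID becomes a
thin wrapper over it. A condensed sketch of that relationship (simplified from
the TargetInfo.cpp hunk below):

  llvm::SyncScope::ID
  TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts, SyncScope Scope,
                                        llvm::AtomicOrdering Ordering,
                                        llvm::LLVMContext &Ctx) const {
    // targets override getLLVMSyncScopeStr; the ID is derived from the string
    return Ctx.getOrInsertSyncScopeID(getLLVMSyncScopeStr(LangOpts, Scope, Ordering));
  }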
---
 clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp   | 84 +++++++-----------
 clang/lib/CodeGen/TargetInfo.cpp              | 10 ++-
 clang/lib/CodeGen/TargetInfo.h                | 15 ++--
 clang/lib/CodeGen/Targets/AMDGPU.cpp          | 16 ++--
 clang/lib/CodeGen/Targets/SPIR.cpp            | 64 ++++++--------
 ...-gfx1250-cooperative-atomics-templated.hip | 86 ++++++++++++++-----
 ...-amdgcn-gfx1250-load-monitor-templated.hip | 43 ++++++++--
 7 files changed, 184 insertions(+), 134 deletions(-)

diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index d9e16bdede6bf..55b1db5bbc5a2 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -12,6 +12,7 @@
 
 #include "CGBuiltin.h"
 #include "CodeGenFunction.h"
+#include "TargetInfo.h"
 #include "clang/Basic/DiagnosticFrontend.h"
 #include "clang/Basic/SyncScope.h"
 #include "clang/Basic/TargetBuiltins.h"
@@ -21,6 +22,7 @@
 #include "llvm/IR/IntrinsicsR600.h"
 #include "llvm/IR/MemoryModelRelaxationAnnotations.h"
 #include "llvm/Support/AMDGPUAddrSpace.h"
+#include "llvm/Support/AtomicOrdering.h"
 
 using namespace clang;
 using namespace CodeGen;
@@ -272,6 +274,26 @@ static inline StringRef mapScopeToSPIRV(StringRef AMDGCNScope) {
   return AMDGCNScope;
 }
 
+static llvm::AtomicOrdering mapCABIAtomicOrdering(unsigned AO) {
+  // Map C11/C++11 memory ordering to LLVM memory ordering
+  assert(llvm::isValidAtomicOrderingCABI(AO));
+  switch (static_cast<llvm::AtomicOrderingCABI>(AO)) {
+  case llvm::AtomicOrderingCABI::acquire:
+  case llvm::AtomicOrderingCABI::consume:
+    return llvm::AtomicOrdering::Acquire;
+    break;
+  case llvm::AtomicOrderingCABI::release:
+    return llvm::AtomicOrdering::Release;
+  case llvm::AtomicOrderingCABI::acq_rel:
+    return llvm::AtomicOrdering::AcquireRelease;
+  case llvm::AtomicOrderingCABI::seq_cst:
+    return llvm::AtomicOrdering::SequentiallyConsistent;
+  case llvm::AtomicOrderingCABI::relaxed:
+    return llvm::AtomicOrdering::Monotonic;
+  }
+  llvm_unreachable("unknown CABI Ordering");
+}
+
 // For processing memory ordering and memory scope arguments of various
 // amdgcn builtins.
 // \p Order takes a C++11 compatible memory-ordering specifier and converts
@@ -284,25 +306,7 @@ void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
   int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
 
   // Map C11/C++11 memory ordering to LLVM memory ordering
-  assert(llvm::isValidAtomicOrderingCABI(ord));
-  switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
-  case llvm::AtomicOrderingCABI::acquire:
-  case llvm::AtomicOrderingCABI::consume:
-    AO = llvm::AtomicOrdering::Acquire;
-    break;
-  case llvm::AtomicOrderingCABI::release:
-    AO = llvm::AtomicOrdering::Release;
-    break;
-  case llvm::AtomicOrderingCABI::acq_rel:
-    AO = llvm::AtomicOrdering::AcquireRelease;
-    break;
-  case llvm::AtomicOrderingCABI::seq_cst:
-    AO = llvm::AtomicOrdering::SequentiallyConsistent;
-    break;
-  case llvm::AtomicOrderingCABI::relaxed:
-    AO = llvm::AtomicOrdering::Monotonic;
-    break;
-  }
+  AO = mapCABIAtomicOrdering(ord);
 
   // Some of the atomic builtins take the scope as a string name.
   StringRef scp;
@@ -366,33 +370,6 @@ void CodeGenFunction::AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
   Inst->setMetadata(LLVMContext::MD_mmra, MMRAMetadata::getMD(Ctx, MMRAs));
 }
 
-static StringRef getSyncscopeIDAsString(llvm::LLVMContext &Ctx,
-                                        clang::SyncScope Scope) {
-  switch (Scope) {
-  case clang::SyncScope::HIPSingleThread:
-  case clang::SyncScope::SingleScope:
-    return "singlethread";
-  case clang::SyncScope::HIPWavefront:
-  case clang::SyncScope::OpenCLSubGroup:
-  case clang::SyncScope::WavefrontScope:
-    return "wavefront";
-  case clang::SyncScope::HIPCluster:
-  case clang::SyncScope::ClusterScope:
-  case clang::SyncScope::HIPWorkgroup:
-  case clang::SyncScope::OpenCLWorkGroup:
-  case clang::SyncScope::WorkgroupScope:
-    return "workgroup";
-  case clang::SyncScope::HIPAgent:
-  case clang::SyncScope::OpenCLDevice:
-  case clang::SyncScope::DeviceScope:
-    return "agent";
-  case clang::SyncScope::SystemScope:
-  case clang::SyncScope::HIPSystem:
-  case clang::SyncScope::OpenCLAllSVMDevices:
-    return "";
-  }
-}
-
 static Intrinsic::ID getIntrinsicIDforWaveReduction(unsigned BuiltinID) {
   switch (BuiltinID) {
   default:
@@ -848,16 +825,21 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
     LLVMContext &Ctx = CGM.getLLVMContext();
     llvm::Type *LoadTy = ConvertType(E->getType());
     llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
-    llvm::Value *AO = EmitScalarExpr(E->getArg(1));
 
-    auto *Scope = dyn_cast<llvm::ConstantInt>(EmitScalarExpr(E->getArg(2)));
-    StringRef ScopeStr = getSyncscopeIDAsString(
-        Ctx, static_cast<clang::SyncScope>(Scope->getZExtValue()));
+    auto *AOExpr = cast<llvm::ConstantInt>(EmitScalarExpr(E->getArg(1)));
+    auto *ScopeExpr = cast<llvm::ConstantInt>(EmitScalarExpr(E->getArg(2)));
+
+    auto Scope = static_cast<SyncScope>(ScopeExpr->getZExtValue());
+    llvm::AtomicOrdering AO = mapCABIAtomicOrdering(AOExpr->getZExtValue());
+
+    std::string ScopeStr = CGM.getTargetCodeGenInfo().getLLVMSyncScopeStr(
+        CGM.getLangOpts(), Scope, AO);
+
     llvm::MDNode *MD =
         llvm::MDNode::get(Ctx, {llvm::MDString::get(Ctx, ScopeStr)});
     llvm::Value *ScopeMD = llvm::MetadataAsValue::get(Ctx, MD);
     llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
-    return Builder.CreateCall(F, {Addr, AO, ScopeMD});
+    return Builder.CreateCall(F, {Addr, AOExpr, ScopeMD});
   }
   case AMDGPU::BI__builtin_amdgcn_cluster_load_b32:
   case AMDGPU::BI__builtin_amdgcn_cluster_load_b64:
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index 51d3ad384f934..187355023030b 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -148,12 +148,20 @@ LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
   return D ? D->getType().getAddressSpace() : LangAS::Default;
 }
 
+std::string
+TargetCodeGenInfo::getLLVMSyncScopeStr(const LangOptions &LangOpts,
+                                       SyncScope Scope,
+                                       llvm::AtomicOrdering Ordering) const {
+  return ""; /* default sync scope */
+}
+
 llvm::SyncScope::ID
 TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                       SyncScope Scope,
                                       llvm::AtomicOrdering Ordering,
                                       llvm::LLVMContext &Ctx) const {
-  return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
+  return Ctx.getOrInsertSyncScopeID(
+      getLLVMSyncScopeStr(LangOpts, Scope, Ordering));
 }
 
 void TargetCodeGenInfo::addStackProbeTargetAttributes(
diff --git a/clang/lib/CodeGen/TargetInfo.h b/clang/lib/CodeGen/TargetInfo.h
index 6394ef4cb0180..1d34eeb98582d 100644
--- a/clang/lib/CodeGen/TargetInfo.h
+++ b/clang/lib/CodeGen/TargetInfo.h
@@ -326,11 +326,16 @@ class TargetCodeGenInfo {
     return LangAS::Default;
   }
 
-  /// Get the syncscope used in LLVM IR.
-  virtual llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
-                                                 SyncScope Scope,
-                                                 llvm::AtomicOrdering Ordering,
-                                                 llvm::LLVMContext &Ctx) const;
+  /// Get the syncscope used in LLVM IR as a string
+  virtual std::string getLLVMSyncScopeStr(const LangOptions &LangOpts,
+                                          SyncScope Scope,
+                                          llvm::AtomicOrdering Ordering) const;
+
+  /// Get the syncscope used in LLVM IR as a SyncScope ID.
+  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
+                                         SyncScope Scope,
+                                         llvm::AtomicOrdering Ordering,
+                                         llvm::LLVMContext &Ctx) const;
 
   /// Allow the target to apply other metadata to an atomic instruction
   virtual void setTargetAtomicMetadata(CodeGenFunction &CGF,
diff --git a/clang/lib/CodeGen/Targets/AMDGPU.cpp b/clang/lib/CodeGen/Targets/AMDGPU.cpp
index 7ba32b92cfd55..a8fdadb3aa583 100644
--- a/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -310,10 +310,8 @@ class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
   }
   LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                   const VarDecl *D) const override;
-  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
-                                         SyncScope Scope,
-                                         llvm::AtomicOrdering Ordering,
-                                         llvm::LLVMContext &Ctx) const override;
+  std::string getLLVMSyncScopeStr(const LangOptions &LangOpts, SyncScope Scope,
+                                  llvm::AtomicOrdering Ordering) const override;
   void setTargetAtomicMetadata(CodeGenFunction &CGF,
                                llvm::Instruction &AtomicInst,
                                const AtomicExpr *Expr = nullptr) const override;
@@ -493,11 +491,9 @@ AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
   return DefaultGlobalAS;
 }
 
-llvm::SyncScope::ID
-AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
-                                            SyncScope Scope,
-                                            llvm::AtomicOrdering Ordering,
-                                            llvm::LLVMContext &Ctx) const {
+std::string AMDGPUTargetCodeGenInfo::getLLVMSyncScopeStr(
+    const LangOptions &LangOpts, SyncScope Scope,
+    llvm::AtomicOrdering Ordering) const {
   std::string Name;
   switch (Scope) {
   case SyncScope::HIPSingleThread:
@@ -541,7 +537,7 @@ AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
     Name = Twine(Twine(Name) + Twine("one-as")).str();
   }
 
-  return Ctx.getOrInsertSyncScopeID(Name);
+  return Name;
 }
 
 void AMDGPUTargetCodeGenInfo::setTargetAtomicMetadata(
diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp
index 32998bb5d60d5..60727aced9c6a 100644
--- a/clang/lib/CodeGen/Targets/SPIR.cpp
+++ b/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -131,42 +131,13 @@ class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
                                   const VarDecl *D) const override;
   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                            CodeGen::CodeGenModule &M) const override;
-  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
-                                         SyncScope Scope,
-                                         llvm::AtomicOrdering Ordering,
-                                         llvm::LLVMContext &Ctx) const override;
+  std::string getLLVMSyncScopeStr(const LangOptions &LangOpts, SyncScope Scope,
+                                  llvm::AtomicOrdering Ordering) const override;
   bool supportsLibCall() const override {
     return getABIInfo().getTarget().getTriple().getVendor() !=
            llvm::Triple::AMD;
   }
 };
-
-inline StringRef mapClangSyncScopeToLLVM(SyncScope Scope) {
-  switch (Scope) {
-  case SyncScope::HIPSingleThread:
-  case SyncScope::SingleScope:
-    return "singlethread";
-  case SyncScope::HIPWavefront:
-  case SyncScope::OpenCLSubGroup:
-  case SyncScope::WavefrontScope:
-    return "subgroup";
-  case SyncScope::HIPCluster:
-  case SyncScope::ClusterScope:
-  case SyncScope::HIPWorkgroup:
-  case SyncScope::OpenCLWorkGroup:
-  case SyncScope::WorkgroupScope:
-    return "workgroup";
-  case SyncScope::HIPAgent:
-  case SyncScope::OpenCLDevice:
-  case SyncScope::DeviceScope:
-    return "device";
-  case SyncScope::SystemScope:
-  case SyncScope::HIPSystem:
-  case SyncScope::OpenCLAllSVMDevices:
-    return "";
-  }
-  return "";
-}
 } // End anonymous namespace.
 
 void CommonSPIRABIInfo::setCCs() {
@@ -563,11 +534,32 @@ void SPIRVTargetCodeGenInfo::setTargetAttributes(
                  llvm::MDNode::get(M.getLLVMContext(), AttrMDArgs));
 }
 
-llvm::SyncScope::ID
-SPIRVTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &, SyncScope Scope,
-                                           llvm::AtomicOrdering,
-                                           llvm::LLVMContext &Ctx) const {
-  return Ctx.getOrInsertSyncScopeID(mapClangSyncScopeToLLVM(Scope));
+std::string SPIRVTargetCodeGenInfo::getLLVMSyncScopeStr(
+    const LangOptions &, SyncScope Scope, llvm::AtomicOrdering) const {
+  switch (Scope) {
+  case SyncScope::HIPSingleThread:
+  case SyncScope::SingleScope:
+    return "singlethread";
+  case SyncScope::HIPWavefront:
+  case SyncScope::OpenCLSubGroup:
+  case SyncScope::WavefrontScope:
+    return "subgroup";
+  case SyncScope::HIPCluster:
+  case SyncScope::ClusterScope:
+  case SyncScope::HIPWorkgroup:
+  case SyncScope::OpenCLWorkGroup:
+  case SyncScope::WorkgroupScope:
+    return "workgroup";
+  case SyncScope::HIPAgent:
+  case SyncScope::OpenCLDevice:
+  case SyncScope::DeviceScope:
+    return "device";
+  case SyncScope::SystemScope:
+  case SyncScope::HIPSystem:
+  case SyncScope::OpenCLAllSVMDevices:
+    return "";
+  }
+  return "";
 }
 
 /// Construct a SPIR-V target extension type for the given OpenCL image type.
diff --git a/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
index e2d41f86c2ff6..dc345a4d83140 100644
--- a/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
+++ b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
@@ -1,8 +1,8 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --version 5
 
 // REQUIRES: amdgpu-registered-target
-// RUN: %clang_cc1 -O3 -triple amdgcn-unknown-unknown -target-cpu gfx1250 -fcuda-is-device -emit-llvm -o - %s | FileCheck %s
-// RUN: %clang_cc1 -O3 -triple amdgcn-unknown-unknown -target-cpu gfx1251 -fcuda-is-device -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1250 -fcuda-is-device -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple amdgcn-unknown-unknown -target-cpu gfx1251 -fcuda-is-device -emit-llvm -o - %s | FileCheck %s
 
 #define __device__ __attribute__((device))
 
@@ -14,12 +14,6 @@ __device__ void template_cooperative_atomic_store_32x4B(int* gaddr, int val) {
   __builtin_amdgcn_cooperative_atomic_store_32x4B(gaddr, val, AO, "agent");
 }
 
-// CHECK-LABEL: define dso_local void @_Z42test_amdgcn_cooperative_atomic_store_32x4BPii(
-// CHECK-SAME: ptr noundef writeonly captures(none) [[GADDR:%.*]], i32 noundef [[VAL:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    tail call void @llvm.amdgcn.cooperative.atomic.store.32x4B.p0(ptr [[GADDR]], i32 [[VAL]], i32 5, metadata [[META8:![0-9]+]])
-// CHECK-NEXT:    ret void
-//
 __device__ void test_amdgcn_cooperative_atomic_store_32x4B(int* gaddr, int val)
 {
   template_cooperative_atomic_store_32x4B<__ATOMIC_SEQ_CST>(gaddr, val);
@@ -30,22 +24,70 @@ __device__ int template_cooperative_atomic_load_32x4B(int* gaddr) {
   return __builtin_amdgcn_cooperative_atomic_load_32x4B(gaddr, AO, "");
 }
 
-// CHECK-LABEL: define dso_local void @_Z41test_amdgcn_cooperative_atomic_load_32x4BPiS_(
-// CHECK-SAME: ptr noundef readonly captures(none) [[ADDR:%.*]], ptr noundef writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call noundef i32 @llvm.amdgcn.cooperative.atomic.load.32x4B.p0(ptr [[ADDR]], i32 5, metadata [[META9:![0-9]+]])
-// CHECK-NEXT:    store i32 [[TMP0]], ptr [[OUT]], align 4, !tbaa [[TBAA4:![0-9]+]]
-// CHECK-NEXT:    ret void
-//
 __device__ void test_amdgcn_cooperative_atomic_load_32x4B(int* addr, int *out)
 {
   *out = template_cooperative_atomic_load_32x4B<__ATOMIC_SEQ_CST>(addr);
 }
+
+// CHECK-LABEL: define dso_local void @_Z42test_amdgcn_cooperative_atomic_store_32x4BPii(
+// CHECK-SAME: ptr noundef [[GADDR:%.*]], i32 noundef [[VAL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[GADDR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[VAL_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT:    [[GADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[GADDR_ADDR]] to ptr
+// CHECK-NEXT:    [[VAL_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VAL_ADDR]] to ptr
+// CHECK-NEXT:    store ptr [[GADDR]], ptr [[GADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    store i32 [[VAL]], ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[GADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @_Z39template_cooperative_atomic_store_32x4BILj5EEvPii(ptr noundef [[TMP0]], i32 noundef [[TMP1]]) #[[ATTR3:[0-9]+]]
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define linkonce_odr void @_Z39template_cooperative_atomic_store_32x4BILj5EEvPii(
+// CHECK-SAME: ptr noundef [[GADDR:%.*]], i32 noundef [[VAL:%.*]]) #[[ATTR0]] comdat {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[GADDR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[VAL_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT:    [[GADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[GADDR_ADDR]] to ptr
+// CHECK-NEXT:    [[VAL_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VAL_ADDR]] to ptr
+// CHECK-NEXT:    store ptr [[GADDR]], ptr [[GADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    store i32 [[VAL]], ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[GADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[VAL_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.cooperative.atomic.store.32x4B.p0(ptr [[TMP0]], i32 [[TMP1]], i32 5, metadata [[META4:![0-9]+]])
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define dso_local void @_Z41test_amdgcn_cooperative_atomic_load_32x4BPiS_(
+// CHECK-SAME: ptr noundef [[ADDR:%.*]], ptr noundef [[OUT:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[ADDR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[OUT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[ADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ADDR_ADDR]] to ptr
+// CHECK-NEXT:    [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT:    store ptr [[ADDR]], ptr [[ADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    store ptr [[OUT]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z38template_cooperative_atomic_load_32x4BILj5EEiPi(ptr noundef [[TMP0]]) #[[ATTR3]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    store i32 [[CALL]], ptr [[TMP1]], align 4
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define linkonce_odr noundef i32 @_Z38template_cooperative_atomic_load_32x4BILj5EEiPi(
+// CHECK-SAME: ptr noundef [[GADDR:%.*]]) #[[ATTR0]] comdat {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT:    [[GADDR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT:    [[GADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[GADDR_ADDR]] to ptr
+// CHECK-NEXT:    store ptr [[GADDR]], ptr [[GADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[GADDR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.cooperative.atomic.load.32x4B.p0(ptr [[TMP0]], i32 5, metadata [[META5:![0-9]+]])
+// CHECK-NEXT:    ret i32 [[TMP1]]
+//
 //.
-// CHECK: [[TBAA4]] = !{[[META5:![0-9]+]], [[META5]], i64 0}
-// CHECK: [[META5]] = !{!"int", [[META6:![0-9]+]], i64 0}
-// CHECK: [[META6]] = !{!"omnipotent char", [[META7:![0-9]+]], i64 0}
-// CHECK: [[META7]] = !{!"Simple C++ TBAA"}
-// CHECK: [[META8]] = !{!"agent"}
-// CHECK: [[META9]] = !{!""}
+// CHECK: [[META4]] = !{!"agent"}
+// CHECK: [[META5]] = !{!""}
 //.
diff --git a/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
index 86a3d0a39cab2..2de1b51f57337 100644
--- a/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
+++ b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
@@ -1,6 +1,6 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --version 5
 // REQUIRES: amdgpu-registered-target
-// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -O3 -target-cpu gfx1250 -emit-llvm -fcuda-is-device -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx1250 -emit-llvm -fcuda-is-device -o - %s | FileCheck %s
 
 #define __device__ __attribute__((device))
 
@@ -12,14 +12,39 @@ __device__ v4i templated_amdgcn_flat_load_monitor_b128(v4i* inptr)
   return __builtin_amdgcn_flat_load_monitor_b128(inptr, AO, Scope);
 }
 
-
-// CHECK-LABEL: @_Z48test_amdgcn_flat_load_monitor_b128_from_templatePDv4_iS0_(
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = tail call noundef <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr [[INPTR:%.*]], i32 5, metadata [[META8:![0-9]+]])
-// CHECK-NEXT:    store <4 x i32> [[TMP0]], ptr [[OUT:%.*]], align 16, !tbaa [[TBAA9:![0-9]+]]
-// CHECK-NEXT:    ret void
-//
 __device__ void test_amdgcn_flat_load_monitor_b128_from_template(v4i* inptr, v4i *out)
 {
   *out = templated_amdgcn_flat_load_monitor_b128<__ATOMIC_SEQ_CST, __MEMORY_SCOPE_SYSTEM>(inptr);
 }
+
+// CHECK-LABEL: define dso_local void @_Z48test_amdgcn_flat_load_monitor_b128_from_templatePDv4_iS0_(
+// CHECK-SAME: ptr noundef [[INPTR:%.*]], ptr noundef [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[INPTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[OUT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[INPTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[INPTR_ADDR]] to ptr
+// CHECK-NEXT:    [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT:    store ptr [[INPTR]], ptr [[INPTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    store ptr [[OUT]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[INPTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[CALL:%.*]] = call noundef <4 x i32> @_Z39templated_amdgcn_flat_load_monitor_b128ILj5ELj0EEDv4_iPS0_(ptr noundef [[TMP0]]) #[[ATTR2:[0-9]+]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    store <4 x i32> [[CALL]], ptr [[TMP1]], align 16
+// CHECK-NEXT:    ret void
+//
+//
+// CHECK-LABEL: define linkonce_odr noundef <4 x i32> @_Z39templated_amdgcn_flat_load_monitor_b128ILj5ELj0EEDv4_iPS0_(
+// CHECK-SAME: ptr noundef [[INPTR:%.*]]) #[[ATTR0]] comdat {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
+// CHECK-NEXT:    [[INPTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT:    [[INPTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[INPTR_ADDR]] to ptr
+// CHECK-NEXT:    store ptr [[INPTR]], ptr [[INPTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[INPTR_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.amdgcn.flat.load.monitor.b128.v4i32(ptr [[TMP0]], i32 5, metadata [[META4:![0-9]+]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
+//
+//.
+// CHECK: [[META4]] = !{!""}
+//.

>From 399199af1cefca9302d5a9884383b4949195ba3c Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Thu, 5 Feb 2026 10:56:38 +0100
Subject: [PATCH 06/10] rebase

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 710d29ae949c3..f72189c2b4507 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1617,7 +1617,8 @@ void SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
     Info.flags = MachineMemOperand::MOLoad;
     Info.order = parseAtomicOrderingCABIArg(CI, 1);
     Info.ssid = parseSyncscopeMDArg(CI, 2);
-    return true;
+    Infos.push_back(Info);
+    return;
   }
   case Intrinsic::amdgcn_cooperative_atomic_load_32x4B:
   case Intrinsic::amdgcn_cooperative_atomic_load_16x8B:
@@ -1629,6 +1630,7 @@ void SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
     Info.flags = (MachineMemOperand::MOLoad | MOCooperative);
     Info.order = parseAtomicOrderingCABIArg(CI, 1);
     Info.ssid = parseSyncscopeMDArg(CI, 2);
+    Infos.push_back(Info);
     return;
   }
   case Intrinsic::amdgcn_cooperative_atomic_store_32x4B:
@@ -1641,6 +1643,7 @@ void SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
     Info.flags = (MachineMemOperand::MOStore | MOCooperative);
     Info.order = parseAtomicOrderingCABIArg(CI, 2);
     Info.ssid = parseSyncscopeMDArg(CI, 3);
+    Infos.push_back(Info);
     return;
   }
   case Intrinsic::amdgcn_ds_gws_init:

>From c55515e791d84dab02d07bceb2f5385c18576014 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Thu, 5 Feb 2026 13:11:01 +0100
Subject: [PATCH 07/10] fix tests

---
 .../builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip   | 2 --
 .../builtins-amdgcn-gfx1250-load-monitor-templated.hip          | 2 --
 2 files changed, 4 deletions(-)

diff --git a/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
index dc345a4d83140..68d3ed674a15a 100644
--- a/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
+++ b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-cooperative-atomics-templated.hip
@@ -78,9 +78,7 @@ __device__ void test_amdgcn_cooperative_atomic_load_32x4B(int* addr, int *out)
 // CHECK-LABEL: define linkonce_odr noundef i32 @_Z38template_cooperative_atomic_load_32x4BILj5EEiPi(
 // CHECK-SAME: ptr noundef [[GADDR:%.*]]) #[[ATTR0]] comdat {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4, addrspace(5)
 // CHECK-NEXT:    [[GADDR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
-// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
 // CHECK-NEXT:    [[GADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[GADDR_ADDR]] to ptr
 // CHECK-NEXT:    store ptr [[GADDR]], ptr [[GADDR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[GADDR_ADDR_ASCAST]], align 8
diff --git a/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
index 2de1b51f57337..bc7a0878c9e57 100644
--- a/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
+++ b/clang/test/CodeGenHIP/builtins-amdgcn-gfx1250-load-monitor-templated.hip
@@ -36,9 +36,7 @@ __device__ void test_amdgcn_flat_load_monitor_b128_from_template(v4i* inptr, v4i
 // CHECK-LABEL: define linkonce_odr noundef <4 x i32> @_Z39templated_amdgcn_flat_load_monitor_b128ILj5ELj0EEDv4_iPS0_(
 // CHECK-SAME: ptr noundef [[INPTR:%.*]]) #[[ATTR0]] comdat {
 // CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <4 x i32>, align 16, addrspace(5)
 // CHECK-NEXT:    [[INPTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
-// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
 // CHECK-NEXT:    [[INPTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[INPTR_ADDR]] to ptr
 // CHECK-NEXT:    store ptr [[INPTR]], ptr [[INPTR_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[INPTR_ADDR_ASCAST]], align 8

>From daef4d911f99d95707c4c4e0c30f7d5f633c2af6 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Thu, 5 Feb 2026 14:02:11 +0100
Subject: [PATCH 08/10] Comment

---
 clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp | 2 --
 1 file changed, 2 deletions(-)

diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index 55b1db5bbc5a2..453dd0ec18ebc 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -281,7 +281,6 @@ static llvm::AtomicOrdering mapCABIAtomicOrdering(unsigned AO) {
   case llvm::AtomicOrderingCABI::acquire:
   case llvm::AtomicOrderingCABI::consume:
     return llvm::AtomicOrdering::Acquire;
-    break;
   case llvm::AtomicOrderingCABI::release:
     return llvm::AtomicOrdering::Release;
   case llvm::AtomicOrderingCABI::acq_rel:
@@ -291,7 +290,6 @@ static llvm::AtomicOrdering mapCABIAtomicOrdering(unsigned AO) {
   case llvm::AtomicOrderingCABI::relaxed:
     return llvm::AtomicOrdering::Monotonic;
   }
-  llvm_unreachable("unknown CABI Ordering");
 }
 
 // For processing memory ordering and memory scope arguments of various

>From 46c25b2ed55d2a7961211d285d0fd97053cbf9c9 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Fri, 6 Feb 2026 11:17:16 +0100
Subject: [PATCH 09/10] Use StringRef

---
 clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp |  2 +-
 clang/lib/CodeGen/TargetInfo.cpp            |  2 +-
 clang/lib/CodeGen/TargetInfo.h              |  6 +--
 clang/lib/CodeGen/Targets/AMDGPU.cpp        | 45 ++++++++-------------
 clang/lib/CodeGen/Targets/SPIR.cpp          |  6 +--
 5 files changed, 24 insertions(+), 37 deletions(-)

diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index 453dd0ec18ebc..e923c285eff28 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -830,7 +830,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
     auto Scope = static_cast<SyncScope>(ScopeExpr->getZExtValue());
     llvm::AtomicOrdering AO = mapCABIAtomicOrdering(AOExpr->getZExtValue());
 
-    std::string ScopeStr = CGM.getTargetCodeGenInfo().getLLVMSyncScopeStr(
+    StringRef ScopeStr = CGM.getTargetCodeGenInfo().getLLVMSyncScopeStr(
         CGM.getLangOpts(), Scope, AO);
 
     llvm::MDNode *MD =
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index 187355023030b..dc0b392fd37f7 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -148,7 +148,7 @@ LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
   return D ? D->getType().getAddressSpace() : LangAS::Default;
 }
 
-std::string
+StringRef
 TargetCodeGenInfo::getLLVMSyncScopeStr(const LangOptions &LangOpts,
                                        SyncScope Scope,
                                        llvm::AtomicOrdering Ordering) const {
diff --git a/clang/lib/CodeGen/TargetInfo.h b/clang/lib/CodeGen/TargetInfo.h
index 1d34eeb98582d..98ee894fe557f 100644
--- a/clang/lib/CodeGen/TargetInfo.h
+++ b/clang/lib/CodeGen/TargetInfo.h
@@ -327,9 +327,9 @@ class TargetCodeGenInfo {
   }
 
   /// Get the syncscope used in LLVM IR as a string
-  virtual std::string getLLVMSyncScopeStr(const LangOptions &LangOpts,
-                                          SyncScope Scope,
-                                          llvm::AtomicOrdering Ordering) const;
+  virtual StringRef getLLVMSyncScopeStr(const LangOptions &LangOpts,
+                                        SyncScope Scope,
+                                        llvm::AtomicOrdering Ordering) const;
 
   /// Get the syncscope used in LLVM IR as a SyncScope ID.
   llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
diff --git a/clang/lib/CodeGen/Targets/AMDGPU.cpp b/clang/lib/CodeGen/Targets/AMDGPU.cpp
index a8fdadb3aa583..27e937b81129d 100644
--- a/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -310,8 +310,8 @@ class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
   }
   LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                   const VarDecl *D) const override;
-  std::string getLLVMSyncScopeStr(const LangOptions &LangOpts, SyncScope Scope,
-                                  llvm::AtomicOrdering Ordering) const override;
+  StringRef getLLVMSyncScopeStr(const LangOptions &LangOpts, SyncScope Scope,
+                                llvm::AtomicOrdering Ordering) const override;
   void setTargetAtomicMetadata(CodeGenFunction &CGF,
                                llvm::Instruction &AtomicInst,
                                const AtomicExpr *Expr = nullptr) const override;
@@ -491,53 +491,40 @@ AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
   return DefaultGlobalAS;
 }
 
-std::string AMDGPUTargetCodeGenInfo::getLLVMSyncScopeStr(
+StringRef AMDGPUTargetCodeGenInfo::getLLVMSyncScopeStr(
     const LangOptions &LangOpts, SyncScope Scope,
     llvm::AtomicOrdering Ordering) const {
-  std::string Name;
+
+  // OpenCL assumes by default that atomic scopes are per-address space for
+  // non-sequentially consistent operations.
+  bool IsOneAs = (Scope >= SyncScope::OpenCLWorkGroup &&
+                  Scope <= SyncScope::OpenCLSubGroup &&
+                  Ordering != llvm::AtomicOrdering::SequentiallyConsistent);
+
   switch (Scope) {
   case SyncScope::HIPSingleThread:
   case SyncScope::SingleScope:
-    Name = "singlethread";
-    break;
+    return IsOneAs ? "singlethread-one-as" : "singlethread";
   case SyncScope::HIPWavefront:
   case SyncScope::OpenCLSubGroup:
   case SyncScope::WavefrontScope:
-    Name = "wavefront";
-    break;
+    return IsOneAs ? "wavefront-one-as" : "wavefront";
   case SyncScope::HIPCluster:
   case SyncScope::ClusterScope:
-    Name = "cluster";
-    break;
+    return IsOneAs ? "cluster-one-as" : "cluster";
   case SyncScope::HIPWorkgroup:
   case SyncScope::OpenCLWorkGroup:
   case SyncScope::WorkgroupScope:
-    Name = "workgroup";
-    break;
+    return IsOneAs ? "workgroup-one-as" : "workgroup";
   case SyncScope::HIPAgent:
   case SyncScope::OpenCLDevice:
   case SyncScope::DeviceScope:
-    Name = "agent";
-    break;
+    return IsOneAs ? "agent-one-as" : "agent";
   case SyncScope::SystemScope:
   case SyncScope::HIPSystem:
   case SyncScope::OpenCLAllSVMDevices:
-    Name = "";
-    break;
-  }
-
-  // OpenCL assumes by default that atomic scopes are per-address space for
-  // non-sequentially consistent operations.
-  if (Scope >= SyncScope::OpenCLWorkGroup &&
-      Scope <= SyncScope::OpenCLSubGroup &&
-      Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
-    if (!Name.empty())
-      Name = Twine(Twine(Name) + Twine("-")).str();
-
-    Name = Twine(Twine(Name) + Twine("one-as")).str();
+    return IsOneAs ? "one-as" : "";
   }
-
-  return Name;
 }
 
 void AMDGPUTargetCodeGenInfo::setTargetAtomicMetadata(
diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp
index 60727aced9c6a..52d019b855dbc 100644
--- a/clang/lib/CodeGen/Targets/SPIR.cpp
+++ b/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -131,8 +131,8 @@ class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
                                   const VarDecl *D) const override;
   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                            CodeGen::CodeGenModule &M) const override;
-  std::string getLLVMSyncScopeStr(const LangOptions &LangOpts, SyncScope Scope,
-                                  llvm::AtomicOrdering Ordering) const override;
+  StringRef getLLVMSyncScopeStr(const LangOptions &LangOpts, SyncScope Scope,
+                                llvm::AtomicOrdering Ordering) const override;
   bool supportsLibCall() const override {
     return getABIInfo().getTarget().getTriple().getVendor() !=
            llvm::Triple::AMD;
@@ -534,7 +534,7 @@ void SPIRVTargetCodeGenInfo::setTargetAttributes(
                  llvm::MDNode::get(M.getLLVMContext(), AttrMDArgs));
 }
 
-std::string SPIRVTargetCodeGenInfo::getLLVMSyncScopeStr(
+StringRef SPIRVTargetCodeGenInfo::getLLVMSyncScopeStr(
     const LangOptions &, SyncScope Scope, llvm::AtomicOrdering) const {
   switch (Scope) {
   case SyncScope::HIPSingleThread:

>From 25ccda73b47d1584e1e6348e09862d6728379441 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Fri, 6 Feb 2026 12:21:03 +0100
Subject: [PATCH 10/10] rebase

---
 clang/include/clang/Basic/BuiltinsAMDGPU.td | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.td b/clang/include/clang/Basic/BuiltinsAMDGPU.td
index 47b545ee4aac4..2b3bad9d5bf7a 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.td
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.td
@@ -712,11 +712,11 @@ def __builtin_amdgcn_flat_prefetch : AMDGPUBuiltin<"void(void const address_spac
 def __builtin_amdgcn_global_prefetch : AMDGPUBuiltin<"void(void const address_space<1> *, _Constant int)", [Const], "vmem-pref-insts">;
 
 def __builtin_amdgcn_global_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<1> *, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_global_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int address_space<1> *>, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_global_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int address_space<1> *>, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<1> *, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_global_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<1> *, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
 def __builtin_amdgcn_flat_load_monitor_b32 : AMDGPUBuiltin<"int(int address_space<0> *, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_flat_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int address_space<0> *>, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
-def __builtin_amdgcn_flat_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int address_space<0> *>, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_load_monitor_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<0> *, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
+def __builtin_amdgcn_flat_load_monitor_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<0> *, _Constant int, _Constant int)", [Const], "gfx1250-insts">;
 def __builtin_amdgcn_cluster_load_b32 : AMDGPUBuiltin<"int(int address_space<1> *, _Constant int, int)", [Const], "mcast-load-insts,wavefrontsize32">;
 def __builtin_amdgcn_cluster_load_b64 : AMDGPUBuiltin<"_ExtVector<2, int>(_ExtVector<2, int> address_space<1> *, _Constant int, int)", [Const], "mcast-load-insts,wavefrontsize32">;
 def __builtin_amdgcn_cluster_load_b128 : AMDGPUBuiltin<"_ExtVector<4, int>(_ExtVector<4, int> address_space<1> *, _Constant int, int)", [Const], "mcast-load-insts,wavefrontsize32">;
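
A minimal usage sketch (not part of the patch; it simply mirrors the call pattern in the templated HIP test earlier in the series and assumes a HIP device function): with the updated builtin signatures above, the monitor load now takes a compile-time atomic ordering and memory scope as its last two arguments.

  #define __device__ __attribute__((device))

  __device__ int load_monitor_b32_sketch(int *p) {
    // Flat monitor load with seq_cst ordering at system scope, using the same
    // ordering/scope constants as the templated load-monitor test above.
    return __builtin_amdgcn_flat_load_monitor_b32(p, __ATOMIC_SEQ_CST,
                                                  __MEMORY_SCOPE_SYSTEM);
  }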


