[clang] Add clang atomic control options and attribute (PR #114841)

Yaxun Liu via cfe-commits cfe-commits at lists.llvm.org
Tue Jan 28 13:57:07 PST 2025


https://github.com/yxsamliu updated https://github.com/llvm/llvm-project/pull/114841

>From f09c8de16261aaf50ecffdab4701e6140bdae7f1 Mon Sep 17 00:00:00 2001
From: "Yaxun (Sam) Liu" <yaxun.liu at amd.com>
Date: Wed, 17 Jul 2024 09:39:28 -0400
Subject: [PATCH] Add clang atomic control options and attribute

Add an option and a statement attribute for controlling the emission of
target-specific metadata on atomicrmw instructions in IR.

The RFC for this attribute and option is
https://discourse.llvm.org/t/rfc-add-clang-atomic-control-options-and-pragmas/80641.
Originally a pragma was proposed; it was later changed to a Clang attribute.

This attribute allows users to specify one, two, or all three options and must be applied
to a compound statement. The attribute can also be nested, with inner attributes
overriding the options specified by outer attributes or the target's default
options. These options will then determine the target-specific metadata added to atomic
instructions in the IR.
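
As an illustration, a minimal HIP sketch modeled on the tests added in this
patch (e.g. clang/test/AST/ast-dump-atomic-options.hip); the function name is
illustrative only:

  __device__ __host__ void example(float *a) {
    // Applies to the compound statement; options left unspecified keep the
    // target defaults (or the settings of an enclosing attribute).
    [[clang::atomic(!no_remote_memory, ignore_denormal_mode)]] {
      __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
      // Nested attribute: overrides no_remote_memory only; the other two
      // options are inherited from the outer attribute.
      [[clang::atomic(no_remote_memory)]] {
        __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
      }
    }
  }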

In addition to the attribute, a new compiler option is introduced:
-fatomic=no_remote_memory:{on|off},no_fine_grained_memory:{on|off},ignore_denormal_mode:{on|off}.
This compiler option allows users to override the target's default options through the
Clang driver and front end.
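
For example, an illustrative invocation (the file name is hypothetical):

  clang -x hip -fatomic=no_remote_memory:off,ignore_denormal_mode:on test.hip

This overrides the target's defaults for two of the three options and leaves
no_fine_grained_memory at its default.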

In terms of implementation, the atomic attribute is represented in the AST by the
existing AttributedStmt, with minimal changes to the AST and Sema.

During code generation in Clang, the CodeGenModule maintains the current atomic options,
which are used to emit the relevant metadata for atomic instructions. RAII is used
to save and restore the atomic options when entering and exiting nested
AttributedStmts.
---
 clang/include/clang/Basic/AtomicOptions.def   |  19 +
 clang/include/clang/Basic/Attr.td             |  56 +++
 .../clang/Basic/DiagnosticDriverKinds.td      |   7 +
 .../clang/Basic/DiagnosticSemaKinds.td        |   2 +
 clang/include/clang/Basic/LangOptions.h       | 155 ++++++
 clang/include/clang/Basic/TargetInfo.h        |   6 +
 clang/include/clang/Driver/Options.td         |   8 +
 clang/include/clang/Parse/Parser.h            |   5 +
 clang/lib/Basic/LangOptions.cpp               |  46 ++
 clang/lib/Basic/Targets/AMDGPU.cpp            |   7 +
 clang/lib/CodeGen/CGStmt.cpp                  |   5 +
 clang/lib/CodeGen/CodeGenFunction.h           |  17 +
 clang/lib/CodeGen/CodeGenModule.cpp           |   3 +-
 clang/lib/CodeGen/CodeGenModule.h             |   8 +
 clang/lib/CodeGen/Targets/AMDGPU.cpp          |  19 +-
 clang/lib/Driver/ToolChains/Clang.cpp         |  26 +
 clang/lib/Parse/ParseDecl.cpp                 |  71 +++
 clang/lib/Sema/SemaStmtAttr.cpp               |  43 ++
 clang/test/AST/ast-dump-atomic-options.hip    | 102 ++++
 .../test/CodeGen/AMDGPU/amdgpu-atomic-float.c | 247 ++++------
 clang/test/CodeGenCUDA/amdgpu-atomic-ops.cu   | 195 ++++----
 clang/test/CodeGenCUDA/atomic-ops.cu          | 200 ++++----
 clang/test/CodeGenCUDA/atomic-options.hip     | 456 ++++++++++++++++++
 clang/test/CodeGenOpenCL/atomic-ops.cl        |  20 +-
 clang/test/Driver/atomic-options.hip          |  31 ++
 .../test/OpenMP/amdgpu-unsafe-fp-atomics.cpp  |  10 +-
 clang/test/Parser/Inputs/cuda.h               |  54 +++
 clang/test/Parser/atomic-options.hip          |  30 ++
 28 files changed, 1458 insertions(+), 390 deletions(-)
 create mode 100644 clang/include/clang/Basic/AtomicOptions.def
 create mode 100644 clang/test/AST/ast-dump-atomic-options.hip
 create mode 100644 clang/test/CodeGenCUDA/atomic-options.hip
 create mode 100644 clang/test/Driver/atomic-options.hip
 create mode 100644 clang/test/Parser/Inputs/cuda.h
 create mode 100644 clang/test/Parser/atomic-options.hip

diff --git a/clang/include/clang/Basic/AtomicOptions.def b/clang/include/clang/Basic/AtomicOptions.def
new file mode 100644
index 00000000000000..5939b59db120f9
--- /dev/null
+++ b/clang/include/clang/Basic/AtomicOptions.def
@@ -0,0 +1,19 @@
+//===--- AtomicOptions.def - Atomic Options database -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This file defines the Atomic language options. Users of this file
+// must define the OPTION macro to make use of this information.
+#ifndef OPTION
+#  error Define the OPTION macro to handle atomic language options
+#endif
+
+// OPTION(name, type, width, previousName)
+OPTION(NoRemoteMemory, bool, 1, First)
+OPTION(NoFineGrainedMemory, bool, 1, NoRemoteMemory)
+OPTION(IgnoreDenormalMode, bool, 1, NoFineGrainedMemory)
+
+#undef OPTION
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index f4ba2bc3c6de31..aa89cecf696f7c 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -4972,3 +4972,59 @@ def NoTrivialAutoVarInit: InheritableAttr {
   let Documentation = [NoTrivialAutoVarInitDocs];
   let SimpleHandler = 1;
 }
+
+def Atomic : StmtAttr {
+  let Spellings = [Clang<"atomic">];
+  let Args = [
+    EnumArgument<"NoRemoteMemory", "NoRemoteMemoryTy", /*IsString*/ false,
+      ["no_remote_memory", "!no_remote_memory", ""],
+      ["NoRemoteMemoryOn", "NoRemoteMemoryOff", "NoRemoteMemoryUnset"]>,
+    EnumArgument<"NoFineGrainedMemory", "NoFineGrainedMemoryTy", /*IsString*/ false,
+      ["no_fine_grained_memory", "!no_fine_grained_memory", ""],
+      ["NoFineGrainedMemoryOn", "NoFineGrainedMemoryOff", "NoFineGrainedMemoryUnset"]>,
+    EnumArgument<"IgnoreDenormalMode", "IgnoreDenormalModeTy", /*IsString*/ false,
+      ["ignore_denormal_mode", "!ignore_denormal_mode", ""],
+      ["IgnoreDenormalModeOn", "IgnoreDenormalModeOff", "IgnoreDenormalModeUnset"]>
+  ];
+  let Subjects = SubjectList<[CompoundStmt], ErrorDiag, "compound statements">;
+  let HasCustomParsing = 1;
+  let Documentation = [Undocumented];
+  let AdditionalMembers = [{
+    AtomicOptionsOverride AOO;
+    AtomicOptionsOverride getAtomicOptionsOverride() const { return AOO; }
+    void updateAtomicOptionsOverride() {
+      switch (getNoRemoteMemory()) {
+      case NoRemoteMemoryOn:
+        AOO.setNoRemoteMemoryOverride(true);
+        break;
+      case NoRemoteMemoryOff:
+        AOO.setNoRemoteMemoryOverride(false);
+        break;
+      case NoRemoteMemoryUnset:
+        AOO.clearNoRemoteMemoryOverride();
+      }
+
+      switch (getNoFineGrainedMemory()) {
+      case NoFineGrainedMemoryOn:
+        AOO.setNoFineGrainedMemoryOverride(true);
+        break;
+      case NoFineGrainedMemoryOff:
+        AOO.setNoFineGrainedMemoryOverride(false);
+        break;
+      case NoFineGrainedMemoryUnset:
+        AOO.clearNoFineGrainedMemoryOverride();
+      }
+
+      switch (getIgnoreDenormalMode()) {
+      case IgnoreDenormalModeOn:
+        AOO.setIgnoreDenormalModeOverride(true);
+        break;
+      case IgnoreDenormalModeOff:
+        AOO.setIgnoreDenormalModeOverride(false);
+        break;
+      case IgnoreDenormalModeUnset:
+        AOO.clearIgnoreDenormalModeOverride();
+      }
+    }
+  }];
+}
diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td
index 288786b8ce9399..76306823539f0e 100644
--- a/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -305,6 +305,13 @@ def err_drv_invalid_int_value : Error<"invalid integral value '%1' in '%0'">;
 def err_drv_invalid_value_with_suggestion : Error<
     "invalid value '%1' in '%0', expected one of: %2">;
 def err_drv_alignment_not_power_of_two : Error<"alignment is not a power of 2 in '%0'">;
+
+def err_drv_invalid_atomic_option : Error<
+  "invalid argument '%0' to -fatomic=; must be a "
+  "comma-separated list of key:value pairs, where allowed keys are "
+  "'no_fine_grained_memory', 'no_remote_memory', 'ignore_denormal_mode', "
+  "and values are 'on' or 'off', and each key must be unique">;
+
 def err_drv_invalid_remap_file : Error<
     "invalid option '%0' not of the form <from-file>;<to-file>">;
 def err_drv_invalid_gcc_install_dir : Error<"'%0' does not contain a GCC installation">;
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index db911ed121e951..c0dc1d53adc208 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -3292,6 +3292,8 @@ def err_invalid_branch_protection_spec : Error<
   "invalid or misplaced branch protection specification '%0'">;
 def warn_unsupported_branch_protection_spec : Warning<
   "unsupported branch protection specification '%0'">, InGroup<BranchProtection>;
+def err_attribute_invalid_atomic_argument : Error<
+  "invalid argument '%0' to atomic attribute; valid options are: 'no_remote_memory', 'no_fine_grained_memory', 'ignore_denormal_mode' (optionally prefixed with '!')">;
 
 def warn_unsupported_target_attribute
     : Warning<"%select{unsupported|duplicate|unknown}0%select{| CPU|"
diff --git a/clang/include/clang/Basic/LangOptions.h b/clang/include/clang/Basic/LangOptions.h
index 114a5d34a008bd..b8f6b133140bd6 100644
--- a/clang/include/clang/Basic/LangOptions.h
+++ b/clang/include/clang/Basic/LangOptions.h
@@ -623,6 +623,10 @@ class LangOptions : public LangOptionsBase {
   // WebAssembly target.
   bool NoWasmOpt = false;
 
+  /// The default atomic codegen options specified by command line in the
+  /// format of key:{on|off}.
+  std::vector<std::string> AtomicOptionsAsWritten;
+
   LangOptions();
 
   /// Set language defaults for the given input language and
@@ -1094,6 +1098,157 @@ inline void FPOptions::applyChanges(FPOptionsOverride FPO) {
   *this = FPO.applyOverrides(*this);
 }
 
+/// Atomic control options
+class AtomicOptionsOverride;
+class AtomicOptions {
+public:
+  using storage_type = uint16_t;
+
+  static constexpr unsigned StorageBitSize = 8 * sizeof(storage_type);
+
+  static constexpr storage_type FirstShift = 0, FirstWidth = 0;
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS)                                    \
+  static constexpr storage_type NAME##Shift =                                  \
+      PREVIOUS##Shift + PREVIOUS##Width;                                       \
+  static constexpr storage_type NAME##Width = WIDTH;                           \
+  static constexpr storage_type NAME##Mask = ((1 << NAME##Width) - 1)          \
+                                             << NAME##Shift;
+#include "clang/Basic/AtomicOptions.def"
+
+  static constexpr storage_type TotalWidth = 0
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) +WIDTH
+#include "clang/Basic/AtomicOptions.def"
+      ;
+  static_assert(TotalWidth <= StorageBitSize,
+                "Too short type for AtomicOptions");
+
+private:
+  storage_type Value;
+
+  AtomicOptionsOverride getChangesSlow(const AtomicOptions &Base) const;
+
+public:
+  AtomicOptions() : Value(0) {
+    setNoRemoteMemory(false);
+    setNoFineGrainedMemory(false);
+    setIgnoreDenormalMode(false);
+  }
+
+  bool operator==(AtomicOptions other) const { return Value == other.Value; }
+
+  /// Return the default value of AtomicOptions that's used when trailing
+  /// storage isn't required.
+  static AtomicOptions defaultWithoutTrailingStorage(const LangOptions &LO);
+
+  storage_type getAsOpaqueInt() const { return Value; }
+  static AtomicOptions getFromOpaqueInt(storage_type Value) {
+    AtomicOptions Opts;
+    Opts.Value = Value;
+    return Opts;
+  }
+
+  /// Return difference with the given option set.
+  AtomicOptionsOverride getChangesFrom(const AtomicOptions &Base) const;
+
+  void applyChanges(AtomicOptionsOverride AO);
+
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS)                                    \
+  TYPE get##NAME() const {                                                     \
+    return static_cast<TYPE>((Value & NAME##Mask) >> NAME##Shift);             \
+  }                                                                            \
+  void set##NAME(TYPE value) {                                                 \
+    Value = (Value & ~NAME##Mask) | (storage_type(value) << NAME##Shift);      \
+  }
+#include "clang/Basic/AtomicOptions.def"
+  LLVM_DUMP_METHOD void dump();
+};
+
+/// Represents difference between two AtomicOptions values.
+class AtomicOptionsOverride {
+  AtomicOptions Options = AtomicOptions::getFromOpaqueInt(0);
+  AtomicOptions::storage_type OverrideMask = 0;
+
+public:
+  /// The type suitable for storing values of AtomicOptionsOverride. Must be
+  /// at least twice as wide as AtomicOptions::storage_type.
+  using storage_type = uint32_t;
+  static_assert(sizeof(storage_type) >= 2 * sizeof(AtomicOptions::storage_type),
+                "Too short type for AtomicOptionsOverride");
+
+  /// Bit mask selecting bits of OverrideMask in serialized representation of
+  /// AtomicOptionsOverride.
+  static constexpr storage_type OverrideMaskBits =
+      (static_cast<storage_type>(1) << AtomicOptions::StorageBitSize) - 1;
+
+  AtomicOptionsOverride() {}
+  AtomicOptionsOverride(const LangOptions &LO);
+  AtomicOptionsOverride(AtomicOptions AO)
+      : Options(AO), OverrideMask(OverrideMaskBits) {}
+  AtomicOptionsOverride(AtomicOptions AO, AtomicOptions::storage_type Mask)
+      : Options(AO), OverrideMask(Mask) {}
+
+  bool requiresTrailingStorage() const { return OverrideMask != 0; }
+
+  storage_type getAsOpaqueInt() const {
+    return (static_cast<storage_type>(Options.getAsOpaqueInt())
+            << AtomicOptions::StorageBitSize) |
+           OverrideMask;
+  }
+
+  static AtomicOptionsOverride getFromOpaqueInt(storage_type I) {
+    AtomicOptionsOverride Opts;
+    Opts.OverrideMask = I & OverrideMaskBits;
+    Opts.Options =
+        AtomicOptions::getFromOpaqueInt(I >> AtomicOptions::StorageBitSize);
+    return Opts;
+  }
+
+  AtomicOptions applyOverrides(AtomicOptions Base) {
+    AtomicOptions Result = AtomicOptions::getFromOpaqueInt(
+        (Base.getAsOpaqueInt() & ~OverrideMask) |
+        (Options.getAsOpaqueInt() & OverrideMask));
+    return Result;
+  }
+
+  bool operator==(AtomicOptionsOverride other) const {
+    return Options == other.Options && OverrideMask == other.OverrideMask;
+  }
+  bool operator!=(AtomicOptionsOverride other) const {
+    return !(*this == other);
+  }
+
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS)                                    \
+  bool has##NAME##Override() const {                                           \
+    return OverrideMask & AtomicOptions::NAME##Mask;                           \
+  }                                                                            \
+  TYPE get##NAME##Override() const {                                           \
+    assert(has##NAME##Override());                                             \
+    return Options.get##NAME();                                                \
+  }                                                                            \
+  void clear##NAME##Override() {                                               \
+    Options.set##NAME(TYPE(0));                                                \
+    OverrideMask &= ~AtomicOptions::NAME##Mask;                                \
+  }                                                                            \
+  void set##NAME##Override(TYPE value) {                                       \
+    Options.set##NAME(value);                                                  \
+    OverrideMask |= AtomicOptions::NAME##Mask;                                 \
+  }
+#include "clang/Basic/AtomicOptions.def"
+
+  LLVM_DUMP_METHOD void dump();
+};
+
+inline AtomicOptionsOverride
+AtomicOptions::getChangesFrom(const AtomicOptions &Base) const {
+  if (Value == Base.Value)
+    return AtomicOptionsOverride();
+  return getChangesSlow(Base);
+}
+
+inline void AtomicOptions::applyChanges(AtomicOptionsOverride AO) {
+  *this = AO.applyOverrides(*this);
+}
+
 /// Describes the kind of translation unit being processed.
 enum TranslationUnitKind {
   /// The translation unit is a complete translation unit.
diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
index 43c09cf1f973e3..3a11d4a9019c88 100644
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -295,6 +295,9 @@ class TargetInfo : public TransferrableTargetInfo,
   // in function attributes in IR.
   llvm::StringSet<> ReadOnlyFeatures;
 
+  // Default atomic options
+  AtomicOptions AtomicOpts;
+
 public:
   /// Construct a target for the given options.
   ///
@@ -1687,6 +1690,9 @@ class TargetInfo : public TransferrableTargetInfo,
     return CC_C;
   }
 
+  /// Get the default atomic options.
+  AtomicOptions getAtomicOpts() const { return AtomicOpts; }
+
   enum CallingConvCheckResult {
     CCCR_OK,
     CCCR_Warning,
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 1af633e59d0bba..e36ce955219a83 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -2355,6 +2355,14 @@ def fsymbol_partition_EQ : Joined<["-"], "fsymbol-partition=">, Group<f_Group>,
   Visibility<[ClangOption, CC1Option]>,
   MarshallingInfoString<CodeGenOpts<"SymbolPartition">>;
 
+def fatomic_EQ : CommaJoined<["-"], "fatomic=">, Group<f_Group>,
+  Visibility<[ClangOption, CC1Option]>,
+  HelpText<"Specify atomic codegen options as a comma-separated list of "
+           "key:value pairs, allowed keys and values are "
+           "no_fine_grained_memory:on|off, no_remote_memory:on|off, "
+           "ignore_denormal_mode:on|off">,
+  MarshallingInfoStringVector<LangOpts<"AtomicOptionsAsWritten">>;
+
 defm memory_profile : OptInCC1FFlag<"memory-profile", "Enable", "Disable", " heap memory profiling">;
 def fmemory_profile_EQ : Joined<["-"], "fmemory-profile=">,
     Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
diff --git a/clang/include/clang/Parse/Parser.h b/clang/include/clang/Parse/Parser.h
index e99d2cf2eaa409..61cb50378a980a 100644
--- a/clang/include/clang/Parse/Parser.h
+++ b/clang/include/clang/Parse/Parser.h
@@ -3101,6 +3101,11 @@ class Parser : public CodeCompletionHandler {
   std::optional<AvailabilitySpec> ParseAvailabilitySpec();
   ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
 
+  void ParseAtomicAttribute(IdentifierInfo &AttrName,
+                            SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
+                            SourceLocation *EndLoc, IdentifierInfo *ScopeName,
+                            SourceLocation ScopeLoc, ParsedAttr::Form Form);
+
   void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
                                           SourceLocation Loc,
                                           ParsedAttributes &Attrs,
diff --git a/clang/lib/Basic/LangOptions.cpp b/clang/lib/Basic/LangOptions.cpp
index e3037ec819add2..0b01d209f9ff56 100644
--- a/clang/lib/Basic/LangOptions.cpp
+++ b/clang/lib/Basic/LangOptions.cpp
@@ -240,3 +240,49 @@ LLVM_DUMP_METHOD void FPOptionsOverride::dump() {
 #include "clang/Basic/FPOptions.def"
   llvm::errs() << "\n";
 }
+
+AtomicOptionsOverride
+AtomicOptions::getChangesSlow(const AtomicOptions &Base) const {
+  AtomicOptions::storage_type OverrideMask = 0;
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS)                                    \
+  if (get##NAME() != Base.get##NAME())                                         \
+    OverrideMask |= NAME##Mask;
+#include "clang/Basic/AtomicOptions.def"
+  return AtomicOptionsOverride(*this, OverrideMask);
+}
+
+LLVM_DUMP_METHOD void AtomicOptions::dump() {
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS)                                    \
+  llvm::errs() << "\n " #NAME " " << get##NAME();
+#include "clang/Basic/AtomicOptions.def"
+  llvm::errs() << "\n";
+}
+
+LLVM_DUMP_METHOD void AtomicOptionsOverride::dump() {
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS)                                    \
+  if (has##NAME##Override())                                                   \
+    llvm::errs() << "\n " #NAME " Override is " << get##NAME##Override();
+#include "clang/Basic/AtomicOptions.def"
+  llvm::errs() << "\n";
+}
+
+AtomicOptionsOverride::AtomicOptionsOverride(const LangOptions &LO) {
+  for (const auto &Setting : LO.AtomicOptionsAsWritten) {
+    SmallVector<StringRef, 2> KeyValue;
+    StringRef(Setting).split(KeyValue, ":");
+    // Assuming option string has been checked elsewhere and is valid.
+    assert(KeyValue.size() == 2 && "Invalid atomic option format");
+    StringRef Key = KeyValue[0];
+    StringRef Val = KeyValue[1];
+    bool IsEnabled = (Val == "on");
+
+    if (Key == "no_fine_grained_memory")
+      setNoFineGrainedMemoryOverride(IsEnabled);
+    else if (Key == "no_remote_memory")
+      setNoRemoteMemoryOverride(IsEnabled);
+    else if (Key == "ignore_denormal_mode")
+      setIgnoreDenormalModeOverride(IsEnabled);
+    else
+      assert(false && "Unknown atomic option key");
+  }
+}
diff --git a/clang/lib/Basic/Targets/AMDGPU.cpp b/clang/lib/Basic/Targets/AMDGPU.cpp
index 0d308cb6af9699..9ad0553d2d1409 100644
--- a/clang/lib/Basic/Targets/AMDGPU.cpp
+++ b/clang/lib/Basic/Targets/AMDGPU.cpp
@@ -242,6 +242,11 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
   WavefrontSize = (GPUFeatures & llvm::AMDGPU::FEATURE_WAVE32) ? 32 : 64;
   AllowAMDGPUUnsafeFPAtomics = Opts.AllowAMDGPUUnsafeFPAtomics;
 
+  // Set the default atomic options
+  AtomicOpts.setNoRemoteMemory(true);
+  AtomicOpts.setNoFineGrainedMemory(true);
+  AtomicOpts.setIgnoreDenormalMode(Opts.AllowAMDGPUUnsafeFPAtomics);
+
   // Set pointer width and alignment for the generic address space.
   PointerWidth = PointerAlign = getPointerWidthV(LangAS::Default);
   if (getMaxPointerWidth() == 64) {
@@ -265,6 +270,8 @@ void AMDGPUTargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
   // to OpenCL can be removed from the following line.
   setAddressSpaceMap((Opts.OpenCL && !Opts.OpenCLGenericAddressSpace) ||
                      !isAMDGCN(getTriple()));
+
+  AtomicOpts.applyChanges(AtomicOptionsOverride(Opts));
 }
 
 ArrayRef<Builtin::Info> AMDGPUTargetInfo::getTargetBuiltins() const {
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 7c944fe85a352d..23f49859f84ce9 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -781,6 +781,7 @@ void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
   HLSLControlFlowHintAttr::Spelling flattenOrBranch =
       HLSLControlFlowHintAttr::SpellingNotCalculated;
   const CallExpr *musttail = nullptr;
+  AtomicOptionsOverride AOO;
 
   for (const auto *A : S.getAttrs()) {
     switch (A->getKind()) {
@@ -811,6 +812,9 @@ void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
         Builder.CreateAssumption(AssumptionVal);
       }
     } break;
+    case attr::Atomic: {
+      AOO = cast<AtomicAttr>(A)->getAtomicOptionsOverride();
+    } break;
     case attr::HLSLControlFlowHint: {
       flattenOrBranch = cast<HLSLControlFlowHintAttr>(A)->getSemanticSpelling();
     } break;
@@ -822,6 +826,7 @@ void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
   SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
   SaveAndRestore save_musttail(MustTailCall, musttail);
   SaveAndRestore save_flattenOrBranch(HLSLControlFlowAttr, flattenOrBranch);
+  CGAtomicOptionsRAII AORAII(CGM, AOO);
   EmitStmt(S.getSubStmt(), S.getAttrs());
 }
 
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index e978cad4336238..1960183ed5477a 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -829,6 +829,23 @@ class CodeGenFunction : public CodeGenTypeCache {
   };
   FPOptions CurFPFeatures;
 
+  class CGAtomicOptionsRAII {
+  public:
+    CGAtomicOptionsRAII(CodeGenModule &CGM_, AtomicOptions AO)
+        : CGM(CGM_), SavedAtomicOpts(CGM.getAtomicOpts()) {
+      CGM.setAtomicOpts(AO);
+    }
+    CGAtomicOptionsRAII(CodeGenModule &CGM_, AtomicOptionsOverride AOO)
+        : CGM(CGM_), SavedAtomicOpts(CGM.getAtomicOpts()) {
+      CGM.setAtomicOpts(AOO.applyOverrides(SavedAtomicOpts));
+    }
+    ~CGAtomicOptionsRAII() { CGM.setAtomicOpts(SavedAtomicOpts); }
+
+  private:
+    CodeGenModule &CGM;
+    AtomicOptions SavedAtomicOpts;
+  };
+
 public:
   /// ObjCEHValueStack - Stack of Objective-C exception values, used for
   /// rethrows.
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index eb8d3ceeeba4c0..9b1fa361bf0ecb 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -342,7 +342,8 @@ CodeGenModule::CodeGenModule(ASTContext &C,
       PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
       Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
       VMContext(M.getContext()), VTables(*this), StackHandler(diags),
-      SanitizerMD(new SanitizerMetadata(*this)) {
+      SanitizerMD(new SanitizerMetadata(*this)),
+      AtomicOpts(Target.getAtomicOpts()) {
 
   // Initialize the type cache.
   Types.reset(new CodeGenTypes(*this));
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
index 0956296e2d5d86..c8dab554abb364 100644
--- a/clang/lib/CodeGen/CodeGenModule.h
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -676,6 +676,8 @@ class CodeGenModule : public CodeGenTypeCache {
   std::optional<PointerAuthQualifier>
   computeVTPointerAuthentication(const CXXRecordDecl *ThisClass);
 
+  AtomicOptions AtomicOpts;
+
 public:
   CodeGenModule(ASTContext &C, IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
                 const HeaderSearchOptions &headersearchopts,
@@ -691,6 +693,12 @@ class CodeGenModule : public CodeGenTypeCache {
   /// Finalize LLVM code generation.
   void Release();
 
+  /// Get the current Atomic options.
+  AtomicOptions getAtomicOpts() { return AtomicOpts; }
+
+  /// Set the current Atomic options.
+  void setAtomicOpts(AtomicOptions AO) { AtomicOpts = AO; }
+
   /// Return true if we should emit location information for expressions.
   bool getExpressionLocationsEnabled() const;
 
diff --git a/clang/lib/CodeGen/Targets/AMDGPU.cpp b/clang/lib/CodeGen/Targets/AMDGPU.cpp
index 788eac5f28231e..15fb38bb476f20 100644
--- a/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -584,19 +584,20 @@ void AMDGPUTargetCodeGenInfo::setTargetAtomicMetadata(
     AtomicInst.setMetadata(llvm::LLVMContext::MD_noalias_addrspace, ASRange);
   }
 
-  if (!RMW || !CGF.getTarget().allowAMDGPUUnsafeFPAtomics())
+  if (!RMW)
     return;
 
-  // TODO: Introduce new, more controlled options that also work for integers,
-  // and deprecate allowAMDGPUUnsafeFPAtomics.
-  llvm::AtomicRMWInst::BinOp RMWOp = RMW->getOperation();
-  if (llvm::AtomicRMWInst::isFPOperation(RMWOp)) {
-    llvm::MDNode *Empty = llvm::MDNode::get(CGF.getLLVMContext(), {});
+  AtomicOptions AO = CGF.CGM.getAtomicOpts();
+  llvm::MDNode *Empty = llvm::MDNode::get(CGF.getLLVMContext(), {});
+  if (AO.getNoFineGrainedMemory())
     RMW->setMetadata("amdgpu.no.fine.grained.memory", Empty);
+  if (AO.getNoRemoteMemory())
+    RMW->setMetadata("amdgpu.no.remote.memory", Empty);
 
-    if (RMWOp == llvm::AtomicRMWInst::FAdd && RMW->getType()->isFloatTy())
-      RMW->setMetadata("amdgpu.ignore.denormal.mode", Empty);
-  }
+  if (AO.getIgnoreDenormalMode() &&
+      RMW->getOperation() == llvm::AtomicRMWInst::FAdd &&
+      RMW->getType()->isFloatTy())
+    RMW->setMetadata("amdgpu.ignore.denormal.mode", Empty);
 }
 
 bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index 589de953be5be1..59194dbf882bee 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -6032,6 +6032,32 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
 
   RenderFloatingPointOptions(TC, D, OFastEnabled, Args, CmdArgs, JA);
 
+  if (Arg *AtomicArg = Args.getLastArg(options::OPT_fatomic_EQ)) {
+    if (!AtomicArg->getNumValues()) {
+      D.Diag(clang::diag::warn_drv_empty_joined_argument)
+          << AtomicArg->getAsString(Args);
+    } else {
+      bool Valid = true;
+      std::set<StringRef> Keys;
+      for (StringRef Option : AtomicArg->getValues()) {
+        SmallVector<StringRef, 2> KeyValue;
+        Option.split(KeyValue, ":");
+        if (KeyValue.size() != 2 ||
+            (KeyValue[1] != "on" && KeyValue[1] != "off") ||
+            (KeyValue[0] != "no_fine_grained_memory" &&
+             KeyValue[0] != "no_remote_memory" &&
+             KeyValue[0] != "ignore_denormal_mode") ||
+            !Keys.insert(KeyValue[0]).second) {
+          Valid = false;
+          D.Diag(diag::err_drv_invalid_atomic_option) << Option;
+          break;
+        }
+      }
+      if (Valid)
+        CmdArgs.push_back(Args.MakeArgString(AtomicArg->getAsString(Args)));
+    }
+  }
+
   if (Arg *A = Args.getLastArg(options::OPT_fextend_args_EQ)) {
     const llvm::Triple::ArchType Arch = TC.getArch();
     if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64) {
diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp
index f136d5007e8a5f..cab63b8abbdc19 100644
--- a/clang/lib/Parse/ParseDecl.cpp
+++ b/clang/lib/Parse/ParseDecl.cpp
@@ -764,6 +764,73 @@ void Parser::ParseGNUAttributeArgs(
                            ScopeLoc, Form);
 }
 
+void Parser::ParseAtomicAttribute(
+    IdentifierInfo &AttrName, SourceLocation AttrNameLoc,
+    ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
+    SourceLocation ScopeLoc, ParsedAttr::Form Form) {
+  BalancedDelimiterTracker T(*this, tok::l_paren);
+  if (T.expectAndConsume())
+    return;
+
+  SmallVector<IdentifierLoc *, 4> Identifiers;
+
+  // Parse first argument
+  bool HasNot = false;
+  if (Tok.is(tok::exclaim)) {
+    HasNot = true;
+    ConsumeToken();
+  }
+
+  if (Tok.isNot(tok::identifier)) {
+    Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
+    SkipUntil(tok::r_paren, StopAtSemi);
+    return;
+  }
+
+  IdentifierLoc *IL = ParseIdentifierLoc();
+  if (HasNot) {
+    std::string Name = "!" + IL->Ident->getName().str();
+    IL->Ident = &Actions.getPreprocessor().getIdentifierTable().get(Name);
+  }
+  Identifiers.push_back(IL);
+
+  // Parse optional second and third arguments
+  while (TryConsumeToken(tok::comma)) {
+    HasNot = false;
+    if (Tok.is(tok::exclaim)) {
+      HasNot = true;
+      ConsumeToken();
+    }
+
+    if (Tok.isNot(tok::identifier)) {
+      Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
+      SkipUntil(tok::r_paren, StopAtSemi);
+      return;
+    }
+
+    IL = ParseIdentifierLoc();
+    if (HasNot) {
+      std::string Name = "!" + IL->Ident->getName().str();
+      IL->Ident = &Actions.getPreprocessor().getIdentifierTable().get(Name);
+    }
+    Identifiers.push_back(IL);
+  }
+
+  if (T.consumeClose())
+    return;
+
+  if (EndLoc)
+    *EndLoc = T.getCloseLocation();
+
+  SmallVector<ArgsUnion, 4> Args;
+  for (auto *IdLoc : Identifiers) {
+    Args.push_back(IdLoc);
+  }
+
+  Attrs.addNew(&AttrName, SourceRange(AttrNameLoc, T.getCloseLocation()),
+               ScopeName, ScopeLoc, Args.data(), Args.size(), Form);
+}
+
 unsigned Parser::ParseClangAttributeArgs(
     IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
     ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
@@ -801,6 +868,10 @@ unsigned Parser::ParseClangAttributeArgs(
   case ParsedAttr::AT_CXXAssume:
     ParseCXXAssumeAttributeArg(Attrs, AttrName, AttrNameLoc, EndLoc, Form);
     break;
+  case ParsedAttr::AT_Atomic:
+    ParseAtomicAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
+                         ScopeLoc, Form);
+    break;
   }
   return !Attrs.empty() ? Attrs.begin()->getNumArgs() : 0;
 }
diff --git a/clang/lib/Sema/SemaStmtAttr.cpp b/clang/lib/Sema/SemaStmtAttr.cpp
index 422d8abc1028aa..ab77e87ba4e60f 100644
--- a/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/clang/lib/Sema/SemaStmtAttr.cpp
@@ -625,6 +625,47 @@ static Attr *handleHLSLControlFlowHint(Sema &S, Stmt *St, const ParsedAttr &A,
   return ::new (S.Context) HLSLControlFlowHintAttr(S.Context, A);
 }
 
+static Attr *handleAtomicAttr(Sema &S, Stmt *St, const ParsedAttr &A,
+                              SourceRange Range) {
+  if (!isa<CompoundStmt>(St)) {
+    S.Diag(St->getBeginLoc(), diag::err_attribute_wrong_decl_type)
+        << A << "compound statement";
+    return nullptr;
+  }
+  AtomicAttr::NoRemoteMemoryTy NRM = AtomicAttr::NoRemoteMemoryUnset;
+  AtomicAttr::NoFineGrainedMemoryTy NFGM = AtomicAttr::NoFineGrainedMemoryUnset;
+  AtomicAttr::IgnoreDenormalModeTy ID = AtomicAttr::IgnoreDenormalModeUnset;
+  for (unsigned i = 0; i < A.getNumArgs(); ++i) {
+    IdentifierLoc *Arg = A.getArgAsIdent(i);
+    if (!Arg || !Arg->Ident) {
+      S.Diag(A.getLoc(), diag::err_attribute_argument_type)
+          << A << AANT_ArgumentIdentifier;
+      return nullptr;
+    }
+    StringRef ArgName = Arg->Ident->getName();
+    if (ArgName.starts_with("!no_remote_memory") ||
+        ArgName == "no_remote_memory") {
+      NRM = ArgName.starts_with("!") ? AtomicAttr::NoRemoteMemoryOff
+                                     : AtomicAttr::NoRemoteMemoryOn;
+    } else if (ArgName.starts_with("!no_fine_grained_memory") ||
+               ArgName == "no_fine_grained_memory") {
+      NFGM = ArgName.starts_with("!") ? AtomicAttr::NoFineGrainedMemoryOff
+                                      : AtomicAttr::NoFineGrainedMemoryOn;
+    } else if (ArgName.starts_with("!ignore_denormal_mode") ||
+               ArgName == "ignore_denormal_mode") {
+      ID = ArgName.starts_with("!") ? AtomicAttr::IgnoreDenormalModeOff
+                                    : AtomicAttr::IgnoreDenormalModeOn;
+    } else {
+      // Use the new diagnostic with the invalid argument name
+      S.Diag(Arg->Loc, diag::err_attribute_invalid_atomic_argument)
+          << ArgName << A;
+      return nullptr;
+    }
+  }
+  auto *AA = ::new (S.Context) AtomicAttr(S.Context, A, NRM, NFGM, ID);
+  AA->updateAtomicOptionsOverride();
+  return AA;
+}
 static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
                                   SourceRange Range) {
   if (A.isInvalid() || A.getKind() == ParsedAttr::IgnoredAttribute)
@@ -685,6 +726,8 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
     return handleNoConvergentAttr(S, St, A, Range);
   case ParsedAttr::AT_Annotate:
     return S.CreateAnnotationAttr(A);
+  case ParsedAttr::AT_Atomic:
+    return handleAtomicAttr(S, St, A, Range);
   default:
     if (Attr *AT = nullptr; A.getInfo().handleStmtAttribute(S, St, A, AT) !=
                             ParsedAttrInfo::NotHandled) {
diff --git a/clang/test/AST/ast-dump-atomic-options.hip b/clang/test/AST/ast-dump-atomic-options.hip
new file mode 100644
index 00000000000000..4d46036ce83216
--- /dev/null
+++ b/clang/test/AST/ast-dump-atomic-options.hip
@@ -0,0 +1,102 @@
+// RUN: %clang_cc1 -ast-dump %s | FileCheck %s
+// RUN: %clang_cc1 -ast-dump -fcuda-is-device %s | FileCheck %s
+// RUN: %clang_cc1 -ast-dump -fcuda-is-device %s \
+// RUN:   -fatomic=no_fine_grained_memory:off,no_remote_memory:on,ignore_denormal_mode:on \
+// RUN:   | FileCheck %s
+
+#include "Inputs/cuda.h"
+
+// CHECK-LABEL: FunctionDecl {{.*}} test_default
+// CHECK-NOT: AttributedStmt
+// CHECK-NOT: AtomicAttr
+// CHECK: CompoundStmt
+// CHECK-NEXT: `-AtomicExpr
+__device__ __host__ void test_default(float *a) {
+  __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+}
+
+// CHECK-LABEL: FunctionDecl {{.*}} test_one
+// CHECK: `-AttributedStmt
+// CHECK-NEXT: |-AtomicAttr {{.*}} NoRemoteMemoryOn NoFineGrainedMemoryUnset IgnoreDenormalModeUnset
+// CHECK-NEXT: `-CompoundStmt
+// CHECK-NEXT:   `-AtomicExpr
+__device__ __host__ void test_one(float *a) {
+  [[clang::atomic(no_remote_memory)]] {
+    __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  }
+}
+
+// CHECK-LABEL: FunctionDecl {{.*}} test_two
+// CHECK: `-AttributedStmt
+// CHECK-NEXT: |-AtomicAttr {{.*}} NoRemoteMemoryOff NoFineGrainedMemoryUnset IgnoreDenormalModeOn
+// CHECK-NEXT: `-CompoundStmt
+// CHECK-NEXT:   `-AtomicExpr
+__device__ __host__ void test_two(float *a) {
+  [[clang::atomic(!no_remote_memory, ignore_denormal_mode)]] {
+    __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  }
+}
+
+// CHECK-LABEL: FunctionDecl {{.*}} test_three
+// CHECK: `-AttributedStmt
+// CHECK-NEXT: |-AtomicAttr {{.*}} NoRemoteMemoryOn NoFineGrainedMemoryOff IgnoreDenormalModeOff
+// CHECK-NEXT: `-CompoundStmt
+// CHECK-NEXT:   `-AtomicExpr
+__device__ __host__ void test_three(float *a) {
+  [[clang::atomic(no_remote_memory, !no_fine_grained_memory, !ignore_denormal_mode)]] {
+    __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  }
+}
+
+// CHECK-LABEL: FunctionDecl {{.*}} test_nested
+// CHECK: CompoundStmt
+// CHECK: |-AtomicExpr
+// CHECK: `-AttributedStmt
+// CHECK-NEXT: |-AtomicAttr {{.*}} NoRemoteMemoryOff NoFineGrainedMemoryOff IgnoreDenormalModeOff
+// CHECK-NEXT: `-CompoundStmt
+// CHECK:     |-AtomicExpr
+// CHECK:     |-AttributedStmt
+// CHECK-NEXT:     |-AtomicAttr {{.*}} NoRemoteMemoryOn NoFineGrainedMemoryUnset IgnoreDenormalModeUnset
+// CHECK-NEXT:     `-CompoundStmt
+// CHECK-NEXT:       `-AtomicExpr
+// CHECK:     `-AttributedStmt
+// CHECK-NEXT:       |-AtomicAttr {{.*}} NoRemoteMemoryUnset NoFineGrainedMemoryOn IgnoreDenormalModeUnset
+// CHECK-NEXT:       `-CompoundStmt
+// CHECK-NEXT:         `-AtomicExpr
+__device__ __host__ void test_nested(float *a) {
+  __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  [[clang::atomic(!no_remote_memory, !no_fine_grained_memory, !ignore_denormal_mode)]] {
+    __scoped_atomic_fetch_max(a, 2, __ATOMIC_SEQ_CST, __MEMORY_SCOPE_DEVICE);
+    [[clang::atomic(no_remote_memory)]] {
+      __scoped_atomic_fetch_min(a, 3, __ATOMIC_ACQUIRE, __MEMORY_SCOPE_WRKGRP);
+    }
+    [[clang::atomic(no_fine_grained_memory)]] {
+      __scoped_atomic_fetch_sub(a, 4, __ATOMIC_RELEASE, __MEMORY_SCOPE_WVFRNT);
+    }
+  }
+}
+
+// CHECK-LABEL: FunctionTemplateDecl {{.*}} test_template
+// CHECK: |-FunctionDecl {{.*}} test_template 'void (T *)'
+// CHECK: | |-CompoundStmt
+// CHECK: | | `-AttributedStmt
+// CHECK: | |   |-AtomicAttr {{.*}} NoRemoteMemoryOn NoFineGrainedMemoryOff IgnoreDenormalModeOff
+// CHECK: | |   `-CompoundStmt
+// CHECK: | |     `-CallExpr {{.*}} '<dependent type>'
+// CHECK: `-FunctionDecl {{.*}} used test_template 'void (float *)' implicit_instantiation
+// CHECK:   |-CompoundStmt
+// CHECK:   | `-AttributedStmt
+// CHECK:   |   |-AtomicAttr {{.*}} NoRemoteMemoryOn NoFineGrainedMemoryOff IgnoreDenormalModeOff
+// CHECK:   |   `-CompoundStmt
+// CHECK:   |     `-AtomicExpr {{.*}} 'float'
+template<typename T>
+__device__ __host__ void test_template(T *a) {
+  [[clang::atomic(no_remote_memory, !no_fine_grained_memory, !ignore_denormal_mode)]] {
+    __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  }
+}
+
+__device__ __host__ void test_template_caller() {
+  float *p;
+  test_template(p);
+}
diff --git a/clang/test/CodeGen/AMDGPU/amdgpu-atomic-float.c b/clang/test/CodeGen/AMDGPU/amdgpu-atomic-float.c
index a8fb989b64de50..d74470304c69e1 100644
--- a/clang/test/CodeGen/AMDGPU/amdgpu-atomic-float.c
+++ b/clang/test/CodeGen/AMDGPU/amdgpu-atomic-float.c
@@ -7,7 +7,7 @@
 // SAFE-NEXT:  [[ENTRY:.*:]]
 // SAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
 // SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_post_inc.n to ptr), float 1.000000e+00 seq_cst, align 4
+// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_post_inc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META3:![0-9]+]], !amdgpu.no.remote.memory [[META3]]
 // SAFE-NEXT:    ret float [[TMP0]]
 //
 // UNSAFE-LABEL: define dso_local float @test_float_post_inc(
@@ -15,7 +15,7 @@
 // UNSAFE-NEXT:  [[ENTRY:.*:]]
 // UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
 // UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_post_inc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META3:![0-9]+]], !amdgpu.ignore.denormal.mode [[META3]]
+// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_post_inc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META3:![0-9]+]], !amdgpu.no.remote.memory [[META3]], !amdgpu.ignore.denormal.mode [[META3]]
 // UNSAFE-NEXT:    ret float [[TMP0]]
 //
 float test_float_post_inc()
@@ -24,21 +24,13 @@ float test_float_post_inc()
     return n++;
 }
 
-// SAFE-LABEL: define dso_local float @test_float_post_dc(
-// SAFE-SAME: ) #[[ATTR0]] {
-// SAFE-NEXT:  [[ENTRY:.*:]]
-// SAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
-// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_float_post_dc.n to ptr), float 1.000000e+00 seq_cst, align 4
-// SAFE-NEXT:    ret float [[TMP0]]
-//
-// UNSAFE-LABEL: define dso_local float @test_float_post_dc(
-// UNSAFE-SAME: ) #[[ATTR0]] {
-// UNSAFE-NEXT:  [[ENTRY:.*:]]
-// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
-// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_float_post_dc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META3]]
-// UNSAFE-NEXT:    ret float [[TMP0]]
+// CHECK-LABEL: define dso_local float @test_float_post_dc(
+// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_float_post_dc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META3:![0-9]+]], !amdgpu.no.remote.memory [[META3]]
+// CHECK-NEXT:    ret float [[TMP0]]
 //
 float test_float_post_dc()
 {
@@ -46,23 +38,14 @@ float test_float_post_dc()
     return n--;
 }
 
-// SAFE-LABEL: define dso_local float @test_float_pre_dc(
-// SAFE-SAME: ) #[[ATTR0]] {
-// SAFE-NEXT:  [[ENTRY:.*:]]
-// SAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
-// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_float_pre_dc.n to ptr), float 1.000000e+00 seq_cst, align 4
-// SAFE-NEXT:    [[TMP1:%.*]] = fsub float [[TMP0]], 1.000000e+00
-// SAFE-NEXT:    ret float [[TMP1]]
-//
-// UNSAFE-LABEL: define dso_local float @test_float_pre_dc(
-// UNSAFE-SAME: ) #[[ATTR0]] {
-// UNSAFE-NEXT:  [[ENTRY:.*:]]
-// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
-// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_float_pre_dc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META3]]
-// UNSAFE-NEXT:    [[TMP1:%.*]] = fsub float [[TMP0]], 1.000000e+00
-// UNSAFE-NEXT:    ret float [[TMP1]]
+// CHECK-LABEL: define dso_local float @test_float_pre_dc(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_float_pre_dc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.no.remote.memory [[META3]]
+// CHECK-NEXT:    [[TMP1:%.*]] = fsub float [[TMP0]], 1.000000e+00
+// CHECK-NEXT:    ret float [[TMP1]]
 //
 float test_float_pre_dc()
 {
@@ -75,7 +58,7 @@ float test_float_pre_dc()
 // SAFE-NEXT:  [[ENTRY:.*:]]
 // SAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
 // SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_pre_inc.n to ptr), float 1.000000e+00 seq_cst, align 4
+// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_pre_inc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.no.remote.memory [[META3]]
 // SAFE-NEXT:    [[TMP1:%.*]] = fadd float [[TMP0]], 1.000000e+00
 // SAFE-NEXT:    ret float [[TMP1]]
 //
@@ -84,7 +67,7 @@ float test_float_pre_dc()
 // UNSAFE-NEXT:  [[ENTRY:.*:]]
 // UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
 // UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_pre_inc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.ignore.denormal.mode [[META3]]
+// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_pre_inc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.no.remote.memory [[META3]], !amdgpu.ignore.denormal.mode [[META3]]
 // UNSAFE-NEXT:    [[TMP1:%.*]] = fadd float [[TMP0]], 1.000000e+00
 // UNSAFE-NEXT:    ret float [[TMP1]]
 //
@@ -94,21 +77,13 @@ float test_float_pre_inc()
     return ++n;
 }
 
-// SAFE-LABEL: define dso_local double @test_double_post_inc(
-// SAFE-SAME: ) #[[ATTR0]] {
-// SAFE-NEXT:  [[ENTRY:.*:]]
-// SAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
-// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_double_post_inc.n to ptr), double 1.000000e+00 seq_cst, align 8
-// SAFE-NEXT:    ret double [[TMP0]]
-//
-// UNSAFE-LABEL: define dso_local double @test_double_post_inc(
-// UNSAFE-SAME: ) #[[ATTR0]] {
-// UNSAFE-NEXT:  [[ENTRY:.*:]]
-// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
-// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_double_post_inc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META3]]
-// UNSAFE-NEXT:    ret double [[TMP0]]
+// CHECK-LABEL: define dso_local double @test_double_post_inc(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
+// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_double_post_inc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.no.remote.memory [[META3]]
+// CHECK-NEXT:    ret double [[TMP0]]
 //
 double test_double_post_inc()
 {
@@ -116,21 +91,13 @@ double test_double_post_inc()
     return n++;
 }
 
-// SAFE-LABEL: define dso_local double @test_double_post_dc(
-// SAFE-SAME: ) #[[ATTR0]] {
-// SAFE-NEXT:  [[ENTRY:.*:]]
-// SAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
-// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_double_post_dc.n to ptr), double 1.000000e+00 seq_cst, align 8
-// SAFE-NEXT:    ret double [[TMP0]]
-//
-// UNSAFE-LABEL: define dso_local double @test_double_post_dc(
-// UNSAFE-SAME: ) #[[ATTR0]] {
-// UNSAFE-NEXT:  [[ENTRY:.*:]]
-// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
-// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_double_post_dc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META3]]
-// UNSAFE-NEXT:    ret double [[TMP0]]
+// CHECK-LABEL: define dso_local double @test_double_post_dc(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
+// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_double_post_dc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.no.remote.memory [[META3]]
+// CHECK-NEXT:    ret double [[TMP0]]
 //
 double test_double_post_dc()
 {
@@ -138,23 +105,14 @@ double test_double_post_dc()
     return n--;
 }
 
-// SAFE-LABEL: define dso_local double @test_double_pre_dc(
-// SAFE-SAME: ) #[[ATTR0]] {
-// SAFE-NEXT:  [[ENTRY:.*:]]
-// SAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
-// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_double_pre_dc.n to ptr), double 1.000000e+00 seq_cst, align 8
-// SAFE-NEXT:    [[TMP1:%.*]] = fsub double [[TMP0]], 1.000000e+00
-// SAFE-NEXT:    ret double [[TMP1]]
-//
-// UNSAFE-LABEL: define dso_local double @test_double_pre_dc(
-// UNSAFE-SAME: ) #[[ATTR0]] {
-// UNSAFE-NEXT:  [[ENTRY:.*:]]
-// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
-// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_double_pre_dc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META3]]
-// UNSAFE-NEXT:    [[TMP1:%.*]] = fsub double [[TMP0]], 1.000000e+00
-// UNSAFE-NEXT:    ret double [[TMP1]]
+// CHECK-LABEL: define dso_local double @test_double_pre_dc(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
+// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_double_pre_dc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.no.remote.memory [[META3]]
+// CHECK-NEXT:    [[TMP1:%.*]] = fsub double [[TMP0]], 1.000000e+00
+// CHECK-NEXT:    ret double [[TMP1]]
 //
 double test_double_pre_dc()
 {
@@ -162,23 +120,14 @@ double test_double_pre_dc()
     return --n;
 }
 
-// SAFE-LABEL: define dso_local double @test_double_pre_inc(
-// SAFE-SAME: ) #[[ATTR0]] {
-// SAFE-NEXT:  [[ENTRY:.*:]]
-// SAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
-// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_double_pre_inc.n to ptr), double 1.000000e+00 seq_cst, align 8
-// SAFE-NEXT:    [[TMP1:%.*]] = fadd double [[TMP0]], 1.000000e+00
-// SAFE-NEXT:    ret double [[TMP1]]
-//
-// UNSAFE-LABEL: define dso_local double @test_double_pre_inc(
-// UNSAFE-SAME: ) #[[ATTR0]] {
-// UNSAFE-NEXT:  [[ENTRY:.*:]]
-// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
-// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_double_pre_inc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META3]]
-// UNSAFE-NEXT:    [[TMP1:%.*]] = fadd double [[TMP0]], 1.000000e+00
-// UNSAFE-NEXT:    ret double [[TMP1]]
+// CHECK-LABEL: define dso_local double @test_double_pre_inc(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
+// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_double_pre_inc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.no.remote.memory [[META3]]
+// CHECK-NEXT:    [[TMP1:%.*]] = fadd double [[TMP0]], 1.000000e+00
+// CHECK-NEXT:    ret double [[TMP1]]
 //
 double test_double_pre_inc()
 {
@@ -186,21 +135,13 @@ double test_double_pre_inc()
     return ++n;
 }
 
-// SAFE-LABEL: define dso_local half @test__Float16_post_inc(
-// SAFE-SAME: ) #[[ATTR0]] {
-// SAFE-NEXT:  [[ENTRY:.*:]]
-// SAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
-// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_post_inc.n to ptr), half 0xH3C00 seq_cst, align 2
-// SAFE-NEXT:    ret half [[TMP0]]
-//
-// UNSAFE-LABEL: define dso_local half @test__Float16_post_inc(
-// UNSAFE-SAME: ) #[[ATTR0]] {
-// UNSAFE-NEXT:  [[ENTRY:.*:]]
-// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
-// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_post_inc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]]
-// UNSAFE-NEXT:    ret half [[TMP0]]
+// CHECK-LABEL: define dso_local half @test__Float16_post_inc(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
+// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_post_inc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.no.remote.memory [[META3]]
+// CHECK-NEXT:    ret half [[TMP0]]
 //
 _Float16 test__Float16_post_inc()
 {
@@ -208,21 +149,13 @@ _Float16 test__Float16_post_inc()
     return n++;
 }
 
-// SAFE-LABEL: define dso_local half @test__Float16_post_dc(
-// SAFE-SAME: ) #[[ATTR0]] {
-// SAFE-NEXT:  [[ENTRY:.*:]]
-// SAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
-// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_post_dc.n to ptr), half 0xH3C00 seq_cst, align 2
-// SAFE-NEXT:    ret half [[TMP0]]
-//
-// UNSAFE-LABEL: define dso_local half @test__Float16_post_dc(
-// UNSAFE-SAME: ) #[[ATTR0]] {
-// UNSAFE-NEXT:  [[ENTRY:.*:]]
-// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
-// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_post_dc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]]
-// UNSAFE-NEXT:    ret half [[TMP0]]
+// CHECK-LABEL: define dso_local half @test__Float16_post_dc(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
+// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_post_dc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.no.remote.memory [[META3]]
+// CHECK-NEXT:    ret half [[TMP0]]
 //
 _Float16 test__Float16_post_dc()
 {
@@ -230,23 +163,14 @@ _Float16 test__Float16_post_dc()
     return n--;
 }
 
-// SAFE-LABEL: define dso_local half @test__Float16_pre_dc(
-// SAFE-SAME: ) #[[ATTR0]] {
-// SAFE-NEXT:  [[ENTRY:.*:]]
-// SAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
-// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_pre_dc.n to ptr), half 0xH3C00 seq_cst, align 2
-// SAFE-NEXT:    [[TMP1:%.*]] = fsub half [[TMP0]], 0xH3C00
-// SAFE-NEXT:    ret half [[TMP1]]
-//
-// UNSAFE-LABEL: define dso_local half @test__Float16_pre_dc(
-// UNSAFE-SAME: ) #[[ATTR0]] {
-// UNSAFE-NEXT:  [[ENTRY:.*:]]
-// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
-// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_pre_dc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]]
-// UNSAFE-NEXT:    [[TMP1:%.*]] = fsub half [[TMP0]], 0xH3C00
-// UNSAFE-NEXT:    ret half [[TMP1]]
+// CHECK-LABEL: define dso_local half @test__Float16_pre_dc(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
+// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_pre_dc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.no.remote.memory [[META3]]
+// CHECK-NEXT:    [[TMP1:%.*]] = fsub half [[TMP0]], 0xH3C00
+// CHECK-NEXT:    ret half [[TMP1]]
 //
 _Float16 test__Float16_pre_dc()
 {
@@ -254,23 +178,14 @@ _Float16 test__Float16_pre_dc()
     return --n;
 }
 
-// SAFE-LABEL: define dso_local half @test__Float16_pre_inc(
-// SAFE-SAME: ) #[[ATTR0]] {
-// SAFE-NEXT:  [[ENTRY:.*:]]
-// SAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
-// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_pre_inc.n to ptr), half 0xH3C00 seq_cst, align 2
-// SAFE-NEXT:    [[TMP1:%.*]] = fadd half [[TMP0]], 0xH3C00
-// SAFE-NEXT:    ret half [[TMP1]]
-//
-// UNSAFE-LABEL: define dso_local half @test__Float16_pre_inc(
-// UNSAFE-SAME: ) #[[ATTR0]] {
-// UNSAFE-NEXT:  [[ENTRY:.*:]]
-// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
-// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_pre_inc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]]
-// UNSAFE-NEXT:    [[TMP1:%.*]] = fadd half [[TMP0]], 0xH3C00
-// UNSAFE-NEXT:    ret half [[TMP1]]
+// CHECK-LABEL: define dso_local half @test__Float16_pre_inc(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
+// CHECK-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_pre_inc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.no.remote.memory [[META3]]
+// CHECK-NEXT:    [[TMP1:%.*]] = fadd half [[TMP0]], 0xH3C00
+// CHECK-NEXT:    ret half [[TMP1]]
 //
 _Float16 test__Float16_pre_inc()
 {
@@ -278,7 +193,7 @@ _Float16 test__Float16_pre_inc()
     return ++n;
 }
 //.
+// SAFE: [[META3]] = !{}
+//.
 // UNSAFE: [[META3]] = !{}
 //.
-//// NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-// CHECK: {{.*}}
diff --git a/clang/test/CodeGenCUDA/amdgpu-atomic-ops.cu b/clang/test/CodeGenCUDA/amdgpu-atomic-ops.cu
index 47fa3967fe2373..f578929d34659c 100644
--- a/clang/test/CodeGenCUDA/amdgpu-atomic-ops.cu
+++ b/clang/test/CodeGenCUDA/amdgpu-atomic-ops.cu
@@ -1,19 +1,19 @@
 // RUN: %clang_cc1 -x hip %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
 // RUN:   -fcuda-is-device -target-cpu gfx906 -fnative-half-type \
-// RUN:   -fnative-half-arguments-and-returns | FileCheck -check-prefixes=CHECK,SAFEIR %s
+// RUN:   -fnative-half-arguments-and-returns | FileCheck -check-prefixes=FUN,CHECK,SAFEIR %s
 
 // RUN: %clang_cc1 -x hip %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
 // RUN:   -fcuda-is-device -target-cpu gfx906 -fnative-half-type \
-// RUN:   -fnative-half-arguments-and-returns -munsafe-fp-atomics | FileCheck -check-prefixes=CHECK,UNSAFEIR %s
+// RUN:   -fnative-half-arguments-and-returns -munsafe-fp-atomics | FileCheck -check-prefixes=FUN,CHECK,UNSAFEIR %s
 
 // RUN: %clang_cc1 -x hip %s -O3 -S -o - -triple=amdgcn-amd-amdhsa \
 // RUN:   -fcuda-is-device -target-cpu gfx1100 -fnative-half-type \
-// RUN:   -fnative-half-arguments-and-returns | FileCheck -check-prefix=SAFE %s
+// RUN:   -fnative-half-arguments-and-returns | FileCheck -check-prefixes=FUN,SAFE %s
 
 // RUN: %clang_cc1 -x hip %s -O3 -S -o - -triple=amdgcn-amd-amdhsa \
 // RUN:   -fcuda-is-device -target-cpu gfx940 -fnative-half-type \
 // RUN:   -fnative-half-arguments-and-returns -munsafe-fp-atomics \
-// RUN:   | FileCheck -check-prefix=UNSAFE %s
+// RUN:   | FileCheck -check-prefixes=FUN,UNSAFE %s
 
 // REQUIRES: amdgpu-registered-target
 
@@ -21,34 +21,32 @@
 #include <stdatomic.h>
 
 __global__ void ffp1(float *p) {
-  // CHECK-LABEL: @_Z4ffp1Pf
-  // SAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 4{{$}}
-  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 4{{$}}
-  // SAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 4{{$}}
-  // SAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 4{{$}}
-  // SAFEIR: atomicrmw fadd ptr {{.*}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE:[0-9]+]]{{$}}
-  // SAFEIR: atomicrmw fsub ptr {{.*}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-  // SAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-  // SAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-
-  // UNSAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 4, !amdgpu.no.fine.grained.memory !{{[0-9]+}}, !amdgpu.ignore.denormal.mode !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 4, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 4, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 4, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE:[0-9]+]], !amdgpu.no.fine.grained.memory !{{[0-9]+}}, !amdgpu.ignore.denormal.mode !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-
-  // SAFE: _Z4ffp1Pf
-  // SAFE: global_atomic_cmpswap
-  // SAFE: global_atomic_cmpswap
-  // SAFE: global_atomic_cmpswap
-  // SAFE: global_atomic_cmpswap
-  // SAFE: global_atomic_cmpswap
+  // FUN-LABEL: @_Z4ffp1Pf
+  // SAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 4, [[DEFMD:!amdgpu.no.fine.grained.memory ![0-9]+, !amdgpu.no.remote.memory ![0-9]+$]]
+  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 4, [[DEFMD]]
+  // SAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 4, [[DEFMD]]
+  // SAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 4, [[DEFMD]]
+  // SAFEIR: atomicrmw fadd ptr {{.*}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE:[0-9]+]], [[DEFMD]]
+  // SAFEIR: atomicrmw fsub ptr {{.*}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // SAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // SAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+
+  // UNSAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 4, [[FADDMD:!amdgpu.no.fine.grained.memory ![0-9]+, !amdgpu.no.remote.memory ![0-9]+, !amdgpu.ignore.denormal.mode ![0-9]+$]]
+  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 4, [[DEFMD:!amdgpu.no.fine.grained.memory ![0-9]+, !amdgpu.no.remote.memory ![0-9]+$]]
+  // UNSAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 4, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 4, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE:[0-9]+]], [[FADDMD]]
+  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // UNSAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // UNSAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+
+  // SAFE: global_atomic_add_f32
   // SAFE: global_atomic_cmpswap
+  // SAFE: global_atomic_max
+  // SAFE: global_atomic_min
+  // SAFE: global_atomic_max
+  // SAFE: global_atomic_min
 
-  // UNSAFE: _Z4ffp1Pf
   // UNSAFE: global_atomic_add_f32
   // UNSAFE: global_atomic_cmpswap
   // UNSAFE: global_atomic_cmpswap
@@ -68,26 +66,25 @@ __global__ void ffp1(float *p) {
 }
 
 __global__ void ffp2(double *p) {
-  // CHECK-LABEL: @_Z4ffp2Pd
-  // SAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 8{{$}}
-  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8{{$}}
-  // SAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 8{{$}}
-  // SAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 8{{$}}
-  // SAFEIR: atomicrmw fadd ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-  // SAFEIR: atomicrmw fsub ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-  // SAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-  // SAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-
-  // UNSAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 8, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 8, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 8, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fadd ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fsub ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-
-  // SAFE-LABEL: @_Z4ffp2Pd
+  // FUN-LABEL: @_Z4ffp2Pd
+  // SAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // SAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // SAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // SAFEIR: atomicrmw fadd ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // SAFEIR: atomicrmw fsub ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // SAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // SAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+
+  // UNSAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fadd ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // UNSAFEIR: atomicrmw fsub ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // UNSAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // UNSAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+
   // SAFE: global_atomic_cmpswap_b64
   // SAFE: global_atomic_cmpswap_b64
   // SAFE: global_atomic_cmpswap_b64
@@ -95,7 +92,6 @@ __global__ void ffp2(double *p) {
   // SAFE: global_atomic_cmpswap_b64
   // SAFE: global_atomic_cmpswap_b64
 
-  // UNSAFE-LABEL: @_Z4ffp2Pd
   // UNSAFE: global_atomic_add_f64
   // UNSAFE: global_atomic_cmpswap_x2
   // UNSAFE: global_atomic_max_f64
@@ -114,32 +110,31 @@ __global__ void ffp2(double *p) {
 
 // long double is the same as double for amdgcn.
 __global__ void ffp3(long double *p) {
-  // CHECK-LABEL: @_Z4ffp3Pe
-  // SAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 8{{$}}
-  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8{{$}}
-  // SAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 8{{$}}
-  // SAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 8{{$}}
-  // SAFEIR: atomicrmw fadd ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-  // SAFEIR: atomicrmw fsub ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-  // SAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-  // SAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-
-  // UNSAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 8, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 8, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 8, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fadd ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fsub ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-
-  // SAFE-LABEL: @_Z4ffp3Pe
+  // FUN-LABEL: @_Z4ffp3Pe
+  // SAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // SAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // SAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // SAFEIR: atomicrmw fadd ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // SAFEIR: atomicrmw fsub ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // SAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // SAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+
+  // UNSAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fadd ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // UNSAFEIR: atomicrmw fsub ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // UNSAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // UNSAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+
   // SAFE: global_atomic_cmpswap_b64
   // SAFE: global_atomic_cmpswap_b64
   // SAFE: global_atomic_cmpswap_b64
   // SAFE: global_atomic_cmpswap_b64
   // SAFE: global_atomic_cmpswap_b64
-  // UNSAFE-LABEL: @_Z4ffp3Pe
+
   // UNSAFE: global_atomic_cmpswap_x2
   // UNSAFE: global_atomic_max_f64
   // UNSAFE: global_atomic_min_f64
@@ -156,48 +151,48 @@ __global__ void ffp3(long double *p) {
 }
 
 __device__ double ffp4(double *p, float f) {
-  // CHECK-LABEL: @_Z4ffp4Pdf
+  // FUN-LABEL: @_Z4ffp4Pdf
   // CHECK: fpext contract float {{.*}} to double
-  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8{{$}}
-  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
+  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, [[DEFMD]]
 
-  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
+  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
   __atomic_fetch_sub(p, f, memory_order_relaxed);
   return __hip_atomic_fetch_sub(p, f, memory_order_relaxed, __HIP_MEMORY_SCOPE_AGENT);
 }
 
 __device__ double ffp5(double *p, int i) {
-  // CHECK-LABEL: @_Z4ffp5Pdi
+  // FUN-LABEL: @_Z4ffp5Pdi
   // CHECK: sitofp i32 {{.*}} to double
-  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8{{$}}
-  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
+  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, [[DEFMD]]
   __atomic_fetch_sub(p, i, memory_order_relaxed);
 
-  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
+  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 8, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
   return __hip_atomic_fetch_sub(p, i, memory_order_relaxed, __HIP_MEMORY_SCOPE_AGENT);
 }
 
 __global__ void ffp6(_Float16 *p) {
-  // CHECK-LABEL: @_Z4ffp6PDF16
-  // SAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 2{{$}}
-  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 2{{$}}
-  // SAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 2{{$}}
-  // SAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 2{{$}}
-  // SAFEIR: atomicrmw fadd ptr {{.*}} syncscope("agent") monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-  // SAFEIR: atomicrmw fsub ptr {{.*}} syncscope("workgroup") monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-  // SAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-  // SAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]]{{$}}
-
-  // UNSAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 2, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 2, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 2, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 2, !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
-  // UNSAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]], !amdgpu.no.fine.grained.memory !{{[0-9]+$}}
+  // FUN-LABEL: @_Z4ffp6PDF16
+  // SAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 2, [[DEFMD]]
+  // SAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 2, [[DEFMD]]
+  // SAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 2, [[DEFMD]]
+  // SAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 2, [[DEFMD]]
+  // SAFEIR: atomicrmw fadd ptr {{.*}} syncscope("agent") monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // SAFEIR: atomicrmw fsub ptr {{.*}} syncscope("workgroup") monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // SAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // SAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+
+  // UNSAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 2, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 2, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fmax ptr {{.*}} monotonic, align 2, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fmin ptr {{.*}} monotonic, align 2, [[DEFMD]]
+  // UNSAFEIR: atomicrmw fadd ptr {{.*}} monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // UNSAFEIR: atomicrmw fsub ptr {{.*}} monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // UNSAFEIR: atomicrmw fmax ptr {{.*}} syncscope("agent") monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
+  // UNSAFEIR: atomicrmw fmin ptr {{.*}} syncscope("workgroup") monotonic, align 2, !noalias.addrspace ![[$NO_PRIVATE]], [[DEFMD]]
 
   // SAFE: _Z4ffp6PDF16
   // SAFE: global_atomic_cmpswap
diff --git a/clang/test/CodeGenCUDA/atomic-ops.cu b/clang/test/CodeGenCUDA/atomic-ops.cu
index d8489b438015d0..a41e6a6fb2dc72 100644
--- a/clang/test/CodeGenCUDA/atomic-ops.cu
+++ b/clang/test/CodeGenCUDA/atomic-ops.cu
@@ -4,14 +4,14 @@
 // CHECK-LABEL: @_Z24atomic32_op_singlethreadPiii
 // CHECK: cmpxchg ptr {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK:[0-9]+]]{{$}}
 // CHECK: cmpxchg weak ptr {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw add ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw and ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw or ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw min ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw max ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD:!amdgpu.no.fine.grained.memory ![0-9]+, !amdgpu.no.remote.memory ![0-9]+$]]
+// CHECK: atomicrmw add ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw and ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw or ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw min ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw max ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 // CHECK: load atomic i32, ptr {{%[0-9]+}} syncscope("singlethread") monotonic, align 4{{$}}
 // CHECK: store atomic i32 %{{.*}}, ptr %{{.*}} syncscope("singlethread") monotonic, align 4{{$}}
 __device__ int atomic32_op_singlethread(int *ptr, int val, int desired) {
@@ -31,8 +31,8 @@ __device__ int atomic32_op_singlethread(int *ptr, int val, int desired) {
 }
 
 // CHECK-LABEL: @_Z25atomicu32_op_singlethreadPjjj
-// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("singlethread") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 __device__ unsigned int atomicu32_op_singlethread(unsigned int *ptr, unsigned int val, unsigned int desired) {
   val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
   val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
@@ -42,14 +42,14 @@ __device__ unsigned int atomicu32_op_singlethread(unsigned int *ptr, unsigned in
 // CHECK-LABEL: @_Z21atomic32_op_wavefrontPiii
 // CHECK: cmpxchg ptr {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
 // CHECK: cmpxchg weak ptr {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw add ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw and ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw or ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw min ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw max ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw add ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw and ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw or ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw min ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw max ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 // CHECK: load atomic i32, ptr {{%[0-9]+}} syncscope("wavefront") monotonic, align 4{{$}}
 // CHECK: store atomic i32 %{{.*}}, ptr %{{.*}} syncscope("wavefront") monotonic, align 4{{$}}
 __device__ int atomic32_op_wavefront(int *ptr, int val, int desired) {
@@ -69,8 +69,8 @@ __device__ int atomic32_op_wavefront(int *ptr, int val, int desired) {
 }
 
 // CHECK-LABEL: @_Z22atomicu32_op_wavefrontPjjj
-// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("wavefront") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 __device__ unsigned int atomicu32_op_wavefront(unsigned int *ptr, unsigned int val, unsigned int desired) {
   val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
   val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WAVEFRONT);
@@ -80,14 +80,14 @@ __device__ unsigned int atomicu32_op_wavefront(unsigned int *ptr, unsigned int v
 // CHECK-LABEL: @_Z21atomic32_op_workgroupPiii
 // CHECK: cmpxchg ptr {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
 // CHECK: cmpxchg weak ptr {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw add ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw and ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw or ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw min ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw max ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw add ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw and ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw or ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw min ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw max ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 // CHECK: store atomic i32 %{{.*}}, ptr %{{.*}} syncscope("workgroup") monotonic, align 4{{$}}
 __device__ int atomic32_op_workgroup(int *ptr, int val, int desired) {
   bool flag = __hip_atomic_compare_exchange_strong(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
@@ -105,8 +105,8 @@ __device__ int atomic32_op_workgroup(int *ptr, int val, int desired) {
 }
 
 // CHECK-LABEL: @_Z22atomicu32_op_workgroupPjjj
-// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("workgroup") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 __device__ unsigned int atomicu32_op_workgroup(unsigned int *ptr, unsigned int val, unsigned int desired) {
   val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
   val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
@@ -116,14 +116,14 @@ __device__ unsigned int atomicu32_op_workgroup(unsigned int *ptr, unsigned int v
 // CHECK-LABEL: @_Z17atomic32_op_agentPiii
 // CHECK: cmpxchg ptr {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
 // CHECK: cmpxchg weak ptr {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw add ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw and ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw or ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw min ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw max ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw add ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw and ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw or ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw min ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw max ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 // CHECK: store atomic i32 %{{.*}}, ptr %{{.*}} syncscope("agent") monotonic, align 4{{$}}
 __device__ int atomic32_op_agent(int *ptr, int val, int desired) {
   bool flag = __hip_atomic_compare_exchange_strong(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
@@ -141,8 +141,8 @@ __device__ int atomic32_op_agent(int *ptr, int val, int desired) {
 }
 
 // CHECK-LABEL: @_Z18atomicu32_op_agentPjjj
-// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i32 {{%[0-9]+}} syncscope("agent") monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 __device__ unsigned int atomicu32_op_agent(unsigned int *ptr, unsigned int val, unsigned int desired) {
   val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
   val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
@@ -152,14 +152,14 @@ __device__ unsigned int atomicu32_op_agent(unsigned int *ptr, unsigned int val,
 // CHECK-LABEL: @_Z18atomic32_op_systemPiii
 // CHECK: cmpxchg ptr {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
 // CHECK: cmpxchg weak ptr {{%[0-9]+}}, i32 {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw add ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw and ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw or ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw min ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw max ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw add ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw and ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw or ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw min ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw max ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 // CHECK: load i32, ptr %{{.*}}, align 4{{$}}
 // CHECK: store atomic i32 %{{.*}}, ptr %{{.*}} monotonic, align 4{{$}}
 __device__ int atomic32_op_system(int *ptr, int val, int desired) {
@@ -179,8 +179,8 @@ __device__ int atomic32_op_system(int *ptr, int val, int desired) {
 }
 
 // CHECK-LABEL: @_Z19atomicu32_op_systemPjjj
-// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i32 {{%[0-9]+}} monotonic, align 4, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 __device__ unsigned int atomicu32_op_system(unsigned int *ptr, unsigned int val, unsigned int desired) {
   val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
   val = __hip_atomic_fetch_max(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SYSTEM);
@@ -190,14 +190,14 @@ __device__ unsigned int atomicu32_op_system(unsigned int *ptr, unsigned int val,
 // CHECK-LABEL: @_Z24atomic64_op_singlethreadPxS_xx
 // CHECK: cmpxchg ptr {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
 // CHECK: cmpxchg weak ptr {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw add ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw and ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw or ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw min ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw max ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw add ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw and ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw or ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw min ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw max ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 // CHECK: store atomic i64 %{{.*}}, ptr %{{.*}} syncscope("singlethread") monotonic, align 8{{$}}
 __device__ long long atomic64_op_singlethread(long long *ptr, long long *ptr2, long long val, long long desired) {
   bool flag = __hip_atomic_compare_exchange_strong(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_SINGLETHREAD);
@@ -215,8 +215,8 @@ __device__ long long atomic64_op_singlethread(long long *ptr, long long *ptr2, l
 }
 
 // CHECK-LABEL: @_Z25atomicu64_op_singlethreadPyS_yy
-// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("singlethread") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 // CHECK: load atomic i64, ptr %{{.*}} syncscope("singlethread") monotonic, align 8{{$}}
 // CHECK: store atomic i64 %{{.*}}, ptr %{{.*}} syncscope("singlethread") monotonic, align 8{{$}}
 __device__ unsigned long long atomicu64_op_singlethread(unsigned long long *ptr, unsigned long long *ptr2, unsigned long long val, unsigned long long desired) {
@@ -230,14 +230,14 @@ __device__ unsigned long long atomicu64_op_singlethread(unsigned long long *ptr,
 // CHECK-LABEL: @_Z21atomic64_op_wavefrontPxS_xx
 // CHECK: cmpxchg ptr {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
 // CHECK: cmpxchg weak ptr {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw add ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw and ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw or ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw min ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw max ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw add ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw and ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw or ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw min ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw max ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 // CHECK: load atomic i64, ptr {{%[0-9]+}} syncscope("wavefront") monotonic, align 8{{$}}
 // CHECK: store atomic i64 %{{.*}}, ptr %{{.*}} syncscope("wavefront") monotonic, align 8{{$}}
 __device__ long long atomic64_op_wavefront(long long *ptr, long long *ptr2, long long val, long long desired) {
@@ -257,8 +257,8 @@ __device__ long long atomic64_op_wavefront(long long *ptr, long long *ptr2, long
 }
 
 // CHECK-LABEL: @_Z22atomicu64_op_wavefrontPyS_yy
-// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("wavefront") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 // CHECK: load atomic i64, ptr {{%[0-9]+}} syncscope("wavefront") monotonic, align 8{{$}}
 // CHECK: store atomic i64 %{{.*}}, ptr %{{.*}} syncscope("wavefront") monotonic, align 8{{$}}
 __device__ unsigned long long atomicu64_op_wavefront(unsigned long long *ptr, unsigned long long *ptr2, unsigned long long val, unsigned long long desired) {
@@ -272,14 +272,14 @@ __device__ unsigned long long atomicu64_op_wavefront(unsigned long long *ptr, un
 // CHECK-LABEL: @_Z21atomic64_op_workgroupPxS_xx
 // CHECK: cmpxchg ptr {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
 // CHECK: cmpxchg weak ptr {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw add ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw and ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw or ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw min ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw max ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw add ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw and ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw or ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw min ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw max ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 // CHECK: store atomic i64 %{{.*}}, ptr %{{.*}} syncscope("workgroup") monotonic, align 8{{$}}
 __device__ long long atomic64_op_workgroup(long long *ptr, long long *ptr2, long long val, long long desired) {
   bool flag = __hip_atomic_compare_exchange_strong(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
@@ -297,8 +297,8 @@ __device__ long long atomic64_op_workgroup(long long *ptr, long long *ptr2, long
 }
 
 // CHECK-LABEL: @_Z22atomicu64_op_workgroupPyS_yy
-// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("workgroup") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 // CHECK: store atomic i64 %{{.*}}, ptr %{{.*}} syncscope("workgroup") monotonic, align 8{{$}}
 __device__ unsigned long long atomicu64_op_workgroup(unsigned long long *ptr, unsigned long long *ptr2, unsigned long long val, unsigned long long desired) {
   val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_WORKGROUP);
@@ -310,14 +310,14 @@ __device__ unsigned long long atomicu64_op_workgroup(unsigned long long *ptr, un
 // CHECK-LABEL: @_Z17atomic64_op_agentPxS_xx
 // CHECK: cmpxchg ptr {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
 // CHECK: cmpxchg weak ptr {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw add ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw and ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw or ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw min ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw max ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw add ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw and ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw or ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw min ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw max ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 // CHECK: store atomic i64 %{{.*}}, ptr %{{.*}} syncscope("agent") monotonic, align 8{{$}}
 __device__ long long atomic64_op_agent(long long *ptr, long long *ptr2, long long val, long long desired) {
   bool flag = __hip_atomic_compare_exchange_strong(ptr, &val, desired, __ATOMIC_RELAXED, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
@@ -335,8 +335,8 @@ __device__ long long atomic64_op_agent(long long *ptr, long long *ptr2, long lon
 }
 
 // CHECK-LABEL: @_Z18atomicu64_op_agentPyS_yy
-// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i64 {{%[0-9]+}} syncscope("agent") monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 // CHECK: store atomic i64 %{{.*}}, ptr %{{.*}} syncscope("agent") monotonic, align 8{{$}}
 __device__ unsigned long long atomicu64_op_agent(unsigned long long *ptr, unsigned long long *ptr2, unsigned long long val, unsigned long long desired) {
   val = __hip_atomic_fetch_min(ptr, val, __ATOMIC_RELAXED, __HIP_MEMORY_SCOPE_AGENT);
@@ -348,14 +348,14 @@ __device__ unsigned long long atomicu64_op_agent(unsigned long long *ptr, unsign
 // CHECK-LABEL: @_Z18atomic64_op_systemPxS_xx
 // CHECK: cmpxchg ptr {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
 // CHECK: cmpxchg weak ptr {{%[0-9]+}}, i64 {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw add ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw and ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw or ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw min ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw max ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw xchg ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw add ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw sub ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw and ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw or ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw xor ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw min ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw max ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 // CHECK: load i64, ptr %{{.*}}, align 8
 // CHECK: store atomic i64 %{{.*}}, ptr %{{.*}} monotonic, align 8{{$}}
 __device__ long long atomic64_op_system(long long *ptr, long long *ptr2, long long val, long long desired) {
@@ -375,8 +375,8 @@ __device__ long long atomic64_op_system(long long *ptr, long long *ptr2, long lo
 }
 
 // CHECK-LABEL: @_Z19atomicu64_op_systemPyS_yy
-// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
-// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]]{{$}}
+// CHECK: atomicrmw umin ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
+// CHECK: atomicrmw umax ptr {{%[0-9]+}}, i64 {{%[0-9]+}} monotonic, align 8, !noalias.addrspace ![[$NOALIAS_ADDRSPACE_STACK]], [[$DEFMD]]
 // CHECK: load i64, ptr %{{.*}}, align 8
 // CHECK: store atomic i64 %{{.*}}, ptr %{{.*}} monotonic, align 8{{$}}
 __device__ unsigned long long atomicu64_op_system(unsigned long long *ptr, unsigned long long *ptr2, unsigned long long val, unsigned long long desired) {
diff --git a/clang/test/CodeGenCUDA/atomic-options.hip b/clang/test/CodeGenCUDA/atomic-options.hip
new file mode 100644
index 00000000000000..08ac11714f7bfb
--- /dev/null
+++ b/clang/test/CodeGenCUDA/atomic-options.hip
@@ -0,0 +1,456 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu \
+// RUN:   -emit-llvm -o - %s | FileCheck --check-prefix=HOST %s
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa \
+// RUN:   -emit-llvm -o - -fcuda-is-device %s | FileCheck --check-prefix=DEV %s
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa \
+// RUN:   -fatomic=no_fine_grained_memory:off,no_remote_memory:on,ignore_denormal_mode:on \
+// RUN:   -emit-llvm -o - -fcuda-is-device %s | FileCheck --check-prefix=OPT %s
+
+#include "Inputs/cuda.h"
+
+// HOST-LABEL: define dso_local void @_Z12test_defaultPf(
+// HOST-SAME: ptr noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+// HOST-NEXT:  [[ENTRY:.*:]]
+// HOST-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8
+// HOST-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4
+// HOST-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4
+// HOST-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 8
+// HOST-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// HOST-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP]], align 4
+// HOST-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP]], align 4
+// HOST-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4
+// HOST-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
+// HOST-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP]], align 4
+// HOST-NEXT:    ret void
+//
+// DEV-LABEL: define dso_local void @_Z12test_defaultPf(
+// DEV-SAME: ptr noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+// DEV-NEXT:  [[ENTRY:.*:]]
+// DEV-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// DEV-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// DEV-NEXT:    [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr
+// DEV-NEXT:    [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr
+// DEV-NEXT:    store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// DEV-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// DEV-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4:![0-9]+]], !amdgpu.no.remote.memory [[META4]]
+// DEV-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// DEV-NEXT:    ret void
+//
+// OPT-LABEL: define dso_local void @_Z12test_defaultPf(
+// OPT-SAME: ptr noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+// OPT-NEXT:  [[ENTRY:.*:]]
+// OPT-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OPT-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OPT-NEXT:    [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr
+// OPT-NEXT:    [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr
+// OPT-NEXT:    store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OPT-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// OPT-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4, !amdgpu.no.remote.memory [[META4:![0-9]+]], !amdgpu.ignore.denormal.mode [[META4]]
+// OPT-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// OPT-NEXT:    ret void
+//
+__device__ __host__ void test_default(float *a) {
+  __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+}
+
+// HOST-LABEL: define dso_local void @_Z8test_onePf(
+// HOST-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// HOST-NEXT:  [[ENTRY:.*:]]
+// HOST-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8
+// HOST-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4
+// HOST-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4
+// HOST-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 8
+// HOST-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// HOST-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP]], align 4
+// HOST-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP]], align 4
+// HOST-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4
+// HOST-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
+// HOST-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP]], align 4
+// HOST-NEXT:    ret void
+//
+// DEV-LABEL: define dso_local void @_Z8test_onePf(
+// DEV-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// DEV-NEXT:  [[ENTRY:.*:]]
+// DEV-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// DEV-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// DEV-NEXT:    [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr
+// DEV-NEXT:    [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr
+// DEV-NEXT:    store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// DEV-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// DEV-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4]], !amdgpu.no.remote.memory [[META4]]
+// DEV-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// DEV-NEXT:    ret void
+//
+// OPT-LABEL: define dso_local void @_Z8test_onePf(
+// OPT-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// OPT-NEXT:  [[ENTRY:.*:]]
+// OPT-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OPT-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OPT-NEXT:    [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr
+// OPT-NEXT:    [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr
+// OPT-NEXT:    store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OPT-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// OPT-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4, !amdgpu.no.remote.memory [[META4]], !amdgpu.ignore.denormal.mode [[META4]]
+// OPT-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// OPT-NEXT:    ret void
+//
+__device__ __host__ void test_one(float *a) {
+  [[clang::atomic(no_remote_memory)]] {
+    __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  }
+}
+
+// HOST-LABEL: define dso_local void @_Z8test_twoPf(
+// HOST-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// HOST-NEXT:  [[ENTRY:.*:]]
+// HOST-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8
+// HOST-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4
+// HOST-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4
+// HOST-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 8
+// HOST-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// HOST-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP]], align 4
+// HOST-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP]], align 4
+// HOST-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4
+// HOST-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
+// HOST-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP]], align 4
+// HOST-NEXT:    ret void
+//
+// DEV-LABEL: define dso_local void @_Z8test_twoPf(
+// DEV-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// DEV-NEXT:  [[ENTRY:.*:]]
+// DEV-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// DEV-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// DEV-NEXT:    [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr
+// DEV-NEXT:    [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr
+// DEV-NEXT:    store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// DEV-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// DEV-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4]], !amdgpu.ignore.denormal.mode [[META4]]
+// DEV-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// DEV-NEXT:    ret void
+//
+// OPT-LABEL: define dso_local void @_Z8test_twoPf(
+// OPT-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// OPT-NEXT:  [[ENTRY:.*:]]
+// OPT-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OPT-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OPT-NEXT:    [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr
+// OPT-NEXT:    [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr
+// OPT-NEXT:    store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OPT-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// OPT-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4, !amdgpu.ignore.denormal.mode [[META4]]
+// OPT-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// OPT-NEXT:    ret void
+//
+__device__ __host__ void test_two(float *a) {
+  [[clang::atomic(!no_remote_memory, ignore_denormal_mode)]] {
+    __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  }
+}
+
+// HOST-LABEL: define dso_local void @_Z10test_threePf(
+// HOST-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// HOST-NEXT:  [[ENTRY:.*:]]
+// HOST-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8
+// HOST-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4
+// HOST-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4
+// HOST-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 8
+// HOST-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// HOST-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP]], align 4
+// HOST-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP]], align 4
+// HOST-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4
+// HOST-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
+// HOST-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP]], align 4
+// HOST-NEXT:    ret void
+//
+// DEV-LABEL: define dso_local void @_Z10test_threePf(
+// DEV-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// DEV-NEXT:  [[ENTRY:.*:]]
+// DEV-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// DEV-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// DEV-NEXT:    [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr
+// DEV-NEXT:    [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr
+// DEV-NEXT:    store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// DEV-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// DEV-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4, !amdgpu.no.remote.memory [[META4]]
+// DEV-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// DEV-NEXT:    ret void
+//
+// OPT-LABEL: define dso_local void @_Z10test_threePf(
+// OPT-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// OPT-NEXT:  [[ENTRY:.*:]]
+// OPT-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OPT-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OPT-NEXT:    [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr
+// OPT-NEXT:    [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr
+// OPT-NEXT:    store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OPT-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// OPT-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4, !amdgpu.no.remote.memory [[META4]]
+// OPT-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// OPT-NEXT:    ret void
+//
+__device__ __host__ void test_three(float *a) {
+  [[clang::atomic(no_remote_memory, !no_fine_grained_memory, !ignore_denormal_mode)]] {
+    __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  }
+}
+
+// HOST-LABEL: define dso_local void @_Z11test_nestedPf(
+// HOST-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// HOST-NEXT:  [[ENTRY:.*:]]
+// HOST-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8
+// HOST-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4
+// HOST-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4
+// HOST-NEXT:    [[DOTATOMICTMP1:%.*]] = alloca float, align 4
+// HOST-NEXT:    [[ATOMIC_TEMP2:%.*]] = alloca float, align 4
+// HOST-NEXT:    [[DOTATOMICTMP3:%.*]] = alloca float, align 4
+// HOST-NEXT:    [[ATOMIC_TEMP4:%.*]] = alloca float, align 4
+// HOST-NEXT:    [[DOTATOMICTMP5:%.*]] = alloca float, align 4
+// HOST-NEXT:    [[ATOMIC_TEMP6:%.*]] = alloca float, align 4
+// HOST-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 8
+// HOST-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// HOST-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP]], align 4
+// HOST-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP]], align 4
+// HOST-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4
+// HOST-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
+// HOST-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP]], align 4
+// HOST-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// HOST-NEXT:    store float 2.000000e+00, ptr [[DOTATOMICTMP1]], align 4
+// HOST-NEXT:    [[TMP5:%.*]] = load float, ptr [[DOTATOMICTMP1]], align 4
+// HOST-NEXT:    [[TMP6:%.*]] = atomicrmw fmax ptr [[TMP4]], float [[TMP5]] seq_cst, align 4
+// HOST-NEXT:    store float [[TMP6]], ptr [[ATOMIC_TEMP2]], align 4
+// HOST-NEXT:    [[TMP7:%.*]] = load float, ptr [[ATOMIC_TEMP2]], align 4
+// HOST-NEXT:    [[TMP8:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// HOST-NEXT:    store float 3.000000e+00, ptr [[DOTATOMICTMP3]], align 4
+// HOST-NEXT:    [[TMP9:%.*]] = load float, ptr [[DOTATOMICTMP3]], align 4
+// HOST-NEXT:    [[TMP10:%.*]] = atomicrmw fmin ptr [[TMP8]], float [[TMP9]] acquire, align 4
+// HOST-NEXT:    store float [[TMP10]], ptr [[ATOMIC_TEMP4]], align 4
+// HOST-NEXT:    [[TMP11:%.*]] = load float, ptr [[ATOMIC_TEMP4]], align 4
+// HOST-NEXT:    [[TMP12:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// HOST-NEXT:    store float 4.000000e+00, ptr [[DOTATOMICTMP5]], align 4
+// HOST-NEXT:    [[TMP13:%.*]] = load float, ptr [[DOTATOMICTMP5]], align 4
+// HOST-NEXT:    [[TMP14:%.*]] = atomicrmw fsub ptr [[TMP12]], float [[TMP13]] release, align 4
+// HOST-NEXT:    store float [[TMP14]], ptr [[ATOMIC_TEMP6]], align 4
+// HOST-NEXT:    [[TMP15:%.*]] = load float, ptr [[ATOMIC_TEMP6]], align 4
+// HOST-NEXT:    ret void
+//
+// DEV-LABEL: define dso_local void @_Z11test_nestedPf(
+// DEV-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// DEV-NEXT:  [[ENTRY:.*:]]
+// DEV-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// DEV-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[DOTATOMICTMP1:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[ATOMIC_TEMP2:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[DOTATOMICTMP3:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[ATOMIC_TEMP4:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[DOTATOMICTMP5:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[ATOMIC_TEMP6:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// DEV-NEXT:    [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr
+// DEV-NEXT:    [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr
+// DEV-NEXT:    [[DOTATOMICTMP1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP1]] to ptr
+// DEV-NEXT:    [[ATOMIC_TEMP2_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP2]] to ptr
+// DEV-NEXT:    [[DOTATOMICTMP3_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP3]] to ptr
+// DEV-NEXT:    [[ATOMIC_TEMP4_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP4]] to ptr
+// DEV-NEXT:    [[DOTATOMICTMP5_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP5]] to ptr
+// DEV-NEXT:    [[ATOMIC_TEMP6_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP6]] to ptr
+// DEV-NEXT:    store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// DEV-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// DEV-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4]], !amdgpu.no.remote.memory [[META4]]
+// DEV-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// DEV-NEXT:    store float 2.000000e+00, ptr [[DOTATOMICTMP1_ASCAST]], align 4
+// DEV-NEXT:    [[TMP5:%.*]] = load float, ptr [[DOTATOMICTMP1_ASCAST]], align 4
+// DEV-NEXT:    [[TMP6:%.*]] = atomicrmw fmax ptr [[TMP4]], float [[TMP5]] syncscope("agent") seq_cst, align 4
+// DEV-NEXT:    store float [[TMP6]], ptr [[ATOMIC_TEMP2_ASCAST]], align 4
+// DEV-NEXT:    [[TMP7:%.*]] = load float, ptr [[ATOMIC_TEMP2_ASCAST]], align 4
+// DEV-NEXT:    [[TMP8:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// DEV-NEXT:    store float 3.000000e+00, ptr [[DOTATOMICTMP3_ASCAST]], align 4
+// DEV-NEXT:    [[TMP9:%.*]] = load float, ptr [[DOTATOMICTMP3_ASCAST]], align 4
+// DEV-NEXT:    [[TMP10:%.*]] = atomicrmw fmin ptr [[TMP8]], float [[TMP9]] syncscope("workgroup") acquire, align 4, !amdgpu.no.remote.memory [[META4]]
+// DEV-NEXT:    store float [[TMP10]], ptr [[ATOMIC_TEMP4_ASCAST]], align 4
+// DEV-NEXT:    [[TMP11:%.*]] = load float, ptr [[ATOMIC_TEMP4_ASCAST]], align 4
+// DEV-NEXT:    [[TMP12:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// DEV-NEXT:    store float 4.000000e+00, ptr [[DOTATOMICTMP5_ASCAST]], align 4
+// DEV-NEXT:    [[TMP13:%.*]] = load float, ptr [[DOTATOMICTMP5_ASCAST]], align 4
+// DEV-NEXT:    [[TMP14:%.*]] = atomicrmw fsub ptr [[TMP12]], float [[TMP13]] syncscope("wavefront") release, align 4, !amdgpu.no.fine.grained.memory [[META4]]
+// DEV-NEXT:    store float [[TMP14]], ptr [[ATOMIC_TEMP6_ASCAST]], align 4
+// DEV-NEXT:    [[TMP15:%.*]] = load float, ptr [[ATOMIC_TEMP6_ASCAST]], align 4
+// DEV-NEXT:    ret void
+//
+// OPT-LABEL: define dso_local void @_Z11test_nestedPf(
+// OPT-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] {
+// OPT-NEXT:  [[ENTRY:.*:]]
+// OPT-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OPT-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[DOTATOMICTMP1:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[ATOMIC_TEMP2:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[DOTATOMICTMP3:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[ATOMIC_TEMP4:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[DOTATOMICTMP5:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[ATOMIC_TEMP6:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OPT-NEXT:    [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr
+// OPT-NEXT:    [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr
+// OPT-NEXT:    [[DOTATOMICTMP1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP1]] to ptr
+// OPT-NEXT:    [[ATOMIC_TEMP2_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP2]] to ptr
+// OPT-NEXT:    [[DOTATOMICTMP3_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP3]] to ptr
+// OPT-NEXT:    [[ATOMIC_TEMP4_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP4]] to ptr
+// OPT-NEXT:    [[DOTATOMICTMP5_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP5]] to ptr
+// OPT-NEXT:    [[ATOMIC_TEMP6_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP6]] to ptr
+// OPT-NEXT:    store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OPT-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// OPT-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4, !amdgpu.no.remote.memory [[META4]], !amdgpu.ignore.denormal.mode [[META4]]
+// OPT-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// OPT-NEXT:    store float 2.000000e+00, ptr [[DOTATOMICTMP1_ASCAST]], align 4
+// OPT-NEXT:    [[TMP5:%.*]] = load float, ptr [[DOTATOMICTMP1_ASCAST]], align 4
+// OPT-NEXT:    [[TMP6:%.*]] = atomicrmw fmax ptr [[TMP4]], float [[TMP5]] syncscope("agent") seq_cst, align 4
+// OPT-NEXT:    store float [[TMP6]], ptr [[ATOMIC_TEMP2_ASCAST]], align 4
+// OPT-NEXT:    [[TMP7:%.*]] = load float, ptr [[ATOMIC_TEMP2_ASCAST]], align 4
+// OPT-NEXT:    [[TMP8:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// OPT-NEXT:    store float 3.000000e+00, ptr [[DOTATOMICTMP3_ASCAST]], align 4
+// OPT-NEXT:    [[TMP9:%.*]] = load float, ptr [[DOTATOMICTMP3_ASCAST]], align 4
+// OPT-NEXT:    [[TMP10:%.*]] = atomicrmw fmin ptr [[TMP8]], float [[TMP9]] syncscope("workgroup") acquire, align 4, !amdgpu.no.remote.memory [[META4]]
+// OPT-NEXT:    store float [[TMP10]], ptr [[ATOMIC_TEMP4_ASCAST]], align 4
+// OPT-NEXT:    [[TMP11:%.*]] = load float, ptr [[ATOMIC_TEMP4_ASCAST]], align 4
+// OPT-NEXT:    [[TMP12:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// OPT-NEXT:    store float 4.000000e+00, ptr [[DOTATOMICTMP5_ASCAST]], align 4
+// OPT-NEXT:    [[TMP13:%.*]] = load float, ptr [[DOTATOMICTMP5_ASCAST]], align 4
+// OPT-NEXT:    [[TMP14:%.*]] = atomicrmw fsub ptr [[TMP12]], float [[TMP13]] syncscope("wavefront") release, align 4, !amdgpu.no.fine.grained.memory [[META4]]
+// OPT-NEXT:    store float [[TMP14]], ptr [[ATOMIC_TEMP6_ASCAST]], align 4
+// OPT-NEXT:    [[TMP15:%.*]] = load float, ptr [[ATOMIC_TEMP6_ASCAST]], align 4
+// OPT-NEXT:    ret void
+//
+__device__ __host__ void test_nested(float *a) {
+  __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  {
+    [[clang::atomic(!no_remote_memory, !no_fine_grained_memory, !ignore_denormal_mode)]] {
+      __scoped_atomic_fetch_max(a, 2, __ATOMIC_SEQ_CST, __MEMORY_SCOPE_DEVICE);
+      {
+        [[clang::atomic(no_remote_memory)]] {
+          __scoped_atomic_fetch_min(a, 3, __ATOMIC_ACQUIRE, __MEMORY_SCOPE_WRKGRP);
+        }
+      }
+      {
+        [[clang::atomic(no_fine_grained_memory)]] {
+          __scoped_atomic_fetch_sub(a, 4, __ATOMIC_RELEASE, __MEMORY_SCOPE_WVFRNT);
+        }
+      }
+    }
+  }
+}
+
+//
+// HOST-LABEL: define weak_odr void @_Z13test_templateIfEvPT_(
+// HOST-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] comdat {
+// HOST-NEXT:  [[ENTRY:.*:]]
+// HOST-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8
+// HOST-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4
+// HOST-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4
+// HOST-NEXT:    store ptr [[A]], ptr [[A_ADDR]], align 8
+// HOST-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
+// HOST-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP]], align 4
+// HOST-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP]], align 4
+// HOST-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4
+// HOST-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
+// HOST-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP]], align 4
+// HOST-NEXT:    ret void
+//
+// DEV-LABEL: define internal void @_Z13test_templateIfEvPT_(
+// DEV-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] comdat {
+// DEV-NEXT:  [[ENTRY:.*:]]
+// DEV-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// DEV-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4, addrspace(5)
+// DEV-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// DEV-NEXT:    [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr
+// DEV-NEXT:    [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr
+// DEV-NEXT:    store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// DEV-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// DEV-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4, !amdgpu.no.remote.memory [[META4]]
+// DEV-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// DEV-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// DEV-NEXT:    ret void
+//
+// OPT-LABEL: define internal void @_Z13test_templateIfEvPT_(
+// OPT-SAME: ptr noundef [[A:%.*]]) #[[ATTR0]] comdat {
+// OPT-NEXT:  [[ENTRY:.*:]]
+// OPT-NEXT:    [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// OPT-NEXT:    [[DOTATOMICTMP:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca float, align 4, addrspace(5)
+// OPT-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// OPT-NEXT:    [[DOTATOMICTMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTATOMICTMP]] to ptr
+// OPT-NEXT:    [[ATOMIC_TEMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ATOMIC_TEMP]] to ptr
+// OPT-NEXT:    store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// OPT-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
+// OPT-NEXT:    store float 1.000000e+00, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP1:%.*]] = load float, ptr [[DOTATOMICTMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], float [[TMP1]] monotonic, align 4, !amdgpu.no.remote.memory [[META4]], !amdgpu.ignore.denormal.mode [[META4]]
+// OPT-NEXT:    store float [[TMP2]], ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// OPT-NEXT:    [[TMP3:%.*]] = load float, ptr [[ATOMIC_TEMP_ASCAST]], align 4
+// OPT-NEXT:    ret void
+//
+template<typename T> __device__ __host__ void test_template(T *a) {
+  [[clang::atomic(no_remote_memory, !no_fine_grained_memory)]] {
+    __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  }
+}
+
+template __device__ __host__ void test_template<float>(float *a);
+
+//.
+// DEV: [[META4]] = !{}
+//.
+// OPT: [[META4]] = !{}
+//.
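
The new codegen test above drives the attribute through its main combinations. As a condensed illustration (a sketch only, not part of the diff), the following HIP snippet shows how nested [[clang::atomic(...)]] blocks override the amdgcn defaults exercised by the DEV run line, namely no_fine_grained_memory and no_remote_memory on and ignore_denormal_mode off; the comments list the metadata the corresponding atomicrmw would be expected to carry.

__device__ void attribute_nesting_sketch(float *a) {
  // Target defaults apply here:
  //   !amdgpu.no.fine.grained.memory, !amdgpu.no.remote.memory
  __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);

  [[clang::atomic(!no_fine_grained_memory)]] {
    // Outer block switches no_fine_grained_memory off:
    //   !amdgpu.no.remote.memory only
    __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);

    [[clang::atomic(ignore_denormal_mode)]] {
      // Inner block inherits the outer settings and adds ignore_denormal_mode:
      //   !amdgpu.no.remote.memory, !amdgpu.ignore.denormal.mode
      __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
    }
  }
}

The function name attribute_nesting_sketch is made up for illustration; the builtins, option spellings, and expected metadata are the ones checked in the test above.
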
diff --git a/clang/test/CodeGenOpenCL/atomic-ops.cl b/clang/test/CodeGenOpenCL/atomic-ops.cl
index 1d850261e5e813..214b3a4314222d 100644
--- a/clang/test/CodeGenOpenCL/atomic-ops.cl
+++ b/clang/test/CodeGenOpenCL/atomic-ops.cl
@@ -70,19 +70,19 @@ void test_addr(global atomic_int *ig, private atomic_int *ip, local atomic_int *
 
 void fi3(atomic_int *i, atomic_uint *ui) {
   // CHECK-LABEL: @fi3
-  // CHECK: atomicrmw and ptr %{{[.0-9A-Z_a-z]+}}, i32 %{{[.0-9A-Z_a-z]+}} syncscope("workgroup") seq_cst, align 4, !noalias.addrspace [[$NOPRIVATE:![0-9]+]]{{$}}
+  // CHECK: atomicrmw and ptr %{{[.0-9A-Z_a-z]+}}, i32 %{{[.0-9A-Z_a-z]+}} syncscope("workgroup") seq_cst, align 4, !noalias.addrspace [[$NOPRIVATE:![0-9]+]], [[$DEFMD:!amdgpu.no.fine.grained.memory ![0-9]+, !amdgpu.no.remote.memory ![0-9]+$]]
   int x = __opencl_atomic_fetch_and(i, 1, memory_order_seq_cst, memory_scope_work_group);
 
-  // CHECK: atomicrmw min ptr %{{[.0-9A-Z_a-z]+}}, i32 %{{[.0-9A-Z_a-z]+}} syncscope("workgroup") seq_cst, align 4, !noalias.addrspace [[$NOPRIVATE]]{{$}}
+  // CHECK: atomicrmw min ptr %{{[.0-9A-Z_a-z]+}}, i32 %{{[.0-9A-Z_a-z]+}} syncscope("workgroup") seq_cst, align 4, !noalias.addrspace [[$NOPRIVATE]], [[$DEFMD]]
   x = __opencl_atomic_fetch_min(i, 1, memory_order_seq_cst, memory_scope_work_group);
 
-  // CHECK: atomicrmw max ptr %{{[.0-9A-Z_a-z]+}}, i32 %{{[.0-9A-Z_a-z]+}} syncscope("workgroup") seq_cst, align 4, !noalias.addrspace [[$NOPRIVATE]]{{$}}
+  // CHECK: atomicrmw max ptr %{{[.0-9A-Z_a-z]+}}, i32 %{{[.0-9A-Z_a-z]+}} syncscope("workgroup") seq_cst, align 4, !noalias.addrspace [[$NOPRIVATE]], [[$DEFMD]]
   x = __opencl_atomic_fetch_max(i, 1, memory_order_seq_cst, memory_scope_work_group);
 
-  // CHECK: atomicrmw umin ptr %{{[.0-9A-Z_a-z]+}}, i32 %{{[.0-9A-Z_a-z]+}} syncscope("workgroup") seq_cst, align 4, !noalias.addrspace [[$NOPRIVATE]]{{$}}
+  // CHECK: atomicrmw umin ptr %{{[.0-9A-Z_a-z]+}}, i32 %{{[.0-9A-Z_a-z]+}} syncscope("workgroup") seq_cst, align 4, !noalias.addrspace [[$NOPRIVATE]], [[$DEFMD]]
   x = __opencl_atomic_fetch_min(ui, 1, memory_order_seq_cst, memory_scope_work_group);
 
-  // CHECK: atomicrmw umax ptr %{{[.0-9A-Z_a-z]+}}, i32 %{{[.0-9A-Z_a-z]+}} syncscope("workgroup") seq_cst, align 4, !noalias.addrspace [[$NOPRIVATE]]{{$}}
+  // CHECK: atomicrmw umax ptr %{{[.0-9A-Z_a-z]+}}, i32 %{{[.0-9A-Z_a-z]+}} syncscope("workgroup") seq_cst, align 4, !noalias.addrspace [[$NOPRIVATE]], [[$DEFMD]]
   x = __opencl_atomic_fetch_max(ui, 1, memory_order_seq_cst, memory_scope_work_group);
 }
 
@@ -186,31 +186,31 @@ void ff2(atomic_float *d) {
 
 float ff3(atomic_float *d) {
   // CHECK-LABEL: @ff3
-  // CHECK: atomicrmw xchg ptr {{.*}} syncscope("workgroup") seq_cst, align 4, !noalias.addrspace [[$NOPRIVATE]]{{$}}
+  // CHECK: atomicrmw xchg ptr {{.*}} syncscope("workgroup") seq_cst, align 4, !noalias.addrspace [[$NOPRIVATE]], [[$DEFMD]]
   return __opencl_atomic_exchange(d, 2, memory_order_seq_cst, memory_scope_work_group);
 }
 
 float ff4(global atomic_float *d, float a) {
   // CHECK-LABEL: @ff4
-  // CHECK: atomicrmw fadd ptr addrspace(1) {{.*}} syncscope("workgroup-one-as") monotonic, align 4{{$}}
+  // CHECK: atomicrmw fadd ptr addrspace(1) {{.*}} syncscope("workgroup-one-as") monotonic, align 4, [[$DEFMD]]
   return __opencl_atomic_fetch_add(d, a, memory_order_relaxed, memory_scope_work_group);
 }
 
 float ff5(global atomic_double *d, double a) {
   // CHECK-LABEL: @ff5
-  // CHECK: atomicrmw fadd ptr addrspace(1) {{.*}} syncscope("workgroup-one-as") monotonic, align 8{{$}}
+  // CHECK: atomicrmw fadd ptr addrspace(1) {{.*}} syncscope("workgroup-one-as") monotonic, align 8, [[$DEFMD]]
   return __opencl_atomic_fetch_add(d, a, memory_order_relaxed, memory_scope_work_group);
 }
 
 float ff4_generic(atomic_float *d, float a) {
   // CHECK-LABEL: @ff4_generic
-  // CHECK: atomicrmw fadd ptr {{.*}} syncscope("workgroup-one-as") monotonic, align 4, !noalias.addrspace [[$NOPRIVATE]]{{$}}
+  // CHECK: atomicrmw fadd ptr {{.*}} syncscope("workgroup-one-as") monotonic, align 4, !noalias.addrspace [[$NOPRIVATE]], [[$DEFMD]]
   return __opencl_atomic_fetch_add(d, a, memory_order_relaxed, memory_scope_work_group);
 }
 
 float ff5_generic(atomic_double *d, double a) {
   // CHECK-LABEL: @ff5_generic
-  // CHECK: atomicrmw fadd ptr {{.*}} syncscope("workgroup-one-as") monotonic, align 8, !noalias.addrspace [[$NOPRIVATE]]{{$}}
+  // CHECK: atomicrmw fadd ptr {{.*}} syncscope("workgroup-one-as") monotonic, align 8, !noalias.addrspace [[$NOPRIVATE]], [[$DEFMD]]
   return __opencl_atomic_fetch_add(d, a, memory_order_relaxed, memory_scope_work_group);
 }
 
diff --git a/clang/test/Driver/atomic-options.hip b/clang/test/Driver/atomic-options.hip
new file mode 100644
index 00000000000000..a79818f0484b7a
--- /dev/null
+++ b/clang/test/Driver/atomic-options.hip
@@ -0,0 +1,31 @@
+// RUN: %clang -### -nogpulib -nogpuinc %s \
+// RUN:   -fatomic=no_fine_grained_memory:off,no_remote_memory:on,ignore_denormal_mode:on \
+// RUN:   2>&1 | FileCheck %s --check-prefix=CHECK-VALID
+
+// CHECK-VALID: "-cc1" {{.*}}"-triple" "amdgcn-amd-amdhsa" {{.*}}"-fatomic=no_fine_grained_memory:off,no_remote_memory:on,ignore_denormal_mode:on"
+// CHECK-VALID: "-cc1" {{.*}}"-triple" {{.*}}"-fatomic=no_fine_grained_memory:off,no_remote_memory:on,ignore_denormal_mode:on"
+
+// RUN: not %clang -### -nogpulib -nogpuinc %s \
+// RUN:   -fatomic=invalid_key:on 2>&1 | FileCheck %s --check-prefix=CHECK-INVALID-KEY
+
+// CHECK-INVALID-KEY: clang: error: invalid argument 'invalid_key:on' to -fatomic=; must be a comma-separated list of key:value pairs, where allowed keys are 'no_fine_grained_memory', 'no_remote_memory', 'ignore_denormal_mode', and values are 'on' or 'off', and each key must be unique
+
+// RUN: not %clang -### -nogpulib -nogpuinc %s \
+// RUN:   -fatomic=no_fine_grained_memory:invalid 2>&1 | FileCheck %s --check-prefix=CHECK-INVALID-VALUE
+
+// CHECK-INVALID-VALUE: clang: error: invalid argument 'no_fine_grained_memory:invalid' to -fatomic=; must be a comma-separated list of key:value pairs, where allowed keys are 'no_fine_grained_memory', 'no_remote_memory', 'ignore_denormal_mode', and values are 'on' or 'off', and each key must be unique
+
+// RUN: not %clang -### -nogpulib -nogpuinc %s \
+// RUN:   -fatomic=no_fine_grained_memory 2>&1 | FileCheck %s --check-prefix=CHECK-MISSING-VALUE
+
+// CHECK-MISSING-VALUE: clang: error: invalid argument 'no_fine_grained_memory' to -fatomic=; must be a comma-separated list of key:value pairs, where allowed keys are 'no_fine_grained_memory', 'no_remote_memory', 'ignore_denormal_mode', and values are 'on' or 'off', and each key must be unique
+
+// RUN: not %clang -### -nogpulib -nogpuinc %s \
+// RUN:   -fatomic=no_fine_grained_memory:on,no_fine_grained_memory:off 2>&1 | FileCheck %s --check-prefix=CHECK-DUPLICATE-KEY
+
+// CHECK-DUPLICATE-KEY: clang: error: invalid argument 'no_fine_grained_memory:off' to -fatomic=; must be a comma-separated list of key:value pairs, where allowed keys are 'no_fine_grained_memory', 'no_remote_memory', 'ignore_denormal_mode', and values are 'on' or 'off', and each key must be unique
+
+// RUN: %clang -### -nogpulib -nogpuinc %s \
+// RUN:   -fatomic= 2>&1 | FileCheck %s --check-prefix=CHECK-EMPTY
+
+// CHECK-EMPTY: clang: warning: joined argument expects additional value: '-fatomic=' [-Wunused-command-line-argument]
diff --git a/clang/test/OpenMP/amdgpu-unsafe-fp-atomics.cpp b/clang/test/OpenMP/amdgpu-unsafe-fp-atomics.cpp
index 7a34113cec8fa1..60d7cb008a3689 100644
--- a/clang/test/OpenMP/amdgpu-unsafe-fp-atomics.cpp
+++ b/clang/test/OpenMP/amdgpu-unsafe-fp-atomics.cpp
@@ -11,7 +11,7 @@ double dv, dx;
 // DEFAULT-SAME: ) #[[ATTR0:[0-9]+]] {
 // DEFAULT-NEXT:  [[ENTRY:.*:]]
 // DEFAULT-NEXT:    [[TMP0:%.*]] = load float, ptr addrspacecast (ptr addrspace(1) @fv to ptr), align 4
-// DEFAULT-NEXT:    [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @fx to ptr), float [[TMP0]] monotonic, align 4
+// DEFAULT-NEXT:    [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @fx to ptr), float [[TMP0]] monotonic, align 4, !amdgpu.no.fine.grained.memory [[META5:![0-9]+]], !amdgpu.no.remote.memory [[META5]]
 // DEFAULT-NEXT:    [[ADD:%.*]] = fadd float [[TMP1]], [[TMP0]]
 // DEFAULT-NEXT:    store float [[ADD]], ptr addrspacecast (ptr addrspace(1) @fv to ptr), align 4
 // DEFAULT-NEXT:    ret void
@@ -20,7 +20,7 @@ double dv, dx;
 // UNSAFE-FP-ATOMICS-SAME: ) #[[ATTR0:[0-9]+]] {
 // UNSAFE-FP-ATOMICS-NEXT:  [[ENTRY:.*:]]
 // UNSAFE-FP-ATOMICS-NEXT:    [[TMP0:%.*]] = load float, ptr addrspacecast (ptr addrspace(1) @fv to ptr), align 4
-// UNSAFE-FP-ATOMICS-NEXT:    [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @fx to ptr), float [[TMP0]] monotonic, align 4, !amdgpu.no.fine.grained.memory [[META5:![0-9]+]], !amdgpu.ignore.denormal.mode [[META5]]
+// UNSAFE-FP-ATOMICS-NEXT:    [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @fx to ptr), float [[TMP0]] monotonic, align 4, !amdgpu.no.fine.grained.memory [[META5:![0-9]+]], !amdgpu.no.remote.memory [[META5]], !amdgpu.ignore.denormal.mode [[META5]]
 // UNSAFE-FP-ATOMICS-NEXT:    [[ADD:%.*]] = fadd float [[TMP1]], [[TMP0]]
 // UNSAFE-FP-ATOMICS-NEXT:    store float [[ADD]], ptr addrspacecast (ptr addrspace(1) @fv to ptr), align 4
 // UNSAFE-FP-ATOMICS-NEXT:    ret void
@@ -34,7 +34,7 @@ void atomic_fadd_f32() {
 // DEFAULT-SAME: ) #[[ATTR0]] {
 // DEFAULT-NEXT:  [[ENTRY:.*:]]
 // DEFAULT-NEXT:    [[TMP0:%.*]] = load double, ptr addrspacecast (ptr addrspace(1) @dv to ptr), align 8
-// DEFAULT-NEXT:    [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @dx to ptr), double [[TMP0]] monotonic, align 8
+// DEFAULT-NEXT:    [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @dx to ptr), double [[TMP0]] monotonic, align 8, !amdgpu.no.fine.grained.memory [[META5]], !amdgpu.no.remote.memory [[META5]]
 // DEFAULT-NEXT:    [[ADD:%.*]] = fadd double [[TMP1]], [[TMP0]]
 // DEFAULT-NEXT:    store double [[ADD]], ptr addrspacecast (ptr addrspace(1) @dv to ptr), align 8
 // DEFAULT-NEXT:    ret void
@@ -43,7 +43,7 @@ void atomic_fadd_f32() {
 // UNSAFE-FP-ATOMICS-SAME: ) #[[ATTR0]] {
 // UNSAFE-FP-ATOMICS-NEXT:  [[ENTRY:.*:]]
 // UNSAFE-FP-ATOMICS-NEXT:    [[TMP0:%.*]] = load double, ptr addrspacecast (ptr addrspace(1) @dv to ptr), align 8
-// UNSAFE-FP-ATOMICS-NEXT:    [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @dx to ptr), double [[TMP0]] monotonic, align 8, !amdgpu.no.fine.grained.memory [[META5]]
+// UNSAFE-FP-ATOMICS-NEXT:    [[TMP1:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @dx to ptr), double [[TMP0]] monotonic, align 8, !amdgpu.no.fine.grained.memory [[META5]], !amdgpu.no.remote.memory [[META5]]
 // UNSAFE-FP-ATOMICS-NEXT:    [[ADD:%.*]] = fadd double [[TMP1]], [[TMP0]]
 // UNSAFE-FP-ATOMICS-NEXT:    store double [[ADD]], ptr addrspacecast (ptr addrspace(1) @dv to ptr), align 8
 // UNSAFE-FP-ATOMICS-NEXT:    ret void
@@ -55,5 +55,7 @@ void atomic_fadd_f64() {
 
 #pragma omp end declare target
 //.
+// DEFAULT: [[META5]] = !{}
+//.
 // UNSAFE-FP-ATOMICS: [[META5]] = !{}
 //.
diff --git a/clang/test/Parser/Inputs/cuda.h b/clang/test/Parser/Inputs/cuda.h
new file mode 100644
index 00000000000000..405ef8bb807d90
--- /dev/null
+++ b/clang/test/Parser/Inputs/cuda.h
@@ -0,0 +1,54 @@
+/* Minimal declarations for CUDA support.  Testing purposes only. */
+
+#include <stddef.h>
+
+// Make this file work with nvcc, for testing compatibility.
+
+#ifndef __NVCC__
+#define __constant__ __attribute__((constant))
+#define __device__ __attribute__((device))
+#define __global__ __attribute__((global))
+#define __host__ __attribute__((host))
+#define __shared__ __attribute__((shared))
+#define __managed__ __attribute__((managed))
+#define __launch_bounds__(...) __attribute__((launch_bounds(__VA_ARGS__)))
+
+struct dim3 {
+  unsigned x, y, z;
+  __host__ __device__ dim3(unsigned x, unsigned y = 1, unsigned z = 1) : x(x), y(y), z(z) {}
+};
+
+#ifdef __HIP__
+typedef struct hipStream *hipStream_t;
+typedef enum hipError {} hipError_t;
+int hipConfigureCall(dim3 gridSize, dim3 blockSize, size_t sharedSize = 0,
+                     hipStream_t stream = 0);
+extern "C" hipError_t __hipPushCallConfiguration(dim3 gridSize, dim3 blockSize,
+                                                 size_t sharedSize = 0,
+                                                 hipStream_t stream = 0);
+extern "C" hipError_t hipLaunchKernel(const void *func, dim3 gridDim,
+                                      dim3 blockDim, void **args,
+                                      size_t sharedMem,
+                                      hipStream_t stream);
+#else
+typedef struct cudaStream *cudaStream_t;
+typedef enum cudaError {} cudaError_t;
+
+extern "C" int cudaConfigureCall(dim3 gridSize, dim3 blockSize,
+                                 size_t sharedSize = 0,
+                                 cudaStream_t stream = 0);
+extern "C" int __cudaPushCallConfiguration(dim3 gridSize, dim3 blockSize,
+                                           size_t sharedSize = 0,
+                                           cudaStream_t stream = 0);
+extern "C" cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim,
+                                        dim3 blockDim, void **args,
+                                        size_t sharedMem, cudaStream_t stream);
+#endif
+
+// Host- and device-side placement new overloads.
+void *operator new(__SIZE_TYPE__, void *p) { return p; }
+void *operator new[](__SIZE_TYPE__, void *p) { return p; }
+__device__ void *operator new(__SIZE_TYPE__, void *p) { return p; }
+__device__ void *operator new[](__SIZE_TYPE__, void *p) { return p; }
+
+#endif // !__NVCC__
diff --git a/clang/test/Parser/atomic-options.hip b/clang/test/Parser/atomic-options.hip
new file mode 100644
index 00000000000000..eb9be6b3c36956
--- /dev/null
+++ b/clang/test/Parser/atomic-options.hip
@@ -0,0 +1,30 @@
+// RUN: %clang_cc1 -fsyntax-only -verify %s
+// RUN: %clang_cc1 -fsyntax-only -verify -fcuda-is-device %s
+// RUN: %clang_cc1 -fsyntax-only -verify -fcuda-is-device %s \
+// RUN:   -fatomic=no_fine_grained_memory:off,no_remote_memory:on,ignore_denormal_mode:on
+
+#include "Inputs/cuda.h"
+
+[[clang::atomic(!no_remote_memory)]] // expected-error {{'atomic' attribute cannot be applied to a declaration}}
+__device__ __host__ void test_location(float *a) {
+  __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  [[clang::atomic(!no_remote_memory)]] int x; // expected-error {{'atomic' attribute cannot be applied to a declaration}}
+}
+
+__device__ __host__ void test_invalid_option(float *a) {
+  [[clang::atomic(fast)]] { // expected-error {{invalid argument 'fast' to atomic attribute; valid options are: 'no_remote_memory', 'no_fine_grained_memory', 'ignore_denormal_mode' (optionally prefixed with '!')}}
+    __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  }
+}
+
+__device__ __host__ void test_invalid_value(float *a) {
+  [[clang::atomic(no_remote_memory(default))]] { // expected-error {{expected ')'}} expected-note {{to match this '('}}
+    __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  }
+}
+
+__device__ __host__ void test_invalid_format(float *a) {
+  [[clang::atomic(no_remote_memory=on)]] { // expected-error {{expected ')'}} expected-note {{to match this '('}}
+    __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
+  }
+}
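
For contrast with the rejected spellings above, a well-formed use (sketch only, not part of the diff) attaches the attribute to a compound statement and names the options directly, optionally negated with '!':

__device__ __host__ void test_valid_use(float *a) {
  // Accepted: bare option names, '!' to negate, attribute on a compound statement.
  [[clang::atomic(no_remote_memory, !ignore_denormal_mode)]] {
    __scoped_atomic_fetch_add(a, 1, __ATOMIC_RELAXED, __MEMORY_SCOPE_SYSTEM);
  }
}

test_valid_use is a made-up name; the accepted option spellings are the ones listed in the diagnostics above.
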


