[llvm] [AMDGPU] Add NoaliasAddrSpace to AAMDnodes (PR #149247)

via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 22 18:08:04 PDT 2025


https://github.com/Shoreshen updated https://github.com/llvm/llvm-project/pull/149247

>From 504c9c30d5a0b15628314f9c820dfb29d83398ab Mon Sep 17 00:00:00 2001
From: shore <372660931 at qq.com>
Date: Thu, 17 Jul 2025 12:33:24 +0800
Subject: [PATCH 1/2] add noaliasaddrspace to aamdnodes

---
 llvm/include/llvm/IR/Metadata.h               | 24 ++++++++++++-------
 llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp  |  4 ++++
 llvm/lib/CodeGen/MachineOperand.cpp           |  4 ++++
 llvm/lib/IR/Metadata.cpp                      |  2 ++
 .../AMDGPU/GlobalISel/flat-atomic-fadd.f32.ll |  8 +++----
 .../AMDGPU/GlobalISel/flat-atomic-fadd.f64.ll |  4 ++--
 6 files changed, 32 insertions(+), 14 deletions(-)

diff --git a/llvm/include/llvm/IR/Metadata.h b/llvm/include/llvm/IR/Metadata.h
index 2de26c0c1f7c7..959003eff860e 100644
--- a/llvm/include/llvm/IR/Metadata.h
+++ b/llvm/include/llvm/IR/Metadata.h
@@ -759,18 +759,18 @@ class MDString : public Metadata {
 /// memory access used by the alias-analysis infrastructure.
 struct AAMDNodes {
   explicit AAMDNodes() = default;
-  explicit AAMDNodes(MDNode *T, MDNode *TS, MDNode *S, MDNode *N)
-      : TBAA(T), TBAAStruct(TS), Scope(S), NoAlias(N) {}
+  explicit AAMDNodes(MDNode *T, MDNode *TS, MDNode *S, MDNode *N, MDNode *NAS)
+      : TBAA(T), TBAAStruct(TS), Scope(S), NoAlias(N), NoAliasAddrSpace(NAS) {}
 
   bool operator==(const AAMDNodes &A) const {
     return TBAA == A.TBAA && TBAAStruct == A.TBAAStruct && Scope == A.Scope &&
-           NoAlias == A.NoAlias;
+           NoAlias == A.NoAlias && NoAliasAddrSpace == A.NoAliasAddrSpace;
   }
 
   bool operator!=(const AAMDNodes &A) const { return !(*this == A); }
 
   explicit operator bool() const {
-    return TBAA || TBAAStruct || Scope || NoAlias;
+    return TBAA || TBAAStruct || Scope || NoAlias || NoAliasAddrSpace;
   }
 
   /// The tag for type-based alias analysis.
@@ -785,6 +785,9 @@ struct AAMDNodes {
   /// The tag specifying the noalias scope.
   MDNode *NoAlias = nullptr;
 
+  /// The tag specifying the noalias address space scope.
+  MDNode *NoAliasAddrSpace = nullptr;
+
   // Shift tbaa Metadata node to start off bytes later
   LLVM_ABI static MDNode *shiftTBAA(MDNode *M, size_t off);
 
@@ -806,6 +809,8 @@ struct AAMDNodes {
     Result.TBAAStruct = Other.TBAAStruct == TBAAStruct ? TBAAStruct : nullptr;
     Result.Scope = Other.Scope == Scope ? Scope : nullptr;
     Result.NoAlias = Other.NoAlias == NoAlias ? NoAlias : nullptr;
+    Result.NoAliasAddrSpace =
+        Other.NoAliasAddrSpace == NoAliasAddrSpace ? NoAliasAddrSpace : nullptr;
     return Result;
   }
 
@@ -818,6 +823,7 @@ struct AAMDNodes {
         TBAAStruct ? shiftTBAAStruct(TBAAStruct, Offset) : nullptr;
     Result.Scope = Scope;
     Result.NoAlias = NoAlias;
+    Result.NoAliasAddrSpace = NoAliasAddrSpace;
     return Result;
   }
 
@@ -833,6 +839,7 @@ struct AAMDNodes {
     Result.TBAAStruct = TBAAStruct;
     Result.Scope = Scope;
     Result.NoAlias = NoAlias;
+    Result.NoAliasAddrSpace = NoAliasAddrSpace;
     return Result;
   }
 
@@ -860,12 +867,12 @@ struct AAMDNodes {
 template<>
 struct DenseMapInfo<AAMDNodes> {
   static inline AAMDNodes getEmptyKey() {
-    return AAMDNodes(DenseMapInfo<MDNode *>::getEmptyKey(),
-                     nullptr, nullptr, nullptr);
+    return AAMDNodes(DenseMapInfo<MDNode *>::getEmptyKey(), nullptr, nullptr,
+                     nullptr, nullptr);
   }
 
   static inline AAMDNodes getTombstoneKey() {
-    return AAMDNodes(DenseMapInfo<MDNode *>::getTombstoneKey(),
+    return AAMDNodes(DenseMapInfo<MDNode *>::getTombstoneKey(), nullptr,
                      nullptr, nullptr, nullptr);
   }
 
@@ -873,7 +880,8 @@ struct DenseMapInfo<AAMDNodes> {
     return DenseMapInfo<MDNode *>::getHashValue(Val.TBAA) ^
            DenseMapInfo<MDNode *>::getHashValue(Val.TBAAStruct) ^
            DenseMapInfo<MDNode *>::getHashValue(Val.Scope) ^
-           DenseMapInfo<MDNode *>::getHashValue(Val.NoAlias);
+           DenseMapInfo<MDNode *>::getHashValue(Val.NoAlias) ^
+           DenseMapInfo<MDNode *>::getHashValue(Val.NoAliasAddrSpace);
   }
 
   static bool isEqual(const AAMDNodes &LHS, const AAMDNodes &RHS) {
diff --git a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
index c871070eb037e..7025b8354564a 100644
--- a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -525,6 +525,8 @@ AAMDNodes AAMDNodes::merge(const AAMDNodes &Other) const {
   Result.TBAAStruct = nullptr;
   Result.Scope = MDNode::getMostGenericAliasScope(Scope, Other.Scope);
   Result.NoAlias = MDNode::intersect(NoAlias, Other.NoAlias);
+  Result.NoAliasAddrSpace = MDNode::getMostGenericNoaliasAddrspace(
+      NoAliasAddrSpace, Other.NoAliasAddrSpace);
   return Result;
 }
 
@@ -533,6 +535,8 @@ AAMDNodes AAMDNodes::concat(const AAMDNodes &Other) const {
   Result.TBAA = Result.TBAAStruct = nullptr;
   Result.Scope = MDNode::getMostGenericAliasScope(Scope, Other.Scope);
   Result.NoAlias = MDNode::intersect(NoAlias, Other.NoAlias);
+  Result.NoAliasAddrSpace = MDNode::getMostGenericNoaliasAddrspace(
+      NoAliasAddrSpace, Other.NoAliasAddrSpace);
   return Result;
 }
 
diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
index 0d251697f2567..c612f8de7b50b 100644
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -1273,6 +1273,10 @@ void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
     OS << ", !noalias ";
     AAInfo.NoAlias->printAsOperand(OS, MST);
   }
+  if (AAInfo.NoAliasAddrSpace) {
+    OS << ", !noalias.addrspace ";
+    AAInfo.NoAliasAddrSpace->printAsOperand(OS, MST);
+  }
   if (getRanges()) {
     OS << ", !range ";
     getRanges()->printAsOperand(OS, MST);
diff --git a/llvm/lib/IR/Metadata.cpp b/llvm/lib/IR/Metadata.cpp
index f0448b06e7e82..0961b3eaf56e3 100644
--- a/llvm/lib/IR/Metadata.cpp
+++ b/llvm/lib/IR/Metadata.cpp
@@ -1778,6 +1778,7 @@ AAMDNodes Instruction::getAAMetadata() const {
     Result.TBAAStruct = Info.lookup(LLVMContext::MD_tbaa_struct);
     Result.Scope = Info.lookup(LLVMContext::MD_alias_scope);
     Result.NoAlias = Info.lookup(LLVMContext::MD_noalias);
+    Result.NoAliasAddrSpace = Info.lookup(LLVMContext::MD_noalias_addrspace);
   }
   return Result;
 }
@@ -1787,6 +1788,7 @@ void Instruction::setAAMetadata(const AAMDNodes &N) {
   setMetadata(LLVMContext::MD_tbaa_struct, N.TBAAStruct);
   setMetadata(LLVMContext::MD_alias_scope, N.Scope);
   setMetadata(LLVMContext::MD_noalias, N.NoAlias);
+  setMetadata(LLVMContext::MD_noalias_addrspace, N.NoAliasAddrSpace);
 }
 
 void Instruction::setNoSanitizeMetadata() {
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-atomic-fadd.f32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-atomic-fadd.f32.ll
index 340e293cda7b5..6ee0c74863770 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-atomic-fadd.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-atomic-fadd.f32.ll
@@ -12,7 +12,7 @@ define amdgpu_ps void @flat_atomic_fadd_f32_no_rtn_intrinsic(ptr %ptr, float %da
   ; GFX942-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; GFX942-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
   ; GFX942-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-  ; GFX942-NEXT:   FLAT_ATOMIC_ADD_F32 [[REG_SEQUENCE]], [[COPY2]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("agent") seq_cst (s32) on %ir.ptr)
+  ; GFX942-NEXT:   FLAT_ATOMIC_ADD_F32 [[REG_SEQUENCE]], [[COPY2]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("agent") seq_cst (s32) on %ir.ptr, !noalias.addrspace !0)
   ; GFX942-NEXT:   S_ENDPGM 0
   ;
   ; GFX11-LABEL: name: flat_atomic_fadd_f32_no_rtn_intrinsic
@@ -23,7 +23,7 @@ define amdgpu_ps void @flat_atomic_fadd_f32_no_rtn_intrinsic(ptr %ptr, float %da
   ; GFX11-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; GFX11-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
   ; GFX11-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-  ; GFX11-NEXT:   FLAT_ATOMIC_ADD_F32 [[REG_SEQUENCE]], [[COPY2]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("agent") seq_cst (s32) on %ir.ptr)
+  ; GFX11-NEXT:   FLAT_ATOMIC_ADD_F32 [[REG_SEQUENCE]], [[COPY2]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("agent") seq_cst (s32) on %ir.ptr, !noalias.addrspace !0)
   ; GFX11-NEXT:   S_ENDPGM 0
   %ret = call float @llvm.amdgcn.flat.atomic.fadd.f32.p1.f32(ptr %ptr, float %data)
   ret void
@@ -38,7 +38,7 @@ define amdgpu_ps float @flat_atomic_fadd_f32_rtn_intrinsic(ptr %ptr, float %data
   ; GFX942-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; GFX942-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
   ; GFX942-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-  ; GFX942-NEXT:   [[FLAT_ATOMIC_ADD_F32_RTN:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_ADD_F32_RTN [[REG_SEQUENCE]], [[COPY2]], 0, 1, implicit $exec, implicit $flat_scr :: (load store syncscope("agent") seq_cst (s32) on %ir.ptr)
+  ; GFX942-NEXT:   [[FLAT_ATOMIC_ADD_F32_RTN:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_ADD_F32_RTN [[REG_SEQUENCE]], [[COPY2]], 0, 1, implicit $exec, implicit $flat_scr :: (load store syncscope("agent") seq_cst (s32) on %ir.ptr, !noalias.addrspace !0)
   ; GFX942-NEXT:   $vgpr0 = COPY [[FLAT_ATOMIC_ADD_F32_RTN]]
   ; GFX942-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0
   ;
@@ -50,7 +50,7 @@ define amdgpu_ps float @flat_atomic_fadd_f32_rtn_intrinsic(ptr %ptr, float %data
   ; GFX11-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; GFX11-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
   ; GFX11-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-  ; GFX11-NEXT:   [[FLAT_ATOMIC_ADD_F32_RTN:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_ADD_F32_RTN [[REG_SEQUENCE]], [[COPY2]], 0, 1, implicit $exec, implicit $flat_scr :: (load store syncscope("agent") seq_cst (s32) on %ir.ptr)
+  ; GFX11-NEXT:   [[FLAT_ATOMIC_ADD_F32_RTN:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_ADD_F32_RTN [[REG_SEQUENCE]], [[COPY2]], 0, 1, implicit $exec, implicit $flat_scr :: (load store syncscope("agent") seq_cst (s32) on %ir.ptr, !noalias.addrspace !0)
   ; GFX11-NEXT:   $vgpr0 = COPY [[FLAT_ATOMIC_ADD_F32_RTN]]
   ; GFX11-NEXT:   SI_RETURN_TO_EPILOG implicit $vgpr0
   %ret = call float @llvm.amdgcn.flat.atomic.fadd.f32.p1.f32(ptr %ptr, float %data)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-atomic-fadd.f64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-atomic-fadd.f64.ll
index c82ae2fbcbbdc..bf3697924c22c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-atomic-fadd.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-atomic-fadd.f64.ll
@@ -13,7 +13,7 @@ define amdgpu_ps void @flat_atomic_fadd_f64_no_rtn_atomicrmw(ptr %ptr, double %d
   ; GFX90A_GFX942-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
   ; GFX90A_GFX942-NEXT:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
   ; GFX90A_GFX942-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
-  ; GFX90A_GFX942-NEXT:   FLAT_ATOMIC_ADD_F64 [[REG_SEQUENCE]], [[REG_SEQUENCE1]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("wavefront") monotonic (s64) on %ir.ptr)
+  ; GFX90A_GFX942-NEXT:   FLAT_ATOMIC_ADD_F64 [[REG_SEQUENCE]], [[REG_SEQUENCE1]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("wavefront") monotonic (s64) on %ir.ptr, !noalias.addrspace !0)
   ; GFX90A_GFX942-NEXT:   S_ENDPGM 0
   %ret = atomicrmw fadd ptr %ptr, double %data syncscope("wavefront") monotonic, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
   ret void
@@ -30,7 +30,7 @@ define amdgpu_ps double @flat_atomic_fadd_f64_rtn_atomicrmw(ptr %ptr, double %da
   ; GFX90A_GFX942-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
   ; GFX90A_GFX942-NEXT:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
   ; GFX90A_GFX942-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
-  ; GFX90A_GFX942-NEXT:   [[FLAT_ATOMIC_ADD_F64_RTN:%[0-9]+]]:vreg_64_align2 = FLAT_ATOMIC_ADD_F64_RTN [[REG_SEQUENCE]], [[REG_SEQUENCE1]], 0, 1, implicit $exec, implicit $flat_scr :: (load store syncscope("wavefront") monotonic (s64) on %ir.ptr)
+  ; GFX90A_GFX942-NEXT:   [[FLAT_ATOMIC_ADD_F64_RTN:%[0-9]+]]:vreg_64_align2 = FLAT_ATOMIC_ADD_F64_RTN [[REG_SEQUENCE]], [[REG_SEQUENCE1]], 0, 1, implicit $exec, implicit $flat_scr :: (load store syncscope("wavefront") monotonic (s64) on %ir.ptr, !noalias.addrspace !0)
   ; GFX90A_GFX942-NEXT:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[FLAT_ATOMIC_ADD_F64_RTN]].sub0
   ; GFX90A_GFX942-NEXT:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[FLAT_ATOMIC_ADD_F64_RTN]].sub1
   ; GFX90A_GFX942-NEXT:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec

>From 686652a63650709b1b13c9f72212acb4d3eb6836 Mon Sep 17 00:00:00 2001
From: shore <372660931 at qq.com>
Date: Mon, 21 Jul 2025 17:47:12 +0800
Subject: [PATCH 2/2] add parsing for noaliasaddrspace & add parse/print test
 cases & fix comment

---
 llvm/include/llvm/IR/Metadata.h               |    2 +-
 llvm/lib/CodeGen/MIRParser/MILexer.cpp        |    1 +
 llvm/lib/CodeGen/MIRParser/MILexer.h          |    1 +
 llvm/lib/CodeGen/MIRParser/MIParser.cpp       |    5 +
 .../attributor-noalias-addrspace-parse.mir    |   44 +
 .../attributor-noalias-addrspace-print.ll     | 1564 +++++++++++++++++
 6 files changed, 1616 insertions(+), 1 deletion(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/attributor-noalias-addrspace-parse.mir
 create mode 100644 llvm/test/CodeGen/AMDGPU/attributor-noalias-addrspace-print.ll

diff --git a/llvm/include/llvm/IR/Metadata.h b/llvm/include/llvm/IR/Metadata.h
index 958c3c19045b8..33203ad85aa32 100644
--- a/llvm/include/llvm/IR/Metadata.h
+++ b/llvm/include/llvm/IR/Metadata.h
@@ -785,7 +785,7 @@ struct AAMDNodes {
   /// The tag specifying the noalias scope.
   MDNode *NoAlias = nullptr;
 
-  /// The tag specifying the noalias address space scope.
+  /// The tag specifying the noalias address spaces.
   MDNode *NoAliasAddrSpace = nullptr;
 
   // Shift tbaa Metadata node to start off bytes later
diff --git a/llvm/lib/CodeGen/MIRParser/MILexer.cpp b/llvm/lib/CodeGen/MIRParser/MILexer.cpp
index 7153902fe2e7a..193df1fe43ecf 100644
--- a/llvm/lib/CodeGen/MIRParser/MILexer.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MILexer.cpp
@@ -616,6 +616,7 @@ static MIToken::TokenKind getMetadataKeywordKind(StringRef Identifier) {
       .Case("!range", MIToken::md_range)
       .Case("!DIExpression", MIToken::md_diexpr)
       .Case("!DILocation", MIToken::md_dilocation)
+      .Case("!noalias.addrspace", MIToken::md_noalias_addrspace)
       .Default(MIToken::Error);
 }
 
diff --git a/llvm/lib/CodeGen/MIRParser/MILexer.h b/llvm/lib/CodeGen/MIRParser/MILexer.h
index d7cd06759cfbb..54142ac68a1c1 100644
--- a/llvm/lib/CodeGen/MIRParser/MILexer.h
+++ b/llvm/lib/CodeGen/MIRParser/MILexer.h
@@ -151,6 +151,7 @@ struct MIToken {
     md_tbaa,
     md_alias_scope,
     md_noalias,
+    md_noalias_addrspace,
     md_range,
     md_diexpr,
     md_dilocation,
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 3a364d5ff0d20..fd8fe075c4452 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -3482,6 +3482,11 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
       if (parseMDNode(AAInfo.NoAlias))
         return true;
       break;
+    case MIToken::md_noalias_addrspace:
+      lex();
+      if (parseMDNode(AAInfo.NoAliasAddrSpace))
+        return true;
+      break;
     case MIToken::md_range:
       lex();
       if (parseMDNode(Range))
diff --git a/llvm/test/CodeGen/AMDGPU/attributor-noalias-addrspace-parse.mir b/llvm/test/CodeGen/AMDGPU/attributor-noalias-addrspace-parse.mir
new file mode 100644
index 0000000000000..20b1793584e7f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/attributor-noalias-addrspace-parse.mir
@@ -0,0 +1,44 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -run-pass=machineverifier -o - %s | FileCheck %s
+
+
+--- |
+  ; ModuleID = 'test.ll'
+  source_filename = "test.ll"
+  target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128:128:48-p9:192:256:256:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9"
+  target triple = "amdgcn"
+
+  define amdgpu_ps void @test_parsing_printing(ptr %ptr, float %data) #0 {
+    %1 = atomicrmw fadd ptr %ptr, float %data syncscope("agent") seq_cst, align 4, !noalias.addrspace !0, !amdgpu.no.fine.grained.memory !1, !amdgpu.ignore.denormal.mode !1
+    ret void
+  }
+
+  attributes #0 = { "target-cpu"="gfx1200" }
+
+  !0 = !{i32 5, i32 6}
+  !1 = !{}
+...
+
+---
+name: test_parsing_printing
+
+body: |
+  bb.1 (%ir-block.0):
+    liveins: $vgpr0, $vgpr1, $vgpr2
+
+    ; CHECK-LABEL: name: test_parsing_printing
+    ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+    ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+    ; CHECK-NEXT: FLAT_ATOMIC_ADD_F32 [[REG_SEQUENCE]], [[COPY2]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("agent") seq_cst (s32) on %ir.ptr, !noalias.addrspace !0)
+    ; CHECK-NEXT: S_ENDPGM 0
+    %2:vgpr_32 = COPY $vgpr0
+    %3:vgpr_32 = COPY $vgpr1
+    %0:vreg_64 = REG_SEQUENCE %2, %subreg.sub0, %3, %subreg.sub1
+    %1:vgpr_32 = COPY $vgpr2
+    FLAT_ATOMIC_ADD_F32 %0, %1, 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("agent") seq_cst (s32) on %ir.ptr, !noalias.addrspace !0)
+    S_ENDPGM 0
+...
diff --git a/llvm/test/CodeGen/AMDGPU/attributor-noalias-addrspace-print.ll b/llvm/test/CodeGen/AMDGPU/attributor-noalias-addrspace-print.ll
new file mode 100644
index 0000000000000..c9c133e5ccc2d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/attributor-noalias-addrspace-print.ll
@@ -0,0 +1,1564 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -stop-after=finalize-isel < %s | FileCheck -check-prefix=DAG %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -stop-after=instruction-select < %s | FileCheck -check-prefix=GIS %s
+
+ at gptr = addrspace(1) externally_initialized global i32 0, align 4
+ at gptr2 = addrspace(4) externally_initialized global i32 0, align 4
+ at gptr3 = addrspace(3) externally_initialized global i32 0, align 4
+
+define amdgpu_kernel void @no_alias_addr_space_select(ptr addrspace(3) %sptr, i1 %cond1, i1 %cond2, i32 %val, i32 %offset) #0 {
+  ; DAG-LABEL: name: no_alias_addr_space_select
+  ; DAG: bb.0 (%ir-block.0):
+  ; DAG-NEXT:   liveins: $sgpr0_sgpr1
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1
+  ; DAG-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.sptr.kernarg.offset, align 4, addrspace 4)
+  ; DAG-NEXT:   [[S_LOAD_DWORDX3_IMM:%[0-9]+]]:sgpr_96 = S_LOAD_DWORDX3_IMM [[COPY]](p4), 40, 0 :: (dereferenceable invariant load (s96) from %ir.cond2.kernarg.offset.align.down, align 8, addrspace 4)
+  ; DAG-NEXT:   [[COPY1:%[0-9]+]]:sreg_64 = COPY killed [[S_LOAD_DWORDX2_IMM]]
+  ; DAG-NEXT:   [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
+  ; DAG-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub1
+  ; DAG-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY3]], implicit-def dead $scc
+  ; DAG-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX3_IMM]].sub0
+  ; DAG-NEXT:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX3_IMM]].sub2
+  ; DAG-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 8
+  ; DAG-NEXT:   [[S_LSHR_B32_:%[0-9]+]]:sreg_32 = S_LSHR_B32 killed [[COPY4]], killed [[S_MOV_B32_]], implicit-def dead $scc
+  ; DAG-NEXT:   [[COPY6:%[0-9]+]]:sreg_32 = COPY killed [[S_LSHR_B32_]]
+  ; DAG-NEXT:   [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY6]], implicit-def dead $scc
+  ; DAG-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; DAG-NEXT:   S_CMP_LG_U32 [[COPY2]], killed [[S_MOV_B32_1]], implicit-def $scc
+  ; DAG-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT:   [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY2]], [[S_MOV_B32_2]], implicit $scc
+  ; DAG-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_shared_base
+  ; DAG-NEXT:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub1
+  ; DAG-NEXT:   [[S_CSELECT_B32_1:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[COPY7]], [[S_MOV_B32_2]], implicit $scc
+  ; DAG-NEXT:   [[S_ASHR_I32_:%[0-9]+]]:sreg_32 = S_ASHR_I32 [[COPY5]], 31, implicit-def dead $scc
+  ; DAG-NEXT:   [[COPY8:%[0-9]+]]:sreg_32_xm0 = COPY killed [[S_ASHR_I32_]]
+  ; DAG-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, killed [[COPY8]], %subreg.sub1
+  ; DAG-NEXT:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @gptr, target-flags(amdgpu-gotprel32-hi) @gptr, implicit-def dead $scc
+  ; DAG-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
+  ; DAG-NEXT:   [[S_ADD_U64_:%[0-9]+]]:sreg_64 = S_ADD_U64 killed [[S_LOAD_DWORDX2_IMM1]], killed [[REG_SEQUENCE]]
+  ; DAG-NEXT:   S_CMP_EQ_U32 killed [[S_AND_B32_]], 1, implicit-def $scc
+  ; DAG-NEXT:   [[COPY9:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX3_IMM]].sub1
+  ; DAG-NEXT:   [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_private_base
+  ; DAG-NEXT:   [[COPY10:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub1
+  ; DAG-NEXT:   [[COPY11:%[0-9]+]]:sreg_32 = COPY [[S_ADD_U64_]].sub0
+  ; DAG-NEXT:   [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0.lptr
+  ; DAG-NEXT:   [[S_CSELECT_B32_2:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[COPY11]], killed [[S_MOV_B32_3]], implicit $scc
+  ; DAG-NEXT:   [[COPY12:%[0-9]+]]:sreg_32 = COPY [[S_ADD_U64_]].sub1
+  ; DAG-NEXT:   [[S_CSELECT_B32_3:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[COPY12]], killed [[COPY10]], implicit $scc
+  ; DAG-NEXT:   S_CMP_EQ_U32 killed [[S_AND_B32_1]], 1, implicit-def $scc
+  ; DAG-NEXT:   [[S_CSELECT_B32_4:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[S_CSELECT_B32_3]], killed [[S_CSELECT_B32_1]], implicit $scc
+  ; DAG-NEXT:   [[S_CSELECT_B32_5:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[S_CSELECT_B32_2]], killed [[S_CSELECT_B32_]], implicit $scc
+  ; DAG-NEXT:   [[COPY13:%[0-9]+]]:vgpr_32 = COPY killed [[S_CSELECT_B32_5]]
+  ; DAG-NEXT:   [[COPY14:%[0-9]+]]:vgpr_32 = COPY killed [[S_CSELECT_B32_4]]
+  ; DAG-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY13]], %subreg.sub0, killed [[COPY14]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
+  ; DAG-NEXT:   FLAT_STORE_DWORD killed [[REG_SEQUENCE1]], killed [[COPY15]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.ptr2, !noalias.addrspace !1)
+  ; DAG-NEXT:   S_ENDPGM 0
+  ;
+  ; GIS-LABEL: name: no_alias_addr_space_select
+  ; GIS: bb.1 (%ir-block.0):
+  ; GIS-NEXT:   liveins: $sgpr0_sgpr1
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+  ; GIS-NEXT:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @gptr, target-flags(amdgpu-gotprel32-hi) @gptr, implicit-def $scc
+  ; GIS-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (p1) from got, addrspace 4)
+  ; GIS-NEXT:   [[COPY1:%[0-9]+]]:sreg_64 = COPY [[S_LOAD_DWORDX2_IMM]]
+  ; GIS-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]], 36, 0 :: (dereferenceable invariant load (<2 x s32>) from %ir.sptr.kernarg.offset, align 4, addrspace 4)
+  ; GIS-NEXT:   [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub0
+  ; GIS-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub1
+  ; GIS-NEXT:   [[S_LOAD_DWORDX3_IMM:%[0-9]+]]:sgpr_96 = S_LOAD_DWORDX3_IMM [[COPY]], 40, 0 :: (dereferenceable invariant load (<3 x s32>) from %ir.cond2.kernarg.offset.align.down, align 8, addrspace 4)
+  ; GIS-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX3_IMM]].sub0
+  ; GIS-NEXT:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX3_IMM]].sub1
+  ; GIS-NEXT:   [[COPY6:%[0-9]+]]:sreg_32_xexec_hi_and_sreg_32_xm0 = COPY [[S_LOAD_DWORDX3_IMM]].sub2
+  ; GIS-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 8
+  ; GIS-NEXT:   [[S_LSHR_B32_:%[0-9]+]]:sreg_32 = S_LSHR_B32 [[COPY4]], [[S_MOV_B32_]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0.lptr
+  ; GIS-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_private_base
+  ; GIS-NEXT:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub0
+  ; GIS-NEXT:   [[COPY8:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub1
+  ; GIS-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[COPY8]], %subreg.sub1
+  ; GIS-NEXT:   [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_shared_base
+  ; GIS-NEXT:   [[COPY9:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub0
+  ; GIS-NEXT:   [[COPY10:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub1
+  ; GIS-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY10]], %subreg.sub1
+  ; GIS-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; GIS-NEXT:   [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO 0
+  ; GIS-NEXT:   S_CMP_LG_U32 [[COPY2]], [[S_MOV_B32_2]], implicit-def $scc
+  ; GIS-NEXT:   [[COPY11:%[0-9]+]]:sreg_32 = COPY $scc
+  ; GIS-NEXT:   $scc = COPY [[COPY11]]
+  ; GIS-NEXT:   [[S_CSELECT_B64_:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[REG_SEQUENCE1]], [[S_MOV_B]], implicit $scc
+  ; GIS-NEXT:   [[S_ASHR_I32_:%[0-9]+]]:sreg_32 = S_ASHR_I32 [[COPY6]], 31, implicit-def dead $scc
+  ; GIS-NEXT:   [[COPY12:%[0-9]+]]:sreg_32_xm0 = COPY [[S_ASHR_I32_]]
+  ; GIS-NEXT:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY12]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY13:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
+  ; GIS-NEXT:   [[COPY14:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE2]].sub0
+  ; GIS-NEXT:   [[COPY15:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub1
+  ; GIS-NEXT:   [[COPY16:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE2]].sub1
+  ; GIS-NEXT:   [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY13]], [[COPY14]], implicit-def $scc
+  ; GIS-NEXT:   [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 [[COPY15]], [[COPY16]], implicit-def dead $scc, implicit $scc
+  ; GIS-NEXT:   [[REG_SEQUENCE3:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
+  ; GIS-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY3]], 1, implicit-def dead $scc
+  ; GIS-NEXT:   $scc = COPY [[S_AND_B32_]]
+  ; GIS-NEXT:   [[S_CSELECT_B64_1:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[REG_SEQUENCE3]], [[REG_SEQUENCE]], implicit $scc
+  ; GIS-NEXT:   [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LSHR_B32_]], 1, implicit-def dead $scc
+  ; GIS-NEXT:   $scc = COPY [[S_AND_B32_1]]
+  ; GIS-NEXT:   [[S_CSELECT_B64_2:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[S_CSELECT_B64_1]], [[S_CSELECT_B64_]], implicit $scc
+  ; GIS-NEXT:   [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+  ; GIS-NEXT:   [[COPY18:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   FLAT_STORE_DWORD [[COPY18]], [[COPY17]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   S_ENDPGM 0
+  %lptr = alloca i32, align 4, addrspace(5)
+  %b = addrspacecast ptr addrspace(5) %lptr to ptr
+  %c = addrspacecast ptr addrspace(3) %sptr to ptr
+  %add_a = getelementptr inbounds i8, ptr addrspacecast (ptr addrspace(1) @gptr to ptr), i32 %offset
+  %ptr = select i1 %cond1, ptr %add_a, ptr %b
+  %ptr2 = select i1 %cond2, ptr %ptr, ptr %c
+  store i32 %val, ptr %ptr2, align 4, !noalias.addrspace !0
+  ret void
+}
+
+define amdgpu_kernel void @no_alias_addr_space_branch(ptr addrspace(3) %sptr, i1 %cond1, i1 %cond2, i32 %val, i32 %offset) #0 {
+  ; DAG-LABEL: name: no_alias_addr_space_branch
+  ; DAG: bb.0 (%ir-block.0):
+  ; DAG-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; DAG-NEXT:   liveins: $sgpr0_sgpr1
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1
+  ; DAG-NEXT:   [[COPY1:%[0-9]+]]:sreg_64 = COPY [[COPY]](p4)
+  ; DAG-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.sptr.kernarg.offset, align 4, addrspace 4)
+  ; DAG-NEXT:   [[COPY2:%[0-9]+]]:sreg_64 = COPY [[S_LOAD_DWORDX2_IMM]]
+  ; DAG-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM]].sub1
+  ; DAG-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY3]], implicit-def dead $scc
+  ; DAG-NEXT:   S_CMP_EQ_U32 killed [[S_AND_B32_]], 1, implicit-def $scc
+  ; DAG-NEXT:   [[S_CSELECT_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_CSELECT_B32 -1, 0, implicit $scc
+  ; DAG-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_CSELECT_B32_]]
+  ; DAG-NEXT:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @gptr, target-flags(amdgpu-gotprel32-hi) @gptr, implicit-def dead $scc
+  ; DAG-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
+  ; DAG-NEXT:   [[COPY5:%[0-9]+]]:sreg_64 = COPY [[S_LOAD_DWORDX2_IMM1]]
+  ; DAG-NEXT:   [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 $exec_lo, killed [[COPY4]], implicit-def dead $scc
+  ; DAG-NEXT:   $vcc_lo = COPY [[S_AND_B32_1]]
+  ; DAG-NEXT:   S_CBRANCH_VCCNZ %bb.2, implicit $vcc_lo
+  ; DAG-NEXT:   S_BRANCH %bb.1
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.1.bb.1.false:
+  ; DAG-NEXT:   successors: %bb.2(0x80000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
+  ; DAG-NEXT:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr32
+  ; DAG-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 512
+  ; DAG-NEXT:   [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY6]], killed [[S_MOV_B32_]], implicit-def dead $scc
+  ; DAG-NEXT:   $sgpr32 = COPY [[S_ADD_I32_]]
+  ; DAG-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
+  ; DAG-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_private_base
+  ; DAG-NEXT:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub1
+  ; DAG-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, killed [[COPY7]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE]]
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.2.bb.1.end:
+  ; DAG-NEXT:   successors: %bb.3(0x40000000), %bb.4(0x40000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[PHI:%[0-9]+]]:sreg_64 = PHI [[COPY5]], %bb.0, [[COPY8]], %bb.1
+  ; DAG-NEXT:   [[S_LOAD_DWORDX2_IMM2:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY1]], 40, 0 :: (dereferenceable invariant load (s64) from %ir.cond2.kernarg.offset.align.down, addrspace 4)
+  ; DAG-NEXT:   [[COPY9:%[0-9]+]]:sreg_64 = COPY [[S_LOAD_DWORDX2_IMM2]]
+  ; DAG-NEXT:   [[COPY10:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM2]].sub0
+  ; DAG-NEXT:   [[S_BFE_U32_:%[0-9]+]]:sreg_32 = S_BFE_U32 killed [[COPY10]], 65544, implicit-def dead $scc
+  ; DAG-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT:   S_CMP_EQ_U32 killed [[S_BFE_U32_]], killed [[S_MOV_B32_1]], implicit-def $scc
+  ; DAG-NEXT:   S_CBRANCH_SCC1 %bb.4, implicit $scc
+  ; DAG-NEXT:   S_BRANCH %bb.3
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.3.bb.2.true:
+  ; DAG-NEXT:   successors: %bb.4(0x80000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[COPY11:%[0-9]+]]:sreg_32 = COPY [[COPY2]].sub0
+  ; DAG-NEXT:   [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_shared_base
+  ; DAG-NEXT:   [[COPY12:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub1
+  ; DAG-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; DAG-NEXT:   S_CMP_LG_U32 [[COPY11]], killed [[S_MOV_B32_2]], implicit-def $scc
+  ; DAG-NEXT:   [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT:   [[S_CSELECT_B32_1:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[COPY12]], [[S_MOV_B32_3]], implicit $scc
+  ; DAG-NEXT:   [[S_CSELECT_B32_2:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY11]], [[S_MOV_B32_3]], implicit $scc
+  ; DAG-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE killed [[S_CSELECT_B32_2]], %subreg.sub0, killed [[S_CSELECT_B32_1]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.4.bb.2.end:
+  ; DAG-NEXT:   [[PHI1:%[0-9]+]]:sreg_64 = PHI [[PHI]], %bb.2, [[COPY13]], %bb.3
+  ; DAG-NEXT:   [[COPY14:%[0-9]+]]:sreg_32 = COPY [[COPY9]].sub1
+  ; DAG-NEXT:   [[COPY15:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   [[COPY16:%[0-9]+]]:vgpr_32 = COPY [[COPY14]]
+  ; DAG-NEXT:   FLAT_STORE_DWORD [[COPY15]], killed [[COPY16]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.ptr2, !noalias.addrspace !1)
+  ; DAG-NEXT:   S_ENDPGM 0
+  ;
+  ; GIS-LABEL: name: no_alias_addr_space_branch
+  ; GIS: bb.1 (%ir-block.0):
+  ; GIS-NEXT:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
+  ; GIS-NEXT:   liveins: $sgpr0_sgpr1
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+  ; GIS-NEXT:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @gptr, target-flags(amdgpu-gotprel32-hi) @gptr, implicit-def $scc
+  ; GIS-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (p1) from got, addrspace 4)
+  ; GIS-NEXT:   [[COPY1:%[0-9]+]]:sreg_64 = COPY [[S_LOAD_DWORDX2_IMM]]
+  ; GIS-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]], 36, 0 :: (dereferenceable invariant load (<2 x s32>) from %ir.sptr.kernarg.offset, align 4, addrspace 4)
+  ; GIS-NEXT:   [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub0
+  ; GIS-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub1
+  ; GIS-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1
+  ; GIS-NEXT:   [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY3]], [[S_MOV_B32_]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_XOR_B32_1:%[0-9]+]]:sreg_32 = S_XOR_B32 [[S_XOR_B32_]], [[S_MOV_B32_]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_XOR_B32_1]], 1, implicit-def dead $scc
+  ; GIS-NEXT:   $scc = COPY [[S_AND_B32_]]
+  ; GIS-NEXT:   S_CBRANCH_SCC1 %bb.3, implicit $scc
+  ; GIS-NEXT:   S_BRANCH %bb.2
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.2.bb.1.false:
+  ; GIS-NEXT:   successors: %bb.3(0x80000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 16
+  ; GIS-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 5
+  ; GIS-NEXT:   [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[S_MOV_B32_1]], [[S_MOV_B32_2]], implicit-def dead $scc
+  ; GIS-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr32
+  ; GIS-NEXT:   [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY4]], [[S_LSHL_B32_]], implicit-def dead $scc
+  ; GIS-NEXT:   $sgpr32 = COPY [[S_ADD_U32_]]
+  ; GIS-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_private_base
+  ; GIS-NEXT:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub0
+  ; GIS-NEXT:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub1
+  ; GIS-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY6]], %subreg.sub1
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.3.bb.1.end:
+  ; GIS-NEXT:   successors: %bb.4(0x40000000), %bb.5(0x40000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[PHI:%[0-9]+]]:sreg_64 = PHI [[COPY1]], %bb.1, [[REG_SEQUENCE]], %bb.2
+  ; GIS-NEXT:   [[S_LOAD_DWORDX2_IMM2:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]], 40, 0 :: (dereferenceable invariant load (<2 x s32>) from %ir.cond2.kernarg.offset.align.down, addrspace 4)
+  ; GIS-NEXT:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM2]].sub0
+  ; GIS-NEXT:   [[COPY8:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM2]].sub1
+  ; GIS-NEXT:   [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 8
+  ; GIS-NEXT:   [[S_LSHR_B32_:%[0-9]+]]:sreg_32 = S_LSHR_B32 [[COPY7]], [[S_MOV_B32_3]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 1
+  ; GIS-NEXT:   [[S_XOR_B32_2:%[0-9]+]]:sreg_32 = S_XOR_B32 [[S_LSHR_B32_]], [[S_MOV_B32_4]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_XOR_B32_2]], 1, implicit-def dead $scc
+  ; GIS-NEXT:   $scc = COPY [[S_AND_B32_1]]
+  ; GIS-NEXT:   S_CBRANCH_SCC1 %bb.5, implicit $scc
+  ; GIS-NEXT:   S_BRANCH %bb.4
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.4.bb.2.true:
+  ; GIS-NEXT:   successors: %bb.5(0x80000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[COPY9:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub0
+  ; GIS-NEXT:   [[COPY10:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub1
+  ; GIS-NEXT:   [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_shared_base
+  ; GIS-NEXT:   [[COPY11:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub0
+  ; GIS-NEXT:   [[COPY12:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub1
+  ; GIS-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY12]], %subreg.sub1
+  ; GIS-NEXT:   [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; GIS-NEXT:   [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO 0
+  ; GIS-NEXT:   S_CMP_LG_U32 [[COPY9]], [[S_MOV_B32_5]], implicit-def $scc
+  ; GIS-NEXT:   [[COPY13:%[0-9]+]]:sreg_32 = COPY $scc
+  ; GIS-NEXT:   $scc = COPY [[COPY13]]
+  ; GIS-NEXT:   [[S_CSELECT_B64_:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[REG_SEQUENCE1]], [[S_MOV_B]], implicit $scc
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.5.bb.2.end:
+  ; GIS-NEXT:   [[PHI1:%[0-9]+]]:sreg_64 = PHI [[PHI]], %bb.3, [[S_CSELECT_B64_]], %bb.4
+  ; GIS-NEXT:   [[COPY14:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM2]].sub0
+  ; GIS-NEXT:   [[COPY15:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM2]].sub1
+  ; GIS-NEXT:   [[COPY16:%[0-9]+]]:vgpr_32 = COPY [[COPY15]]
+  ; GIS-NEXT:   [[COPY17:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   FLAT_STORE_DWORD [[COPY17]], [[COPY16]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   S_ENDPGM 0
+  br i1 %cond1, label %bb.1.true, label %bb.1.false
+
+bb.1.true:                                        ; preds = %0
+  %a = addrspacecast ptr addrspace(1) @gptr to ptr
+  br label %bb.1.end
+
+bb.1.false:                                       ; preds = %0
+  %lptr = alloca i32, align 4, addrspace(5)
+  %b = addrspacecast ptr addrspace(5) %lptr to ptr
+  br label %bb.1.end
+
+bb.1.end:                                         ; preds = %bb.1.false, %bb.1.true
+  %ptr1 = phi ptr [ %a, %bb.1.true ], [ %b, %bb.1.false ]
+  br i1 %cond2, label %bb.2.true, label %bb.2.end
+
+bb.2.true:                                        ; preds = %bb.1.end
+  %c = addrspacecast ptr addrspace(3) %sptr to ptr
+  br label %bb.2.end
+
+bb.2.end:                                         ; preds = %bb.2.true, %bb.1.end
+  %ptr2 = phi ptr [ %ptr1, %bb.1.end ], [ %c, %bb.2.true ]
+  store i32 %val, ptr %ptr2, align 4, !noalias.addrspace !0
+  ret void
+}
+
+define amdgpu_kernel void @no_alias_addr_space_select_cmpxchg(ptr addrspace(3) %sptr, i1 %cond1, i1 %cond2, i32 %val, i32 %offset) #0 {
+  ; DAG-LABEL: name: no_alias_addr_space_select_cmpxchg
+  ; DAG: bb.0 (%ir-block.0):
+  ; DAG-NEXT:   liveins: $sgpr0_sgpr1
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1
+  ; DAG-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.sptr.kernarg.offset, align 4, addrspace 4)
+  ; DAG-NEXT:   [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 40, 0 :: (dereferenceable invariant load (s32) from %ir.cond2.kernarg.offset.align.down, align 8, addrspace 4)
+  ; DAG-NEXT:   [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 48, 0 :: (dereferenceable invariant load (s32) from %ir.offset.kernarg.offset, align 16, addrspace 4)
+  ; DAG-NEXT:   [[COPY1:%[0-9]+]]:sreg_64 = COPY killed [[S_LOAD_DWORDX2_IMM]]
+  ; DAG-NEXT:   [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
+  ; DAG-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub1
+  ; DAG-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY3]], implicit-def dead $scc
+  ; DAG-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 8
+  ; DAG-NEXT:   [[S_LSHR_B32_:%[0-9]+]]:sreg_32 = S_LSHR_B32 killed [[S_LOAD_DWORD_IMM]], [[S_MOV_B32_]], implicit-def dead $scc
+  ; DAG-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY killed [[S_LSHR_B32_]]
+  ; DAG-NEXT:   [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY4]], implicit-def dead $scc
+  ; DAG-NEXT:   [[S_ASHR_I32_:%[0-9]+]]:sreg_32 = S_ASHR_I32 [[S_LOAD_DWORD_IMM1]], 31, implicit-def dead $scc
+  ; DAG-NEXT:   [[COPY5:%[0-9]+]]:sreg_32_xm0 = COPY killed [[S_ASHR_I32_]]
+  ; DAG-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_LOAD_DWORD_IMM1]], %subreg.sub0, killed [[COPY5]], %subreg.sub1
+  ; DAG-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; DAG-NEXT:   S_CMP_LG_U32 [[COPY2]], killed [[S_MOV_B32_1]], implicit-def $scc
+  ; DAG-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT:   [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY2]], [[S_MOV_B32_2]], implicit $scc
+  ; DAG-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_shared_base
+  ; DAG-NEXT:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub1
+  ; DAG-NEXT:   [[S_CSELECT_B32_1:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[COPY6]], [[S_MOV_B32_2]], implicit $scc
+  ; DAG-NEXT:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @gptr, target-flags(amdgpu-gotprel32-hi) @gptr, implicit-def dead $scc
+  ; DAG-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
+  ; DAG-NEXT:   [[S_ADD_U64_:%[0-9]+]]:sreg_64 = S_ADD_U64 killed [[S_LOAD_DWORDX2_IMM1]], killed [[REG_SEQUENCE]]
+  ; DAG-NEXT:   S_CMP_EQ_U32 killed [[S_AND_B32_]], 1, implicit-def $scc
+  ; DAG-NEXT:   [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_private_base
+  ; DAG-NEXT:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub1
+  ; DAG-NEXT:   [[COPY8:%[0-9]+]]:sreg_32 = COPY [[S_ADD_U64_]].sub0
+  ; DAG-NEXT:   [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0.lptr
+  ; DAG-NEXT:   [[S_CSELECT_B32_2:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[COPY8]], killed [[S_MOV_B32_3]], implicit $scc
+  ; DAG-NEXT:   [[COPY9:%[0-9]+]]:sreg_32 = COPY [[S_ADD_U64_]].sub1
+  ; DAG-NEXT:   [[S_CSELECT_B32_3:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[COPY9]], killed [[COPY7]], implicit $scc
+  ; DAG-NEXT:   S_CMP_EQ_U32 killed [[S_AND_B32_1]], 1, implicit-def $scc
+  ; DAG-NEXT:   [[S_CSELECT_B32_4:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[S_CSELECT_B32_3]], killed [[S_CSELECT_B32_1]], implicit $scc
+  ; DAG-NEXT:   [[S_CSELECT_B32_5:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[S_CSELECT_B32_2]], killed [[S_CSELECT_B32_]], implicit $scc
+  ; DAG-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE killed [[S_CSELECT_B32_5]], %subreg.sub0, killed [[S_CSELECT_B32_4]], %subreg.sub1
+  ; DAG-NEXT:   [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 4
+  ; DAG-NEXT:   [[COPY10:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_4]]
+  ; DAG-NEXT:   [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
+  ; DAG-NEXT:   [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY10]], %subreg.sub0, [[COPY11]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY12]], killed [[REG_SEQUENCE2]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 5
+  ; DAG-NEXT:   [[COPY13:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_5]]
+  ; DAG-NEXT:   [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
+  ; DAG-NEXT:   [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY15]], killed [[REG_SEQUENCE3]], 0, 0, implicit $exec, implicit $flat_scr :: (load store acq_rel monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_6:%[0-9]+]]:sreg_32 = S_MOV_B32 6
+  ; DAG-NEXT:   [[COPY16:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_6]]
+  ; DAG-NEXT:   [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
+  ; DAG-NEXT:   [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY16]], %subreg.sub0, [[COPY17]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY18:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY18]], killed [[REG_SEQUENCE4]], 0, 0, implicit $exec, implicit $flat_scr :: (load store acquire monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_7:%[0-9]+]]:sreg_32 = S_MOV_B32 7
+  ; DAG-NEXT:   [[COPY19:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_7]]
+  ; DAG-NEXT:   [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
+  ; DAG-NEXT:   [[REG_SEQUENCE5:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY19]], %subreg.sub0, [[COPY20]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY21:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY21]], killed [[REG_SEQUENCE5]], 0, 0, implicit $exec, implicit $flat_scr :: (load store release monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[COPY22:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+  ; DAG-NEXT:   [[COPY23:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
+  ; DAG-NEXT:   [[REG_SEQUENCE6:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY22]], %subreg.sub0, [[COPY23]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY24:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY24]], killed [[REG_SEQUENCE6]], 0, 0, implicit $exec, implicit $flat_scr :: (load store seq_cst monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_8:%[0-9]+]]:sreg_32 = S_MOV_B32 9
+  ; DAG-NEXT:   [[COPY25:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_8]]
+  ; DAG-NEXT:   [[COPY26:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
+  ; DAG-NEXT:   [[REG_SEQUENCE7:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY25]], %subreg.sub0, [[COPY26]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY27:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY27]], killed [[REG_SEQUENCE7]], 0, 0, implicit $exec, implicit $flat_scr :: (load store seq_cst monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_9:%[0-9]+]]:sreg_32 = S_MOV_B32 10
+  ; DAG-NEXT:   [[COPY28:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_9]]
+  ; DAG-NEXT:   [[COPY29:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
+  ; DAG-NEXT:   [[REG_SEQUENCE8:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY28]], %subreg.sub0, [[COPY29]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY30:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY30]], killed [[REG_SEQUENCE8]], 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_10:%[0-9]+]]:sreg_32 = S_MOV_B32 11
+  ; DAG-NEXT:   [[COPY31:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_10]]
+  ; DAG-NEXT:   [[COPY32:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
+  ; DAG-NEXT:   [[REG_SEQUENCE9:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY31]], %subreg.sub0, [[COPY32]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY33:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY33]], killed [[REG_SEQUENCE9]], 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("singlethread") seq_cst monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   S_ENDPGM 0
+  ;
+  ; GIS-LABEL: name: no_alias_addr_space_select_cmpxchg
+  ; GIS: bb.1 (%ir-block.0):
+  ; GIS-NEXT:   liveins: $sgpr0_sgpr1
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+  ; GIS-NEXT:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @gptr, target-flags(amdgpu-gotprel32-hi) @gptr, implicit-def $scc
+  ; GIS-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (p1) from got, addrspace 4)
+  ; GIS-NEXT:   [[COPY1:%[0-9]+]]:sreg_64 = COPY [[S_LOAD_DWORDX2_IMM]]
+  ; GIS-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]], 36, 0 :: (dereferenceable invariant load (<2 x s32>) from %ir.sptr.kernarg.offset, align 4, addrspace 4)
+  ; GIS-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; GIS-NEXT:   [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub0
+  ; GIS-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub1
+  ; GIS-NEXT:   [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]], 40, 0 :: (dereferenceable invariant load (s32) from %ir.cond2.kernarg.offset.align.down, align 8, addrspace 4)
+  ; GIS-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 8
+  ; GIS-NEXT:   [[S_LSHR_B32_:%[0-9]+]]:sreg_32 = S_LSHR_B32 [[S_LOAD_DWORD_IMM]], [[S_MOV_B32_1]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]], 48, 0 :: (dereferenceable invariant load (s32) from %ir.offset.kernarg.offset, align 16, addrspace 4)
+  ; GIS-NEXT:   [[S_ASHR_I32_:%[0-9]+]]:sreg_32 = S_ASHR_I32 [[S_LOAD_DWORD_IMM1]], 31, implicit-def dead $scc
+  ; GIS-NEXT:   [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY [[S_ASHR_I32_]]
+  ; GIS-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_LOAD_DWORD_IMM1]], %subreg.sub0, [[COPY4]], %subreg.sub1
+  ; GIS-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0.lptr
+  ; GIS-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_private_base
+  ; GIS-NEXT:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub0
+  ; GIS-NEXT:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub1
+  ; GIS-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_2]], %subreg.sub0, [[COPY6]], %subreg.sub1
+  ; GIS-NEXT:   [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_shared_base
+  ; GIS-NEXT:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub0
+  ; GIS-NEXT:   [[COPY8:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub1
+  ; GIS-NEXT:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY8]], %subreg.sub1
+  ; GIS-NEXT:   [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; GIS-NEXT:   [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO 0
+  ; GIS-NEXT:   S_CMP_LG_U32 [[COPY2]], [[S_MOV_B32_3]], implicit-def $scc
+  ; GIS-NEXT:   [[COPY9:%[0-9]+]]:sreg_32 = COPY $scc
+  ; GIS-NEXT:   $scc = COPY [[COPY9]]
+  ; GIS-NEXT:   [[S_CSELECT_B64_:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[REG_SEQUENCE2]], [[S_MOV_B]], implicit $scc
+  ; GIS-NEXT:   [[COPY10:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
+  ; GIS-NEXT:   [[COPY11:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0
+  ; GIS-NEXT:   [[COPY12:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub1
+  ; GIS-NEXT:   [[COPY13:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub1
+  ; GIS-NEXT:   [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY10]], [[COPY11]], implicit-def $scc
+  ; GIS-NEXT:   [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 [[COPY12]], [[COPY13]], implicit-def dead $scc, implicit $scc
+  ; GIS-NEXT:   [[REG_SEQUENCE3:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
+  ; GIS-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY3]], 1, implicit-def dead $scc
+  ; GIS-NEXT:   $scc = COPY [[S_AND_B32_]]
+  ; GIS-NEXT:   [[S_CSELECT_B64_1:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[REG_SEQUENCE3]], [[REG_SEQUENCE1]], implicit $scc
+  ; GIS-NEXT:   [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LSHR_B32_]], 1, implicit-def dead $scc
+  ; GIS-NEXT:   $scc = COPY [[S_AND_B32_1]]
+  ; GIS-NEXT:   [[S_CSELECT_B64_2:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[S_CSELECT_B64_1]], [[S_CSELECT_B64_]], implicit $scc
+  ; GIS-NEXT:   [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 4
+  ; GIS-NEXT:   [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_4]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY14:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE4]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY14]], [[COPY15]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 5
+  ; GIS-NEXT:   [[REG_SEQUENCE5:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_5]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY16:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY17:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE5]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY16]], [[COPY17]], 0, 0, implicit $exec, implicit $flat_scr :: (load store acq_rel monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_6:%[0-9]+]]:sreg_32 = S_MOV_B32 6
+  ; GIS-NEXT:   [[REG_SEQUENCE6:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_6]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY18:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY19:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE6]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY18]], [[COPY19]], 0, 0, implicit $exec, implicit $flat_scr :: (load store acquire monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_7:%[0-9]+]]:sreg_32 = S_MOV_B32 7
+  ; GIS-NEXT:   [[REG_SEQUENCE7:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_7]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY20:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY21:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE7]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY20]], [[COPY21]], 0, 0, implicit $exec, implicit $flat_scr :: (load store release monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[REG_SEQUENCE8:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY22:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY23:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE8]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY22]], [[COPY23]], 0, 0, implicit $exec, implicit $flat_scr :: (load store seq_cst monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_8:%[0-9]+]]:sreg_32 = S_MOV_B32 9
+  ; GIS-NEXT:   [[REG_SEQUENCE9:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_8]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY24:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY25:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE9]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY24]], [[COPY25]], 0, 0, implicit $exec, implicit $flat_scr :: (load store seq_cst monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_9:%[0-9]+]]:sreg_32 = S_MOV_B32 10
+  ; GIS-NEXT:   [[REG_SEQUENCE10:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_9]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY26:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY27:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE10]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY26]], [[COPY27]], 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_10:%[0-9]+]]:sreg_32 = S_MOV_B32 11
+  ; GIS-NEXT:   [[REG_SEQUENCE11:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_10]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY28:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY29:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE11]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY28]], [[COPY29]], 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("singlethread") seq_cst monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   S_ENDPGM 0
+  %lptr = alloca i32, align 4, addrspace(5)
+  %b = addrspacecast ptr addrspace(5) %lptr to ptr
+  %c = addrspacecast ptr addrspace(3) %sptr to ptr
+  %add_a = getelementptr inbounds i8, ptr addrspacecast (ptr addrspace(1) @gptr to ptr), i32 %offset
+  %ptr = select i1 %cond1, ptr %add_a, ptr %b
+  %ptr2 = select i1 %cond2, ptr %ptr, ptr %c
+  %cmpxchg.0 = cmpxchg ptr %ptr2, i32 0, i32 4 monotonic monotonic, align 4, !noalias.addrspace !0
+  %cmpxchg.1 = cmpxchg ptr %ptr2, i32 0, i32 5 acq_rel monotonic, align 4, !noalias.addrspace !0
+  %cmpxchg.2 = cmpxchg ptr %ptr2, i32 0, i32 6 acquire monotonic, align 4, !noalias.addrspace !0
+  %cmpxchg.3 = cmpxchg ptr %ptr2, i32 0, i32 7 release monotonic, align 4, !noalias.addrspace !0
+  %cmpxchg.4 = cmpxchg ptr %ptr2, i32 0, i32 8 seq_cst monotonic, align 4, !noalias.addrspace !0
+  %cmpxchg.5 = cmpxchg weak ptr %ptr2, i32 0, i32 9 seq_cst monotonic, align 4, !noalias.addrspace !0
+  %cmpxchg.6 = cmpxchg volatile ptr %ptr2, i32 0, i32 10 seq_cst monotonic, align 4, !noalias.addrspace !0
+  %cmpxchg.7 = cmpxchg weak volatile ptr %ptr2, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4, !noalias.addrspace !0
+  ret void
+}
+
+define amdgpu_kernel void @no_alias_addr_space_branch_cmpxchg(ptr addrspace(3) %sptr, i1 %cond1, i1 %cond2, i32 %val, i32 %offset) #0 {
+  ; DAG-LABEL: name: no_alias_addr_space_branch_cmpxchg
+  ; DAG: bb.0 (%ir-block.0):
+  ; DAG-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; DAG-NEXT:   liveins: $sgpr0_sgpr1
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1
+  ; DAG-NEXT:   [[COPY1:%[0-9]+]]:sreg_64 = COPY [[COPY]](p4)
+  ; DAG-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.sptr.kernarg.offset, align 4, addrspace 4)
+  ; DAG-NEXT:   [[COPY2:%[0-9]+]]:sreg_64 = COPY [[S_LOAD_DWORDX2_IMM]]
+  ; DAG-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM]].sub1
+  ; DAG-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY3]], implicit-def dead $scc
+  ; DAG-NEXT:   S_CMP_EQ_U32 killed [[S_AND_B32_]], 1, implicit-def $scc
+  ; DAG-NEXT:   [[S_CSELECT_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_CSELECT_B32 -1, 0, implicit $scc
+  ; DAG-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_CSELECT_B32_]]
+  ; DAG-NEXT:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @gptr, target-flags(amdgpu-gotprel32-hi) @gptr, implicit-def dead $scc
+  ; DAG-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
+  ; DAG-NEXT:   [[COPY5:%[0-9]+]]:sreg_64 = COPY [[S_LOAD_DWORDX2_IMM1]]
+  ; DAG-NEXT:   [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 $exec_lo, killed [[COPY4]], implicit-def dead $scc
+  ; DAG-NEXT:   $vcc_lo = COPY [[S_AND_B32_1]]
+  ; DAG-NEXT:   S_CBRANCH_VCCNZ %bb.2, implicit $vcc_lo
+  ; DAG-NEXT:   S_BRANCH %bb.1
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.1.bb.1.false:
+  ; DAG-NEXT:   successors: %bb.2(0x80000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
+  ; DAG-NEXT:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr32
+  ; DAG-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 512
+  ; DAG-NEXT:   [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY6]], killed [[S_MOV_B32_]], implicit-def dead $scc
+  ; DAG-NEXT:   $sgpr32 = COPY [[S_ADD_I32_]]
+  ; DAG-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
+  ; DAG-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_private_base
+  ; DAG-NEXT:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub1
+  ; DAG-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, killed [[COPY7]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE]]
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.2.bb.1.end:
+  ; DAG-NEXT:   successors: %bb.3(0x40000000), %bb.4(0x40000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[PHI:%[0-9]+]]:sreg_64 = PHI [[COPY5]], %bb.0, [[COPY8]], %bb.1
+  ; DAG-NEXT:   [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY1]], 40, 0 :: (dereferenceable invariant load (s32) from %ir.cond2.kernarg.offset.align.down, align 8, addrspace 4)
+  ; DAG-NEXT:   [[S_BFE_U32_:%[0-9]+]]:sreg_32 = S_BFE_U32 killed [[S_LOAD_DWORD_IMM]], 65544, implicit-def dead $scc
+  ; DAG-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT:   S_CMP_EQ_U32 killed [[S_BFE_U32_]], killed [[S_MOV_B32_1]], implicit-def $scc
+  ; DAG-NEXT:   S_CBRANCH_SCC1 %bb.4, implicit $scc
+  ; DAG-NEXT:   S_BRANCH %bb.3
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.3.bb.2.true:
+  ; DAG-NEXT:   successors: %bb.4(0x80000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[COPY9:%[0-9]+]]:sreg_32 = COPY [[COPY2]].sub0
+  ; DAG-NEXT:   [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_shared_base
+  ; DAG-NEXT:   [[COPY10:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub1
+  ; DAG-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; DAG-NEXT:   S_CMP_LG_U32 [[COPY9]], killed [[S_MOV_B32_2]], implicit-def $scc
+  ; DAG-NEXT:   [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT:   [[S_CSELECT_B32_1:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[COPY10]], [[S_MOV_B32_3]], implicit $scc
+  ; DAG-NEXT:   [[S_CSELECT_B32_2:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY9]], [[S_MOV_B32_3]], implicit $scc
+  ; DAG-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE killed [[S_CSELECT_B32_2]], %subreg.sub0, killed [[S_CSELECT_B32_1]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.4.bb.2.end:
+  ; DAG-NEXT:   [[PHI1:%[0-9]+]]:sreg_64 = PHI [[PHI]], %bb.2, [[COPY11]], %bb.3
+  ; DAG-NEXT:   [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT:   [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 4
+  ; DAG-NEXT:   [[COPY12:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_5]]
+  ; DAG-NEXT:   [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_4]]
+  ; DAG-NEXT:   [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY12]], %subreg.sub0, [[COPY13]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY14:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY14]], killed [[REG_SEQUENCE2]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_6:%[0-9]+]]:sreg_32 = S_MOV_B32 5
+  ; DAG-NEXT:   [[COPY15:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_6]]
+  ; DAG-NEXT:   [[COPY16:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_4]]
+  ; DAG-NEXT:   [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY15]], %subreg.sub0, [[COPY16]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY17:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY17]], killed [[REG_SEQUENCE3]], 0, 0, implicit $exec, implicit $flat_scr :: (load store acq_rel monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_7:%[0-9]+]]:sreg_32 = S_MOV_B32 6
+  ; DAG-NEXT:   [[COPY18:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_7]]
+  ; DAG-NEXT:   [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_4]]
+  ; DAG-NEXT:   [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY18]], %subreg.sub0, [[COPY19]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY20:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY20]], killed [[REG_SEQUENCE4]], 0, 0, implicit $exec, implicit $flat_scr :: (load store acquire monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_8:%[0-9]+]]:sreg_32 = S_MOV_B32 7
+  ; DAG-NEXT:   [[COPY21:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_8]]
+  ; DAG-NEXT:   [[COPY22:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_4]]
+  ; DAG-NEXT:   [[REG_SEQUENCE5:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY21]], %subreg.sub0, [[COPY22]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY23:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY23]], killed [[REG_SEQUENCE5]], 0, 0, implicit $exec, implicit $flat_scr :: (load store release monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_9:%[0-9]+]]:sreg_32 = S_MOV_B32 8
+  ; DAG-NEXT:   [[COPY24:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_9]]
+  ; DAG-NEXT:   [[COPY25:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_4]]
+  ; DAG-NEXT:   [[REG_SEQUENCE6:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY24]], %subreg.sub0, [[COPY25]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY26:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY26]], killed [[REG_SEQUENCE6]], 0, 0, implicit $exec, implicit $flat_scr :: (load store seq_cst monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_10:%[0-9]+]]:sreg_32 = S_MOV_B32 9
+  ; DAG-NEXT:   [[COPY27:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_10]]
+  ; DAG-NEXT:   [[COPY28:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_4]]
+  ; DAG-NEXT:   [[REG_SEQUENCE7:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY27]], %subreg.sub0, [[COPY28]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY29:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY29]], killed [[REG_SEQUENCE7]], 0, 0, implicit $exec, implicit $flat_scr :: (load store seq_cst monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_11:%[0-9]+]]:sreg_32 = S_MOV_B32 10
+  ; DAG-NEXT:   [[COPY30:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_11]]
+  ; DAG-NEXT:   [[COPY31:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_4]]
+  ; DAG-NEXT:   [[REG_SEQUENCE8:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY30]], %subreg.sub0, [[COPY31]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY32:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY32]], killed [[REG_SEQUENCE8]], 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_12:%[0-9]+]]:sreg_32 = S_MOV_B32 11
+  ; DAG-NEXT:   [[COPY33:%[0-9]+]]:vgpr_32 = COPY killed [[S_MOV_B32_12]]
+  ; DAG-NEXT:   [[COPY34:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_4]]
+  ; DAG-NEXT:   [[REG_SEQUENCE9:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY33]], %subreg.sub0, [[COPY34]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY35:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY35]], killed [[REG_SEQUENCE9]], 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("singlethread") seq_cst monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   S_ENDPGM 0
+  ;
+  ; GIS-LABEL: name: no_alias_addr_space_branch_cmpxchg
+  ; GIS: bb.1 (%ir-block.0):
+  ; GIS-NEXT:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
+  ; GIS-NEXT:   liveins: $sgpr0_sgpr1
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+  ; GIS-NEXT:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @gptr, target-flags(amdgpu-gotprel32-hi) @gptr, implicit-def $scc
+  ; GIS-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (p1) from got, addrspace 4)
+  ; GIS-NEXT:   [[COPY1:%[0-9]+]]:sreg_64 = COPY [[S_LOAD_DWORDX2_IMM]]
+  ; GIS-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]], 36, 0 :: (dereferenceable invariant load (<2 x s32>) from %ir.sptr.kernarg.offset, align 4, addrspace 4)
+  ; GIS-NEXT:   [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub0
+  ; GIS-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub1
+  ; GIS-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1
+  ; GIS-NEXT:   [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY3]], [[S_MOV_B32_]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_XOR_B32_1:%[0-9]+]]:sreg_32 = S_XOR_B32 [[S_XOR_B32_]], [[S_MOV_B32_]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_XOR_B32_1]], 1, implicit-def dead $scc
+  ; GIS-NEXT:   $scc = COPY [[S_AND_B32_]]
+  ; GIS-NEXT:   S_CBRANCH_SCC1 %bb.3, implicit $scc
+  ; GIS-NEXT:   S_BRANCH %bb.2
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.2.bb.1.false:
+  ; GIS-NEXT:   successors: %bb.3(0x80000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 16
+  ; GIS-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 5
+  ; GIS-NEXT:   [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[S_MOV_B32_1]], [[S_MOV_B32_2]], implicit-def dead $scc
+  ; GIS-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr32
+  ; GIS-NEXT:   [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY4]], [[S_LSHL_B32_]], implicit-def dead $scc
+  ; GIS-NEXT:   $sgpr32 = COPY [[S_ADD_U32_]]
+  ; GIS-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_private_base
+  ; GIS-NEXT:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub0
+  ; GIS-NEXT:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub1
+  ; GIS-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY6]], %subreg.sub1
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.3.bb.1.end:
+  ; GIS-NEXT:   successors: %bb.4(0x40000000), %bb.5(0x40000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[PHI:%[0-9]+]]:sreg_64 = PHI [[COPY1]], %bb.1, [[REG_SEQUENCE]], %bb.2
+  ; GIS-NEXT:   [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]], 40, 0 :: (dereferenceable invariant load (s32) from %ir.cond2.kernarg.offset.align.down, align 8, addrspace 4)
+  ; GIS-NEXT:   [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 8
+  ; GIS-NEXT:   [[S_LSHR_B32_:%[0-9]+]]:sreg_32 = S_LSHR_B32 [[S_LOAD_DWORD_IMM]], [[S_MOV_B32_3]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 1
+  ; GIS-NEXT:   [[S_XOR_B32_2:%[0-9]+]]:sreg_32 = S_XOR_B32 [[S_LSHR_B32_]], [[S_MOV_B32_4]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_XOR_B32_2]], 1, implicit-def dead $scc
+  ; GIS-NEXT:   $scc = COPY [[S_AND_B32_1]]
+  ; GIS-NEXT:   S_CBRANCH_SCC1 %bb.5, implicit $scc
+  ; GIS-NEXT:   S_BRANCH %bb.4
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.4.bb.2.true:
+  ; GIS-NEXT:   successors: %bb.5(0x80000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub0
+  ; GIS-NEXT:   [[COPY8:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub1
+  ; GIS-NEXT:   [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_shared_base
+  ; GIS-NEXT:   [[COPY9:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub0
+  ; GIS-NEXT:   [[COPY10:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub1
+  ; GIS-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY10]], %subreg.sub1
+  ; GIS-NEXT:   [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; GIS-NEXT:   [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO 0
+  ; GIS-NEXT:   S_CMP_LG_U32 [[COPY7]], [[S_MOV_B32_5]], implicit-def $scc
+  ; GIS-NEXT:   [[COPY11:%[0-9]+]]:sreg_32 = COPY $scc
+  ; GIS-NEXT:   $scc = COPY [[COPY11]]
+  ; GIS-NEXT:   [[S_CSELECT_B64_:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[REG_SEQUENCE1]], [[S_MOV_B]], implicit $scc
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.5.bb.2.end:
+  ; GIS-NEXT:   [[PHI1:%[0-9]+]]:sreg_64 = PHI [[PHI]], %bb.3, [[S_CSELECT_B64_]], %bb.4
+  ; GIS-NEXT:   [[S_MOV_B32_6:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; GIS-NEXT:   [[S_MOV_B32_7:%[0-9]+]]:sreg_32 = S_MOV_B32 4
+  ; GIS-NEXT:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_7]], %subreg.sub0, [[S_MOV_B32_6]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY12:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY12]], [[COPY13]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_8:%[0-9]+]]:sreg_32 = S_MOV_B32 5
+  ; GIS-NEXT:   [[REG_SEQUENCE3:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_8]], %subreg.sub0, [[S_MOV_B32_6]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY14:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE3]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY14]], [[COPY15]], 0, 0, implicit $exec, implicit $flat_scr :: (load store acq_rel monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_9:%[0-9]+]]:sreg_32 = S_MOV_B32 6
+  ; GIS-NEXT:   [[REG_SEQUENCE4:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_9]], %subreg.sub0, [[S_MOV_B32_6]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY16:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY17:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE4]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY16]], [[COPY17]], 0, 0, implicit $exec, implicit $flat_scr :: (load store acquire monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_10:%[0-9]+]]:sreg_32 = S_MOV_B32 7
+  ; GIS-NEXT:   [[REG_SEQUENCE5:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_10]], %subreg.sub0, [[S_MOV_B32_6]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY18:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY19:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE5]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY18]], [[COPY19]], 0, 0, implicit $exec, implicit $flat_scr :: (load store release monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_11:%[0-9]+]]:sreg_32 = S_MOV_B32 8
+  ; GIS-NEXT:   [[REG_SEQUENCE6:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_11]], %subreg.sub0, [[S_MOV_B32_6]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY20:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY21:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE6]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY20]], [[COPY21]], 0, 0, implicit $exec, implicit $flat_scr :: (load store seq_cst monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_12:%[0-9]+]]:sreg_32 = S_MOV_B32 9
+  ; GIS-NEXT:   [[REG_SEQUENCE7:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_12]], %subreg.sub0, [[S_MOV_B32_6]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY22:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY23:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE7]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY22]], [[COPY23]], 0, 0, implicit $exec, implicit $flat_scr :: (load store seq_cst monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_13:%[0-9]+]]:sreg_32 = S_MOV_B32 10
+  ; GIS-NEXT:   [[REG_SEQUENCE8:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_13]], %subreg.sub0, [[S_MOV_B32_6]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY24:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY25:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE8]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY24]], [[COPY25]], 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_14:%[0-9]+]]:sreg_32 = S_MOV_B32 11
+  ; GIS-NEXT:   [[REG_SEQUENCE9:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_14]], %subreg.sub0, [[S_MOV_B32_6]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY26:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY27:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE9]]
+  ; GIS-NEXT:   FLAT_ATOMIC_CMPSWAP [[COPY26]], [[COPY27]], 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("singlethread") seq_cst monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   S_ENDPGM 0
+  br i1 %cond1, label %bb.1.true, label %bb.1.false
+
+bb.1.true:                                        ; preds = %0
+  %a = addrspacecast ptr addrspace(1) @gptr to ptr
+  br label %bb.1.end
+
+bb.1.false:                                       ; preds = %0
+  %lptr = alloca i32, align 4, addrspace(5)
+  %b = addrspacecast ptr addrspace(5) %lptr to ptr
+  br label %bb.1.end
+
+bb.1.end:                                         ; preds = %bb.1.false, %bb.1.true
+  %ptr1 = phi ptr [ %a, %bb.1.true ], [ %b, %bb.1.false ]
+  br i1 %cond2, label %bb.2.true, label %bb.2.end
+
+bb.2.true:                                        ; preds = %bb.1.end
+  %c = addrspacecast ptr addrspace(3) %sptr to ptr
+  br label %bb.2.end
+
+bb.2.end:                                         ; preds = %bb.2.true, %bb.1.end
+  %ptr2 = phi ptr [ %ptr1, %bb.1.end ], [ %c, %bb.2.true ]
+  %cmpxchg.0 = cmpxchg ptr %ptr2, i32 0, i32 4 monotonic monotonic, align 4, !noalias.addrspace !0
+  %cmpxchg.1 = cmpxchg ptr %ptr2, i32 0, i32 5 acq_rel monotonic, align 4, !noalias.addrspace !0
+  %cmpxchg.2 = cmpxchg ptr %ptr2, i32 0, i32 6 acquire monotonic, align 4, !noalias.addrspace !0
+  %cmpxchg.3 = cmpxchg ptr %ptr2, i32 0, i32 7 release monotonic, align 4, !noalias.addrspace !0
+  %cmpxchg.4 = cmpxchg ptr %ptr2, i32 0, i32 8 seq_cst monotonic, align 4, !noalias.addrspace !0
+  %cmpxchg.5 = cmpxchg weak ptr %ptr2, i32 0, i32 9 seq_cst monotonic, align 4, !noalias.addrspace !0
+  %cmpxchg.6 = cmpxchg volatile ptr %ptr2, i32 0, i32 10 seq_cst monotonic, align 4, !noalias.addrspace !0
+  %cmpxchg.7 = cmpxchg weak volatile ptr %ptr2, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4, !noalias.addrspace !0
+  ret void
+}
+
+define amdgpu_kernel void @no_alias_addr_space_select_atomicrmw(ptr addrspace(3) %sptr, i1 %cond1, i1 %cond2, i32 %val, i32 %offset) #0 {
+  ; DAG-LABEL: name: no_alias_addr_space_select_atomicrmw
+  ; DAG: bb.0 (%ir-block.0):
+  ; DAG-NEXT:   successors: %bb.1(0x80000000)
+  ; DAG-NEXT:   liveins: $sgpr0_sgpr1
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1
+  ; DAG-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.sptr.kernarg.offset, align 4, addrspace 4)
+  ; DAG-NEXT:   [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 40, 0 :: (dereferenceable invariant load (s32) from %ir.cond2.kernarg.offset.align.down, align 8, addrspace 4)
+  ; DAG-NEXT:   [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 48, 0 :: (dereferenceable invariant load (s32) from %ir.offset.kernarg.offset, align 16, addrspace 4)
+  ; DAG-NEXT:   [[COPY1:%[0-9]+]]:sreg_64 = COPY killed [[S_LOAD_DWORDX2_IMM]]
+  ; DAG-NEXT:   [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
+  ; DAG-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub1
+  ; DAG-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY3]], implicit-def dead $scc
+  ; DAG-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 8
+  ; DAG-NEXT:   [[S_LSHR_B32_:%[0-9]+]]:sreg_32 = S_LSHR_B32 killed [[S_LOAD_DWORD_IMM]], killed [[S_MOV_B32_]], implicit-def dead $scc
+  ; DAG-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY killed [[S_LSHR_B32_]]
+  ; DAG-NEXT:   [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY4]], implicit-def dead $scc
+  ; DAG-NEXT:   [[S_ASHR_I32_:%[0-9]+]]:sreg_32 = S_ASHR_I32 [[S_LOAD_DWORD_IMM1]], 31, implicit-def dead $scc
+  ; DAG-NEXT:   [[COPY5:%[0-9]+]]:sreg_32_xm0 = COPY killed [[S_ASHR_I32_]]
+  ; DAG-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_LOAD_DWORD_IMM1]], %subreg.sub0, killed [[COPY5]], %subreg.sub1
+  ; DAG-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; DAG-NEXT:   S_CMP_LG_U32 [[COPY2]], killed [[S_MOV_B32_1]], implicit-def $scc
+  ; DAG-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT:   [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY2]], [[S_MOV_B32_2]], implicit $scc
+  ; DAG-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_shared_base
+  ; DAG-NEXT:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub1
+  ; DAG-NEXT:   [[S_CSELECT_B32_1:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[COPY6]], [[S_MOV_B32_2]], implicit $scc
+  ; DAG-NEXT:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @gptr, target-flags(amdgpu-gotprel32-hi) @gptr, implicit-def dead $scc
+  ; DAG-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
+  ; DAG-NEXT:   [[S_ADD_U64_:%[0-9]+]]:sreg_64 = S_ADD_U64 killed [[S_LOAD_DWORDX2_IMM1]], killed [[REG_SEQUENCE]]
+  ; DAG-NEXT:   S_CMP_EQ_U32 killed [[S_AND_B32_]], 1, implicit-def $scc
+  ; DAG-NEXT:   [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_private_base
+  ; DAG-NEXT:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub1
+  ; DAG-NEXT:   [[COPY8:%[0-9]+]]:sreg_32 = COPY [[S_ADD_U64_]].sub0
+  ; DAG-NEXT:   [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0.lptr
+  ; DAG-NEXT:   [[S_CSELECT_B32_2:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[COPY8]], killed [[S_MOV_B32_3]], implicit $scc
+  ; DAG-NEXT:   [[COPY9:%[0-9]+]]:sreg_32 = COPY [[S_ADD_U64_]].sub1
+  ; DAG-NEXT:   [[S_CSELECT_B32_3:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[COPY9]], killed [[COPY7]], implicit $scc
+  ; DAG-NEXT:   S_CMP_EQ_U32 killed [[S_AND_B32_1]], 1, implicit-def $scc
+  ; DAG-NEXT:   [[S_CSELECT_B32_4:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[S_CSELECT_B32_3]], killed [[S_CSELECT_B32_1]], implicit $scc
+  ; DAG-NEXT:   [[S_CSELECT_B32_5:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[S_CSELECT_B32_2]], killed [[S_CSELECT_B32_]], implicit $scc
+  ; DAG-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE killed [[S_CSELECT_B32_5]], %subreg.sub0, killed [[S_CSELECT_B32_4]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 12, implicit $exec
+  ; DAG-NEXT:   [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_SWAP [[COPY11]], killed [[V_MOV_B32_e32_]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 13, implicit $exec
+  ; DAG-NEXT:   [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_ADD [[COPY12]], killed [[V_MOV_B32_e32_1]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 14, implicit $exec
+  ; DAG-NEXT:   [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_SUB [[COPY13]], killed [[V_MOV_B32_e32_2]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_MOV_B32_e32_3:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 15, implicit $exec
+  ; DAG-NEXT:   [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_AND [[COPY14]], killed [[V_MOV_B32_e32_3]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT:   [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[COPY15]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %ir.ptr2)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.1.atomicrmw.start8:
+  ; DAG-NEXT:   successors: %bb.2(0x04000000), %bb.1(0x7c000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[PHI:%[0-9]+]]:sreg_32 = PHI [[S_MOV_B32_2]], %bb.0, %5, %bb.1
+  ; DAG-NEXT:   [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[FLAT_LOAD_DWORD]], %bb.0, %4, %bb.1
+  ; DAG-NEXT:   [[V_NOT_B32_e32_:%[0-9]+]]:vgpr_32 = V_NOT_B32_e32 [[PHI1]], implicit $exec
+  ; DAG-NEXT:   [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 -17
+  ; DAG-NEXT:   [[V_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 killed [[V_NOT_B32_e32_]], killed [[S_MOV_B32_4]], implicit $exec
+  ; DAG-NEXT:   [[DEF:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; DAG-NEXT:   [[DEF1:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; DAG-NEXT:   [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_OR_B32_e64_]], %subreg.sub0, [[PHI1]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY16:%[0-9]+]]:vreg_64 = COPY [[COPY10]]
+  ; DAG-NEXT:   [[COPY17:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]]
+  ; DAG-NEXT:   [[FLAT_ATOMIC_CMPSWAP_RTN:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_CMPSWAP_RTN [[COPY16]], killed [[COPY17]], 0, 1, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_U32_e64 [[FLAT_ATOMIC_CMPSWAP_RTN]], [[PHI1]], implicit $exec
+  ; DAG-NEXT:   [[SI_IF_BREAK:%[0-9]+]]:sreg_32 = SI_IF_BREAK killed [[V_CMP_EQ_U32_e64_]], [[PHI]], implicit-def dead $scc
+  ; DAG-NEXT:   SI_LOOP [[SI_IF_BREAK]], %bb.1, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; DAG-NEXT:   S_BRANCH %bb.2
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.2.atomicrmw.end7:
+  ; DAG-NEXT:   successors: %bb.3(0x80000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[PHI2:%[0-9]+]]:sreg_32 = PHI [[SI_IF_BREAK]], %bb.1
+  ; DAG-NEXT:   SI_END_CF [[PHI2]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; DAG-NEXT:   [[V_MOV_B32_e32_4:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 17, implicit $exec
+  ; DAG-NEXT:   [[COPY18:%[0-9]+]]:vreg_64 = COPY [[COPY10]]
+  ; DAG-NEXT:   FLAT_ATOMIC_OR [[COPY18]], killed [[V_MOV_B32_e32_4]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_MOV_B32_e32_5:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 18, implicit $exec
+  ; DAG-NEXT:   [[COPY19:%[0-9]+]]:vreg_64 = COPY [[COPY10]]
+  ; DAG-NEXT:   FLAT_ATOMIC_XOR [[COPY19]], killed [[V_MOV_B32_e32_5]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[COPY20:%[0-9]+]]:vreg_64 = COPY [[COPY10]]
+  ; DAG-NEXT:   [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[COPY20]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.3.atomicrmw.start2:
+  ; DAG-NEXT:   successors: %bb.4(0x04000000), %bb.3(0x7c000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[PHI3:%[0-9]+]]:sreg_32 = PHI [[S_MOV_B32_5]], %bb.2, %11, %bb.3
+  ; DAG-NEXT:   [[PHI4:%[0-9]+]]:vgpr_32 = PHI [[FLAT_LOAD_DWORD1]], %bb.2, %10, %bb.3
+  ; DAG-NEXT:   [[S_MOV_B32_6:%[0-9]+]]:sreg_32 = S_MOV_B32 19
+  ; DAG-NEXT:   [[V_MAX_I32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_I32_e64 [[PHI4]], killed [[S_MOV_B32_6]], implicit $exec
+  ; DAG-NEXT:   [[DEF2:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; DAG-NEXT:   [[DEF3:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; DAG-NEXT:   [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MAX_I32_e64_]], %subreg.sub0, [[PHI4]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY21:%[0-9]+]]:vreg_64 = COPY [[COPY10]]
+  ; DAG-NEXT:   [[COPY22:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE3]]
+  ; DAG-NEXT:   [[FLAT_ATOMIC_CMPSWAP_RTN1:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_CMPSWAP_RTN [[COPY21]], killed [[COPY22]], 0, 1, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_CMP_EQ_U32_e64_1:%[0-9]+]]:sreg_32 = V_CMP_EQ_U32_e64 [[FLAT_ATOMIC_CMPSWAP_RTN1]], [[PHI4]], implicit $exec
+  ; DAG-NEXT:   [[SI_IF_BREAK1:%[0-9]+]]:sreg_32 = SI_IF_BREAK killed [[V_CMP_EQ_U32_e64_1]], [[PHI3]], implicit-def dead $scc
+  ; DAG-NEXT:   SI_LOOP [[SI_IF_BREAK1]], %bb.3, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; DAG-NEXT:   S_BRANCH %bb.4
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.4.atomicrmw.end1:
+  ; DAG-NEXT:   successors: %bb.5(0x80000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[PHI5:%[0-9]+]]:sreg_32 = PHI [[SI_IF_BREAK1]], %bb.3
+  ; DAG-NEXT:   SI_END_CF [[PHI5]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; DAG-NEXT:   [[COPY23:%[0-9]+]]:vreg_64 = COPY [[COPY10]]
+  ; DAG-NEXT:   [[FLAT_LOAD_DWORD2:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[COPY23]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_7:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.5.atomicrmw.start:
+  ; DAG-NEXT:   successors: %bb.6(0x04000000), %bb.5(0x7c000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[PHI6:%[0-9]+]]:sreg_32 = PHI [[S_MOV_B32_7]], %bb.4, %17, %bb.5
+  ; DAG-NEXT:   [[PHI7:%[0-9]+]]:vgpr_32 = PHI [[FLAT_LOAD_DWORD2]], %bb.4, %16, %bb.5
+  ; DAG-NEXT:   [[S_MOV_B32_8:%[0-9]+]]:sreg_32 = S_MOV_B32 20
+  ; DAG-NEXT:   [[V_MIN_I32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_I32_e64 [[PHI7]], killed [[S_MOV_B32_8]], implicit $exec
+  ; DAG-NEXT:   [[DEF4:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; DAG-NEXT:   [[DEF5:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; DAG-NEXT:   [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MIN_I32_e64_]], %subreg.sub0, [[PHI7]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY24:%[0-9]+]]:vreg_64 = COPY [[COPY10]]
+  ; DAG-NEXT:   [[COPY25:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE4]]
+  ; DAG-NEXT:   [[FLAT_ATOMIC_CMPSWAP_RTN2:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_CMPSWAP_RTN [[COPY24]], killed [[COPY25]], 0, 1, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_CMP_EQ_U32_e64_2:%[0-9]+]]:sreg_32 = V_CMP_EQ_U32_e64 [[FLAT_ATOMIC_CMPSWAP_RTN2]], [[PHI7]], implicit $exec
+  ; DAG-NEXT:   [[SI_IF_BREAK2:%[0-9]+]]:sreg_32 = SI_IF_BREAK killed [[V_CMP_EQ_U32_e64_2]], [[PHI6]], implicit-def dead $scc
+  ; DAG-NEXT:   SI_LOOP [[SI_IF_BREAK2]], %bb.5, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; DAG-NEXT:   S_BRANCH %bb.6
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.6.atomicrmw.end:
+  ; DAG-NEXT:   [[PHI8:%[0-9]+]]:sreg_32 = PHI [[SI_IF_BREAK2]], %bb.5
+  ; DAG-NEXT:   SI_END_CF [[PHI8]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; DAG-NEXT:   [[V_MOV_B32_e32_6:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 21, implicit $exec
+  ; DAG-NEXT:   [[COPY26:%[0-9]+]]:vreg_64 = COPY [[COPY10]]
+  ; DAG-NEXT:   FLAT_ATOMIC_UMAX [[COPY26]], killed [[V_MOV_B32_e32_6]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("singlethread") monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_MOV_B32_e32_7:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 22, implicit $exec
+  ; DAG-NEXT:   [[COPY27:%[0-9]+]]:vreg_64 = COPY [[COPY10]]
+  ; DAG-NEXT:   FLAT_ATOMIC_UMIN [[COPY27]], killed [[V_MOV_B32_e32_7]], 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("singlethread") monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   S_ENDPGM 0
+  ;
+  ; GIS-LABEL: name: no_alias_addr_space_select_atomicrmw
+  ; GIS: bb.1 (%ir-block.0):
+  ; GIS-NEXT:   successors: %bb.2(0x80000000)
+  ; GIS-NEXT:   liveins: $sgpr0_sgpr1
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+  ; GIS-NEXT:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @gptr, target-flags(amdgpu-gotprel32-hi) @gptr, implicit-def $scc
+  ; GIS-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (p1) from got, addrspace 4)
+  ; GIS-NEXT:   [[COPY1:%[0-9]+]]:sreg_64 = COPY [[S_LOAD_DWORDX2_IMM]]
+  ; GIS-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]], 36, 0 :: (dereferenceable invariant load (<2 x s32>) from %ir.sptr.kernarg.offset, align 4, addrspace 4)
+  ; GIS-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; GIS-NEXT:   [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub0
+  ; GIS-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub1
+  ; GIS-NEXT:   [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]], 40, 0 :: (dereferenceable invariant load (s32) from %ir.cond2.kernarg.offset.align.down, align 8, addrspace 4)
+  ; GIS-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 8
+  ; GIS-NEXT:   [[S_LSHR_B32_:%[0-9]+]]:sreg_32 = S_LSHR_B32 [[S_LOAD_DWORD_IMM]], [[S_MOV_B32_1]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]], 48, 0 :: (dereferenceable invariant load (s32) from %ir.offset.kernarg.offset, align 16, addrspace 4)
+  ; GIS-NEXT:   [[S_ASHR_I32_:%[0-9]+]]:sreg_32 = S_ASHR_I32 [[S_LOAD_DWORD_IMM1]], 31, implicit-def dead $scc
+  ; GIS-NEXT:   [[COPY4:%[0-9]+]]:sreg_32_xm0 = COPY [[S_ASHR_I32_]]
+  ; GIS-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_LOAD_DWORD_IMM1]], %subreg.sub0, [[COPY4]], %subreg.sub1
+  ; GIS-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0.lptr
+  ; GIS-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_private_base
+  ; GIS-NEXT:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub0
+  ; GIS-NEXT:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub1
+  ; GIS-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_2]], %subreg.sub0, [[COPY6]], %subreg.sub1
+  ; GIS-NEXT:   [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_shared_base
+  ; GIS-NEXT:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub0
+  ; GIS-NEXT:   [[COPY8:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub1
+  ; GIS-NEXT:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY8]], %subreg.sub1
+  ; GIS-NEXT:   [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; GIS-NEXT:   [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO 0
+  ; GIS-NEXT:   S_CMP_LG_U32 [[COPY2]], [[S_MOV_B32_3]], implicit-def $scc
+  ; GIS-NEXT:   [[COPY9:%[0-9]+]]:sreg_32 = COPY $scc
+  ; GIS-NEXT:   $scc = COPY [[COPY9]]
+  ; GIS-NEXT:   [[S_CSELECT_B64_:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[REG_SEQUENCE2]], [[S_MOV_B]], implicit $scc
+  ; GIS-NEXT:   [[COPY10:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
+  ; GIS-NEXT:   [[COPY11:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub0
+  ; GIS-NEXT:   [[COPY12:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub1
+  ; GIS-NEXT:   [[COPY13:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE]].sub1
+  ; GIS-NEXT:   [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY10]], [[COPY11]], implicit-def $scc
+  ; GIS-NEXT:   [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 [[COPY12]], [[COPY13]], implicit-def dead $scc, implicit $scc
+  ; GIS-NEXT:   [[REG_SEQUENCE3:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
+  ; GIS-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY3]], 1, implicit-def dead $scc
+  ; GIS-NEXT:   $scc = COPY [[S_AND_B32_]]
+  ; GIS-NEXT:   [[S_CSELECT_B64_1:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[REG_SEQUENCE3]], [[REG_SEQUENCE1]], implicit $scc
+  ; GIS-NEXT:   [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LSHR_B32_]], 1, implicit-def dead $scc
+  ; GIS-NEXT:   $scc = COPY [[S_AND_B32_1]]
+  ; GIS-NEXT:   [[S_CSELECT_B64_2:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[S_CSELECT_B64_1]], [[S_CSELECT_B64_]], implicit $scc
+  ; GIS-NEXT:   [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 12
+  ; GIS-NEXT:   [[COPY14:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_4]]
+  ; GIS-NEXT:   FLAT_ATOMIC_SWAP [[COPY14]], [[COPY15]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 13
+  ; GIS-NEXT:   [[COPY16:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_5]]
+  ; GIS-NEXT:   FLAT_ATOMIC_ADD [[COPY16]], [[COPY17]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_6:%[0-9]+]]:sreg_32 = S_MOV_B32 14
+  ; GIS-NEXT:   [[COPY18:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_6]]
+  ; GIS-NEXT:   FLAT_ATOMIC_SUB [[COPY18]], [[COPY19]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_7:%[0-9]+]]:sreg_32 = S_MOV_B32 15
+  ; GIS-NEXT:   [[COPY20:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_7]]
+  ; GIS-NEXT:   FLAT_ATOMIC_AND [[COPY20]], [[COPY21]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[COPY22:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[COPY22]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %ir.ptr2)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.2.atomicrmw.start8:
+  ; GIS-NEXT:   successors: %bb.3(0x04000000), %bb.2(0x7c000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[PHI:%[0-9]+]]:sreg_32_xm0_xexec = PHI %53, %bb.2, [[S_MOV_B32_]], %bb.1
+  ; GIS-NEXT:   [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[FLAT_LOAD_DWORD]], %bb.1, %106, %bb.2
+  ; GIS-NEXT:   [[S_MOV_B32_8:%[0-9]+]]:sreg_32 = S_MOV_B32 16
+  ; GIS-NEXT:   [[COPY23:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_8]]
+  ; GIS-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 [[PHI1]], [[COPY23]], implicit $exec
+  ; GIS-NEXT:   [[V_NOT_B32_e32_:%[0-9]+]]:vgpr_32 = V_NOT_B32_e32 [[V_AND_B32_e64_]], implicit $exec
+  ; GIS-NEXT:   [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_NOT_B32_e32_]], %subreg.sub0, [[PHI1]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY24:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[FLAT_ATOMIC_CMPSWAP_RTN:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_CMPSWAP_RTN [[COPY24]], [[REG_SEQUENCE4]], 0, 1, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[FLAT_ATOMIC_CMPSWAP_RTN]], [[PHI1]], implicit $exec
+  ; GIS-NEXT:   [[SI_IF_BREAK:%[0-9]+]]:sreg_32_xm0_xexec = SI_IF_BREAK [[V_CMP_EQ_U32_e64_]], [[PHI]], implicit-def $scc
+  ; GIS-NEXT:   SI_LOOP [[SI_IF_BREAK]], %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GIS-NEXT:   S_BRANCH %bb.3
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.3.atomicrmw.end7:
+  ; GIS-NEXT:   successors: %bb.4(0x80000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[PHI2:%[0-9]+]]:sreg_32_xm0_xexec = PHI [[SI_IF_BREAK]], %bb.2
+  ; GIS-NEXT:   SI_END_CF [[PHI2]], implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GIS-NEXT:   [[S_MOV_B32_9:%[0-9]+]]:sreg_32 = S_MOV_B32 17
+  ; GIS-NEXT:   [[COPY25:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY26:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_9]]
+  ; GIS-NEXT:   FLAT_ATOMIC_OR [[COPY25]], [[COPY26]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_10:%[0-9]+]]:sreg_32 = S_MOV_B32 18
+  ; GIS-NEXT:   [[COPY27:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY28:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_10]]
+  ; GIS-NEXT:   FLAT_ATOMIC_XOR [[COPY27]], [[COPY28]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[COPY29:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[COPY29]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %ir.ptr2)
+  ; GIS-NEXT:   [[S_MOV_B32_11:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.4.atomicrmw.start2:
+  ; GIS-NEXT:   successors: %bb.5(0x04000000), %bb.4(0x7c000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[PHI3:%[0-9]+]]:sreg_32_xm0_xexec = PHI %68, %bb.4, [[S_MOV_B32_11]], %bb.3
+  ; GIS-NEXT:   [[PHI4:%[0-9]+]]:vgpr_32 = PHI [[FLAT_LOAD_DWORD1]], %bb.3, %104, %bb.4
+  ; GIS-NEXT:   [[S_MOV_B32_12:%[0-9]+]]:sreg_32 = S_MOV_B32 19
+  ; GIS-NEXT:   [[COPY30:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_12]]
+  ; GIS-NEXT:   [[V_MAX_I32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_I32_e64 [[PHI4]], [[COPY30]], implicit $exec
+  ; GIS-NEXT:   [[REG_SEQUENCE5:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MAX_I32_e64_]], %subreg.sub0, [[PHI4]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY31:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[FLAT_ATOMIC_CMPSWAP_RTN1:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_CMPSWAP_RTN [[COPY31]], [[REG_SEQUENCE5]], 0, 1, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[V_CMP_EQ_U32_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[FLAT_ATOMIC_CMPSWAP_RTN1]], [[PHI4]], implicit $exec
+  ; GIS-NEXT:   [[SI_IF_BREAK1:%[0-9]+]]:sreg_32_xm0_xexec = SI_IF_BREAK [[V_CMP_EQ_U32_e64_1]], [[PHI3]], implicit-def $scc
+  ; GIS-NEXT:   SI_LOOP [[SI_IF_BREAK1]], %bb.4, implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GIS-NEXT:   S_BRANCH %bb.5
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.5.atomicrmw.end1:
+  ; GIS-NEXT:   successors: %bb.6(0x80000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[PHI5:%[0-9]+]]:sreg_32_xm0_xexec = PHI [[SI_IF_BREAK1]], %bb.4
+  ; GIS-NEXT:   SI_END_CF [[PHI5]], implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GIS-NEXT:   [[COPY32:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[FLAT_LOAD_DWORD2:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[COPY32]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %ir.ptr2)
+  ; GIS-NEXT:   [[S_MOV_B32_13:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.6.atomicrmw.start:
+  ; GIS-NEXT:   successors: %bb.7(0x04000000), %bb.6(0x7c000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[PHI6:%[0-9]+]]:sreg_32_xm0_xexec = PHI %79, %bb.6, [[S_MOV_B32_13]], %bb.5
+  ; GIS-NEXT:   [[PHI7:%[0-9]+]]:vgpr_32 = PHI [[FLAT_LOAD_DWORD2]], %bb.5, %102, %bb.6
+  ; GIS-NEXT:   [[S_MOV_B32_14:%[0-9]+]]:sreg_32 = S_MOV_B32 20
+  ; GIS-NEXT:   [[COPY33:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_14]]
+  ; GIS-NEXT:   [[V_MIN_I32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_I32_e64 [[PHI7]], [[COPY33]], implicit $exec
+  ; GIS-NEXT:   [[REG_SEQUENCE6:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MIN_I32_e64_]], %subreg.sub0, [[PHI7]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY34:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[FLAT_ATOMIC_CMPSWAP_RTN2:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_CMPSWAP_RTN [[COPY34]], [[REG_SEQUENCE6]], 0, 1, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[V_CMP_EQ_U32_e64_2:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[FLAT_ATOMIC_CMPSWAP_RTN2]], [[PHI7]], implicit $exec
+  ; GIS-NEXT:   [[SI_IF_BREAK2:%[0-9]+]]:sreg_32_xm0_xexec = SI_IF_BREAK [[V_CMP_EQ_U32_e64_2]], [[PHI6]], implicit-def $scc
+  ; GIS-NEXT:   SI_LOOP [[SI_IF_BREAK2]], %bb.6, implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GIS-NEXT:   S_BRANCH %bb.7
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.7.atomicrmw.end:
+  ; GIS-NEXT:   [[PHI8:%[0-9]+]]:sreg_32_xm0_xexec = PHI [[SI_IF_BREAK2]], %bb.6
+  ; GIS-NEXT:   SI_END_CF [[PHI8]], implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GIS-NEXT:   [[S_MOV_B32_15:%[0-9]+]]:sreg_32 = S_MOV_B32 21
+  ; GIS-NEXT:   [[COPY35:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY36:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_15]]
+  ; GIS-NEXT:   FLAT_ATOMIC_UMAX [[COPY35]], [[COPY36]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("singlethread") monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_16:%[0-9]+]]:sreg_32 = S_MOV_B32 22
+  ; GIS-NEXT:   [[COPY37:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   [[COPY38:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_16]]
+  ; GIS-NEXT:   FLAT_ATOMIC_UMIN [[COPY37]], [[COPY38]], 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("singlethread") monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   S_ENDPGM 0
+  %lptr = alloca i32, align 4, addrspace(5)
+  %b = addrspacecast ptr addrspace(5) %lptr to ptr
+  %c = addrspacecast ptr addrspace(3) %sptr to ptr
+  %add_a = getelementptr inbounds i8, ptr addrspacecast (ptr addrspace(1) @gptr to ptr), i32 %offset
+  %ptr = select i1 %cond1, ptr %add_a, ptr %b
+  %ptr2 = select i1 %cond2, ptr %ptr, ptr %c
+  %atomicrmw.xchg = atomicrmw xchg ptr %ptr2, i32 12 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.add = atomicrmw add ptr %ptr2, i32 13 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.sub = atomicrmw sub ptr %ptr2, i32 14 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.and = atomicrmw and ptr %ptr2, i32 15 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.nand = atomicrmw nand ptr %ptr2, i32 16 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.or = atomicrmw or ptr %ptr2, i32 17 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.xor = atomicrmw xor ptr %ptr2, i32 18 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.max = atomicrmw max ptr %ptr2, i32 19 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.min = atomicrmw volatile min ptr %ptr2, i32 20 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.umax = atomicrmw umax ptr %ptr2, i32 21 syncscope("singlethread") monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.umin = atomicrmw volatile umin ptr %ptr2, i32 22 syncscope("singlethread") monotonic, align 4, !noalias.addrspace !0
+  ret void
+}
+
+define amdgpu_kernel void @no_alias_addr_space_branch_atomicrmw(ptr addrspace(3) %sptr, i1 %cond1, i1 %cond2, i32 %val, i32 %offset) #0 {
+  ; DAG-LABEL: name: no_alias_addr_space_branch_atomicrmw
+  ; DAG: bb.0 (%ir-block.0):
+  ; DAG-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; DAG-NEXT:   liveins: $sgpr0_sgpr1
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1
+  ; DAG-NEXT:   [[COPY1:%[0-9]+]]:sreg_64 = COPY [[COPY]](p4)
+  ; DAG-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.sptr.kernarg.offset, align 4, addrspace 4)
+  ; DAG-NEXT:   [[COPY2:%[0-9]+]]:sreg_64 = COPY [[S_LOAD_DWORDX2_IMM]]
+  ; DAG-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM]].sub1
+  ; DAG-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY3]], implicit-def dead $scc
+  ; DAG-NEXT:   S_CMP_EQ_U32 killed [[S_AND_B32_]], 1, implicit-def $scc
+  ; DAG-NEXT:   [[S_CSELECT_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_CSELECT_B32 -1, 0, implicit $scc
+  ; DAG-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_CSELECT_B32_]]
+  ; DAG-NEXT:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @gptr, target-flags(amdgpu-gotprel32-hi) @gptr, implicit-def dead $scc
+  ; DAG-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
+  ; DAG-NEXT:   [[COPY5:%[0-9]+]]:sreg_64 = COPY [[S_LOAD_DWORDX2_IMM1]]
+  ; DAG-NEXT:   [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 $exec_lo, killed [[COPY4]], implicit-def dead $scc
+  ; DAG-NEXT:   $vcc_lo = COPY [[S_AND_B32_1]]
+  ; DAG-NEXT:   S_CBRANCH_VCCNZ %bb.2, implicit $vcc_lo
+  ; DAG-NEXT:   S_BRANCH %bb.1
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.1.bb.1.false:
+  ; DAG-NEXT:   successors: %bb.2(0x80000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
+  ; DAG-NEXT:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr32
+  ; DAG-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 512
+  ; DAG-NEXT:   [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY6]], killed [[S_MOV_B32_]], implicit-def dead $scc
+  ; DAG-NEXT:   $sgpr32 = COPY [[S_ADD_I32_]]
+  ; DAG-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
+  ; DAG-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_private_base
+  ; DAG-NEXT:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub1
+  ; DAG-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, killed [[COPY7]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE]]
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.2.bb.1.end:
+  ; DAG-NEXT:   successors: %bb.3(0x40000000), %bb.4(0x40000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[PHI:%[0-9]+]]:sreg_64 = PHI [[COPY5]], %bb.0, [[COPY8]], %bb.1
+  ; DAG-NEXT:   [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY1]], 40, 0 :: (dereferenceable invariant load (s32) from %ir.cond2.kernarg.offset.align.down, align 8, addrspace 4)
+  ; DAG-NEXT:   [[S_BFE_U32_:%[0-9]+]]:sreg_32 = S_BFE_U32 killed [[S_LOAD_DWORD_IMM]], 65544, implicit-def dead $scc
+  ; DAG-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT:   S_CMP_EQ_U32 killed [[S_BFE_U32_]], killed [[S_MOV_B32_1]], implicit-def $scc
+  ; DAG-NEXT:   S_CBRANCH_SCC1 %bb.4, implicit $scc
+  ; DAG-NEXT:   S_BRANCH %bb.3
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.3.bb.2.true:
+  ; DAG-NEXT:   successors: %bb.4(0x80000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[COPY9:%[0-9]+]]:sreg_32 = COPY [[COPY2]].sub0
+  ; DAG-NEXT:   [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_shared_base
+  ; DAG-NEXT:   [[COPY10:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub1
+  ; DAG-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; DAG-NEXT:   S_CMP_LG_U32 [[COPY9]], killed [[S_MOV_B32_2]], implicit-def $scc
+  ; DAG-NEXT:   [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT:   [[S_CSELECT_B32_1:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[COPY10]], [[S_MOV_B32_3]], implicit $scc
+  ; DAG-NEXT:   [[S_CSELECT_B32_2:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY9]], [[S_MOV_B32_3]], implicit $scc
+  ; DAG-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE killed [[S_CSELECT_B32_2]], %subreg.sub0, killed [[S_CSELECT_B32_1]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]]
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.4.bb.2.end:
+  ; DAG-NEXT:   successors: %bb.5(0x80000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[PHI1:%[0-9]+]]:sreg_64 = PHI [[PHI]], %bb.2, [[COPY11]], %bb.3
+  ; DAG-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 12, implicit $exec
+  ; DAG-NEXT:   [[COPY12:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_SWAP [[COPY12]], killed [[V_MOV_B32_e32_]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 13, implicit $exec
+  ; DAG-NEXT:   [[COPY13:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_ADD [[COPY13]], killed [[V_MOV_B32_e32_1]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 14, implicit $exec
+  ; DAG-NEXT:   [[COPY14:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_SUB [[COPY14]], killed [[V_MOV_B32_e32_2]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_MOV_B32_e32_3:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 15, implicit $exec
+  ; DAG-NEXT:   [[COPY15:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_AND [[COPY15]], killed [[V_MOV_B32_e32_3]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[COPY16:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[COPY16]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.5.atomicrmw.start8:
+  ; DAG-NEXT:   successors: %bb.6(0x04000000), %bb.5(0x7c000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[PHI2:%[0-9]+]]:sreg_32 = PHI [[S_MOV_B32_4]], %bb.4, %10, %bb.5
+  ; DAG-NEXT:   [[PHI3:%[0-9]+]]:vgpr_32 = PHI [[FLAT_LOAD_DWORD]], %bb.4, %9, %bb.5
+  ; DAG-NEXT:   [[V_NOT_B32_e32_:%[0-9]+]]:vgpr_32 = V_NOT_B32_e32 [[PHI3]], implicit $exec
+  ; DAG-NEXT:   [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 -17
+  ; DAG-NEXT:   [[V_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 killed [[V_NOT_B32_e32_]], killed [[S_MOV_B32_5]], implicit $exec
+  ; DAG-NEXT:   [[DEF:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; DAG-NEXT:   [[DEF1:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; DAG-NEXT:   [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_OR_B32_e64_]], %subreg.sub0, [[PHI3]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY17:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   [[COPY18:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]]
+  ; DAG-NEXT:   [[FLAT_ATOMIC_CMPSWAP_RTN:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_CMPSWAP_RTN [[COPY17]], killed [[COPY18]], 0, 1, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_U32_e64 [[FLAT_ATOMIC_CMPSWAP_RTN]], [[PHI3]], implicit $exec
+  ; DAG-NEXT:   [[SI_IF_BREAK:%[0-9]+]]:sreg_32 = SI_IF_BREAK killed [[V_CMP_EQ_U32_e64_]], [[PHI2]], implicit-def dead $scc
+  ; DAG-NEXT:   SI_LOOP [[SI_IF_BREAK]], %bb.5, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; DAG-NEXT:   S_BRANCH %bb.6
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.6.atomicrmw.end7:
+  ; DAG-NEXT:   successors: %bb.7(0x80000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[PHI4:%[0-9]+]]:sreg_32 = PHI [[SI_IF_BREAK]], %bb.5
+  ; DAG-NEXT:   SI_END_CF [[PHI4]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; DAG-NEXT:   [[V_MOV_B32_e32_4:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 17, implicit $exec
+  ; DAG-NEXT:   [[COPY19:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_OR [[COPY19]], killed [[V_MOV_B32_e32_4]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_MOV_B32_e32_5:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 18, implicit $exec
+  ; DAG-NEXT:   [[COPY20:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_XOR [[COPY20]], killed [[V_MOV_B32_e32_5]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[COPY21:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[COPY21]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_6:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.7.atomicrmw.start2:
+  ; DAG-NEXT:   successors: %bb.8(0x04000000), %bb.7(0x7c000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[PHI5:%[0-9]+]]:sreg_32 = PHI [[S_MOV_B32_6]], %bb.6, %16, %bb.7
+  ; DAG-NEXT:   [[PHI6:%[0-9]+]]:vgpr_32 = PHI [[FLAT_LOAD_DWORD1]], %bb.6, %15, %bb.7
+  ; DAG-NEXT:   [[S_MOV_B32_7:%[0-9]+]]:sreg_32 = S_MOV_B32 19
+  ; DAG-NEXT:   [[V_MAX_I32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_I32_e64 [[PHI6]], killed [[S_MOV_B32_7]], implicit $exec
+  ; DAG-NEXT:   [[DEF2:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; DAG-NEXT:   [[DEF3:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; DAG-NEXT:   [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MAX_I32_e64_]], %subreg.sub0, [[PHI6]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY22:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   [[COPY23:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE3]]
+  ; DAG-NEXT:   [[FLAT_ATOMIC_CMPSWAP_RTN1:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_CMPSWAP_RTN [[COPY22]], killed [[COPY23]], 0, 1, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_CMP_EQ_U32_e64_1:%[0-9]+]]:sreg_32 = V_CMP_EQ_U32_e64 [[FLAT_ATOMIC_CMPSWAP_RTN1]], [[PHI6]], implicit $exec
+  ; DAG-NEXT:   [[SI_IF_BREAK1:%[0-9]+]]:sreg_32 = SI_IF_BREAK killed [[V_CMP_EQ_U32_e64_1]], [[PHI5]], implicit-def dead $scc
+  ; DAG-NEXT:   SI_LOOP [[SI_IF_BREAK1]], %bb.7, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; DAG-NEXT:   S_BRANCH %bb.8
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.8.atomicrmw.end1:
+  ; DAG-NEXT:   successors: %bb.9(0x80000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[PHI7:%[0-9]+]]:sreg_32 = PHI [[SI_IF_BREAK1]], %bb.7
+  ; DAG-NEXT:   SI_END_CF [[PHI7]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; DAG-NEXT:   [[COPY24:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   [[FLAT_LOAD_DWORD2:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[COPY24]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %ir.ptr2)
+  ; DAG-NEXT:   [[S_MOV_B32_8:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.9.atomicrmw.start:
+  ; DAG-NEXT:   successors: %bb.10(0x04000000), %bb.9(0x7c000000)
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[PHI8:%[0-9]+]]:sreg_32 = PHI [[S_MOV_B32_8]], %bb.8, %22, %bb.9
+  ; DAG-NEXT:   [[PHI9:%[0-9]+]]:vgpr_32 = PHI [[FLAT_LOAD_DWORD2]], %bb.8, %21, %bb.9
+  ; DAG-NEXT:   [[S_MOV_B32_9:%[0-9]+]]:sreg_32 = S_MOV_B32 20
+  ; DAG-NEXT:   [[V_MIN_I32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_I32_e64 [[PHI9]], killed [[S_MOV_B32_9]], implicit $exec
+  ; DAG-NEXT:   [[DEF4:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; DAG-NEXT:   [[DEF5:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; DAG-NEXT:   [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MIN_I32_e64_]], %subreg.sub0, [[PHI9]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY25:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   [[COPY26:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE4]]
+  ; DAG-NEXT:   [[FLAT_ATOMIC_CMPSWAP_RTN2:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_CMPSWAP_RTN [[COPY25]], killed [[COPY26]], 0, 1, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_CMP_EQ_U32_e64_2:%[0-9]+]]:sreg_32 = V_CMP_EQ_U32_e64 [[FLAT_ATOMIC_CMPSWAP_RTN2]], [[PHI9]], implicit $exec
+  ; DAG-NEXT:   [[SI_IF_BREAK2:%[0-9]+]]:sreg_32 = SI_IF_BREAK killed [[V_CMP_EQ_U32_e64_2]], [[PHI8]], implicit-def dead $scc
+  ; DAG-NEXT:   SI_LOOP [[SI_IF_BREAK2]], %bb.9, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; DAG-NEXT:   S_BRANCH %bb.10
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT: bb.10.atomicrmw.end:
+  ; DAG-NEXT:   [[PHI10:%[0-9]+]]:sreg_32 = PHI [[SI_IF_BREAK2]], %bb.9
+  ; DAG-NEXT:   SI_END_CF [[PHI10]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+  ; DAG-NEXT:   [[V_MOV_B32_e32_6:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 21, implicit $exec
+  ; DAG-NEXT:   [[COPY27:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_UMAX [[COPY27]], killed [[V_MOV_B32_e32_6]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("singlethread") monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   [[V_MOV_B32_e32_7:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 22, implicit $exec
+  ; DAG-NEXT:   [[COPY28:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; DAG-NEXT:   FLAT_ATOMIC_UMIN [[COPY28]], killed [[V_MOV_B32_e32_7]], 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("singlethread") monotonic (s32) on %ir.ptr2)
+  ; DAG-NEXT:   S_ENDPGM 0
+  ;
+  ; GIS-LABEL: name: no_alias_addr_space_branch_atomicrmw
+  ; GIS: bb.1 (%ir-block.0):
+  ; GIS-NEXT:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
+  ; GIS-NEXT:   liveins: $sgpr0_sgpr1
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+  ; GIS-NEXT:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @gptr, target-flags(amdgpu-gotprel32-hi) @gptr, implicit-def $scc
+  ; GIS-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (p1) from got, addrspace 4)
+  ; GIS-NEXT:   [[COPY1:%[0-9]+]]:sreg_64 = COPY [[S_LOAD_DWORDX2_IMM]]
+  ; GIS-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]], 36, 0 :: (dereferenceable invariant load (<2 x s32>) from %ir.sptr.kernarg.offset, align 4, addrspace 4)
+  ; GIS-NEXT:   [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub0
+  ; GIS-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub1
+  ; GIS-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 1
+  ; GIS-NEXT:   [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 [[COPY3]], [[S_MOV_B32_]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_XOR_B32_1:%[0-9]+]]:sreg_32 = S_XOR_B32 [[S_XOR_B32_]], [[S_MOV_B32_]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_XOR_B32_1]], 1, implicit-def dead $scc
+  ; GIS-NEXT:   $scc = COPY [[S_AND_B32_]]
+  ; GIS-NEXT:   S_CBRANCH_SCC1 %bb.3, implicit $scc
+  ; GIS-NEXT:   S_BRANCH %bb.2
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.2.bb.1.false:
+  ; GIS-NEXT:   successors: %bb.3(0x80000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 16
+  ; GIS-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 5
+  ; GIS-NEXT:   [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[S_MOV_B32_1]], [[S_MOV_B32_2]], implicit-def dead $scc
+  ; GIS-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr32
+  ; GIS-NEXT:   [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY4]], [[S_LSHL_B32_]], implicit-def dead $scc
+  ; GIS-NEXT:   $sgpr32 = COPY [[S_ADD_U32_]]
+  ; GIS-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_private_base
+  ; GIS-NEXT:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub0
+  ; GIS-NEXT:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub1
+  ; GIS-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY6]], %subreg.sub1
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.3.bb.1.end:
+  ; GIS-NEXT:   successors: %bb.4(0x40000000), %bb.5(0x40000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[PHI:%[0-9]+]]:sreg_64 = PHI [[COPY1]], %bb.1, [[REG_SEQUENCE]], %bb.2
+  ; GIS-NEXT:   [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]], 40, 0 :: (dereferenceable invariant load (s32) from %ir.cond2.kernarg.offset.align.down, align 8, addrspace 4)
+  ; GIS-NEXT:   [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 8
+  ; GIS-NEXT:   [[S_LSHR_B32_:%[0-9]+]]:sreg_32 = S_LSHR_B32 [[S_LOAD_DWORD_IMM]], [[S_MOV_B32_3]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_MOV_B32_4:%[0-9]+]]:sreg_32 = S_MOV_B32 1
+  ; GIS-NEXT:   [[S_XOR_B32_2:%[0-9]+]]:sreg_32 = S_XOR_B32 [[S_LSHR_B32_]], [[S_MOV_B32_4]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_XOR_B32_2]], 1, implicit-def dead $scc
+  ; GIS-NEXT:   $scc = COPY [[S_AND_B32_1]]
+  ; GIS-NEXT:   S_CBRANCH_SCC1 %bb.5, implicit $scc
+  ; GIS-NEXT:   S_BRANCH %bb.4
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.4.bb.2.true:
+  ; GIS-NEXT:   successors: %bb.5(0x80000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub0
+  ; GIS-NEXT:   [[COPY8:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub1
+  ; GIS-NEXT:   [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_shared_base
+  ; GIS-NEXT:   [[COPY9:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub0
+  ; GIS-NEXT:   [[COPY10:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub1
+  ; GIS-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY10]], %subreg.sub1
+  ; GIS-NEXT:   [[S_MOV_B32_5:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; GIS-NEXT:   [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO 0
+  ; GIS-NEXT:   S_CMP_LG_U32 [[COPY7]], [[S_MOV_B32_5]], implicit-def $scc
+  ; GIS-NEXT:   [[COPY11:%[0-9]+]]:sreg_32 = COPY $scc
+  ; GIS-NEXT:   $scc = COPY [[COPY11]]
+  ; GIS-NEXT:   [[S_CSELECT_B64_:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[REG_SEQUENCE1]], [[S_MOV_B]], implicit $scc
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.5.bb.2.end:
+  ; GIS-NEXT:   successors: %bb.6(0x80000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[PHI1:%[0-9]+]]:sreg_64 = PHI [[PHI]], %bb.3, [[S_CSELECT_B64_]], %bb.4
+  ; GIS-NEXT:   [[S_MOV_B32_6:%[0-9]+]]:sreg_32 = S_MOV_B32 12
+  ; GIS-NEXT:   [[COPY12:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_6]]
+  ; GIS-NEXT:   FLAT_ATOMIC_SWAP [[COPY12]], [[COPY13]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_7:%[0-9]+]]:sreg_32 = S_MOV_B32 13
+  ; GIS-NEXT:   [[COPY14:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_7]]
+  ; GIS-NEXT:   FLAT_ATOMIC_ADD [[COPY14]], [[COPY15]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_8:%[0-9]+]]:sreg_32 = S_MOV_B32 14
+  ; GIS-NEXT:   [[COPY16:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_8]]
+  ; GIS-NEXT:   FLAT_ATOMIC_SUB [[COPY16]], [[COPY17]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_9:%[0-9]+]]:sreg_32 = S_MOV_B32 15
+  ; GIS-NEXT:   [[COPY18:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_9]]
+  ; GIS-NEXT:   FLAT_ATOMIC_AND [[COPY18]], [[COPY19]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[COPY20:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[COPY20]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %ir.ptr2)
+  ; GIS-NEXT:   [[S_MOV_B32_10:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.6.atomicrmw.start8:
+  ; GIS-NEXT:   successors: %bb.7(0x04000000), %bb.6(0x7c000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[PHI2:%[0-9]+]]:sreg_32_xm0_xexec = PHI %53, %bb.6, [[S_MOV_B32_10]], %bb.5
+  ; GIS-NEXT:   [[PHI3:%[0-9]+]]:vgpr_32 = PHI [[FLAT_LOAD_DWORD]], %bb.5, %118, %bb.6
+  ; GIS-NEXT:   [[S_MOV_B32_11:%[0-9]+]]:sreg_32 = S_MOV_B32 16
+  ; GIS-NEXT:   [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_11]]
+  ; GIS-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 [[PHI3]], [[COPY21]], implicit $exec
+  ; GIS-NEXT:   [[V_NOT_B32_e32_:%[0-9]+]]:vgpr_32 = V_NOT_B32_e32 [[V_AND_B32_e64_]], implicit $exec
+  ; GIS-NEXT:   [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_NOT_B32_e32_]], %subreg.sub0, [[PHI3]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY22:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[FLAT_ATOMIC_CMPSWAP_RTN:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_CMPSWAP_RTN [[COPY22]], [[REG_SEQUENCE2]], 0, 1, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[FLAT_ATOMIC_CMPSWAP_RTN]], [[PHI3]], implicit $exec
+  ; GIS-NEXT:   [[SI_IF_BREAK:%[0-9]+]]:sreg_32_xm0_xexec = SI_IF_BREAK [[V_CMP_EQ_U32_e64_]], [[PHI2]], implicit-def $scc
+  ; GIS-NEXT:   SI_LOOP [[SI_IF_BREAK]], %bb.6, implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GIS-NEXT:   S_BRANCH %bb.7
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.7.atomicrmw.end7:
+  ; GIS-NEXT:   successors: %bb.8(0x80000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[PHI4:%[0-9]+]]:sreg_32_xm0_xexec = PHI [[SI_IF_BREAK]], %bb.6
+  ; GIS-NEXT:   SI_END_CF [[PHI4]], implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GIS-NEXT:   [[S_MOV_B32_12:%[0-9]+]]:sreg_32 = S_MOV_B32 17
+  ; GIS-NEXT:   [[COPY23:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY24:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_12]]
+  ; GIS-NEXT:   FLAT_ATOMIC_OR [[COPY23]], [[COPY24]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_13:%[0-9]+]]:sreg_32 = S_MOV_B32 18
+  ; GIS-NEXT:   [[COPY25:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY26:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_13]]
+  ; GIS-NEXT:   FLAT_ATOMIC_XOR [[COPY25]], [[COPY26]], 0, 0, implicit $exec, implicit $flat_scr :: (load store monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[COPY27:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[COPY27]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %ir.ptr2)
+  ; GIS-NEXT:   [[S_MOV_B32_14:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.8.atomicrmw.start2:
+  ; GIS-NEXT:   successors: %bb.9(0x04000000), %bb.8(0x7c000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[PHI5:%[0-9]+]]:sreg_32_xm0_xexec = PHI %68, %bb.8, [[S_MOV_B32_14]], %bb.7
+  ; GIS-NEXT:   [[PHI6:%[0-9]+]]:vgpr_32 = PHI [[FLAT_LOAD_DWORD1]], %bb.7, %116, %bb.8
+  ; GIS-NEXT:   [[S_MOV_B32_15:%[0-9]+]]:sreg_32 = S_MOV_B32 19
+  ; GIS-NEXT:   [[COPY28:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_15]]
+  ; GIS-NEXT:   [[V_MAX_I32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_I32_e64 [[PHI6]], [[COPY28]], implicit $exec
+  ; GIS-NEXT:   [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MAX_I32_e64_]], %subreg.sub0, [[PHI6]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY29:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[FLAT_ATOMIC_CMPSWAP_RTN1:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_CMPSWAP_RTN [[COPY29]], [[REG_SEQUENCE3]], 0, 1, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[V_CMP_EQ_U32_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[FLAT_ATOMIC_CMPSWAP_RTN1]], [[PHI6]], implicit $exec
+  ; GIS-NEXT:   [[SI_IF_BREAK1:%[0-9]+]]:sreg_32_xm0_xexec = SI_IF_BREAK [[V_CMP_EQ_U32_e64_1]], [[PHI5]], implicit-def $scc
+  ; GIS-NEXT:   SI_LOOP [[SI_IF_BREAK1]], %bb.8, implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GIS-NEXT:   S_BRANCH %bb.9
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.9.atomicrmw.end1:
+  ; GIS-NEXT:   successors: %bb.10(0x80000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[PHI7:%[0-9]+]]:sreg_32_xm0_xexec = PHI [[SI_IF_BREAK1]], %bb.8
+  ; GIS-NEXT:   SI_END_CF [[PHI7]], implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GIS-NEXT:   [[COPY30:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[FLAT_LOAD_DWORD2:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[COPY30]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %ir.ptr2)
+  ; GIS-NEXT:   [[S_MOV_B32_16:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.10.atomicrmw.start:
+  ; GIS-NEXT:   successors: %bb.11(0x04000000), %bb.10(0x7c000000)
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[PHI8:%[0-9]+]]:sreg_32_xm0_xexec = PHI %79, %bb.10, [[S_MOV_B32_16]], %bb.9
+  ; GIS-NEXT:   [[PHI9:%[0-9]+]]:vgpr_32 = PHI [[FLAT_LOAD_DWORD2]], %bb.9, %114, %bb.10
+  ; GIS-NEXT:   [[S_MOV_B32_17:%[0-9]+]]:sreg_32 = S_MOV_B32 20
+  ; GIS-NEXT:   [[COPY31:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_17]]
+  ; GIS-NEXT:   [[V_MIN_I32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_I32_e64 [[PHI9]], [[COPY31]], implicit $exec
+  ; GIS-NEXT:   [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MIN_I32_e64_]], %subreg.sub0, [[PHI9]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY32:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[FLAT_ATOMIC_CMPSWAP_RTN2:%[0-9]+]]:vgpr_32 = FLAT_ATOMIC_CMPSWAP_RTN [[COPY32]], [[REG_SEQUENCE4]], 0, 1, implicit $exec, implicit $flat_scr :: (load store monotonic monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[V_CMP_EQ_U32_e64_2:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[FLAT_ATOMIC_CMPSWAP_RTN2]], [[PHI9]], implicit $exec
+  ; GIS-NEXT:   [[SI_IF_BREAK2:%[0-9]+]]:sreg_32_xm0_xexec = SI_IF_BREAK [[V_CMP_EQ_U32_e64_2]], [[PHI8]], implicit-def $scc
+  ; GIS-NEXT:   SI_LOOP [[SI_IF_BREAK2]], %bb.10, implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GIS-NEXT:   S_BRANCH %bb.11
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT: bb.11.atomicrmw.end:
+  ; GIS-NEXT:   [[PHI10:%[0-9]+]]:sreg_32_xm0_xexec = PHI [[SI_IF_BREAK2]], %bb.10
+  ; GIS-NEXT:   SI_END_CF [[PHI10]], implicit-def $exec, implicit-def $scc, implicit $exec
+  ; GIS-NEXT:   [[S_MOV_B32_18:%[0-9]+]]:sreg_32 = S_MOV_B32 21
+  ; GIS-NEXT:   [[COPY33:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY34:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_18]]
+  ; GIS-NEXT:   FLAT_ATOMIC_UMAX [[COPY33]], [[COPY34]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("singlethread") monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   [[S_MOV_B32_19:%[0-9]+]]:sreg_32 = S_MOV_B32 22
+  ; GIS-NEXT:   [[COPY35:%[0-9]+]]:vreg_64 = COPY [[PHI1]]
+  ; GIS-NEXT:   [[COPY36:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_19]]
+  ; GIS-NEXT:   FLAT_ATOMIC_UMIN [[COPY35]], [[COPY36]], 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("singlethread") monotonic (s32) on %ir.ptr2, !noalias.addrspace !1)
+  ; GIS-NEXT:   S_ENDPGM 0
+  br i1 %cond1, label %bb.1.true, label %bb.1.false
+
+bb.1.true:                                        ; preds = %0
+  %a = addrspacecast ptr addrspace(1) @gptr to ptr
+  br label %bb.1.end
+
+bb.1.false:                                       ; preds = %0
+  %lptr = alloca i32, align 4, addrspace(5)
+  %b = addrspacecast ptr addrspace(5) %lptr to ptr
+  br label %bb.1.end
+
+bb.1.end:                                         ; preds = %bb.1.false, %bb.1.true
+  %ptr1 = phi ptr [ %a, %bb.1.true ], [ %b, %bb.1.false ]
+  br i1 %cond2, label %bb.2.true, label %bb.2.end
+
+bb.2.true:                                        ; preds = %bb.1.end
+  %c = addrspacecast ptr addrspace(3) %sptr to ptr
+  br label %bb.2.end
+
+bb.2.end:                                         ; preds = %bb.2.true, %bb.1.end
+  %ptr2 = phi ptr [ %ptr1, %bb.1.end ], [ %c, %bb.2.true ]
+  %atomicrmw.xchg = atomicrmw xchg ptr %ptr2, i32 12 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.add = atomicrmw add ptr %ptr2, i32 13 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.sub = atomicrmw sub ptr %ptr2, i32 14 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.and = atomicrmw and ptr %ptr2, i32 15 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.nand = atomicrmw nand ptr %ptr2, i32 16 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.or = atomicrmw or ptr %ptr2, i32 17 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.xor = atomicrmw xor ptr %ptr2, i32 18 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.max = atomicrmw max ptr %ptr2, i32 19 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.min = atomicrmw volatile min ptr %ptr2, i32 20 monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.umax = atomicrmw umax ptr %ptr2, i32 21 syncscope("singlethread") monotonic, align 4, !noalias.addrspace !0
+  %atomicrmw.umin = atomicrmw volatile umin ptr %ptr2, i32 22 syncscope("singlethread") monotonic, align 4, !noalias.addrspace !0
+  ret void
+}
+
+define amdgpu_kernel void @no_alias_addr_space_has_meta(ptr addrspace(3) %sptr, i1 %cond1, i1 %cond2, i32 %val, i32 %offset) #0 {
+; Verifies that !noalias.addrspace metadata attached to the IR store is
+; carried through instruction selection onto the MachineMemOperand: both the
+; SelectionDAG (DAG) and GlobalISel (GIS) check lines below expect
+; `!noalias.addrspace !2` on the final FLAT_STORE_DWORD (printed as !2 in
+; the MIR; the numbering differs from the IR's !1 — presumably renumbered
+; when the MIR is printed).
+  ; DAG-LABEL: name: no_alias_addr_space_has_meta
+  ; DAG: bb.0 (%ir-block.0):
+  ; DAG-NEXT:   liveins: $sgpr0_sgpr1
+  ; DAG-NEXT: {{  $}}
+  ; DAG-NEXT:   [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1
+  ; DAG-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.sptr.kernarg.offset, align 4, addrspace 4)
+  ; DAG-NEXT:   [[S_LOAD_DWORDX3_IMM:%[0-9]+]]:sgpr_96 = S_LOAD_DWORDX3_IMM [[COPY]](p4), 40, 0 :: (dereferenceable invariant load (s96) from %ir.cond2.kernarg.offset.align.down, align 8, addrspace 4)
+  ; DAG-NEXT:   [[COPY1:%[0-9]+]]:sreg_64 = COPY killed [[S_LOAD_DWORDX2_IMM]]
+  ; DAG-NEXT:   [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
+  ; DAG-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub1
+  ; DAG-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY3]], implicit-def dead $scc
+  ; DAG-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX3_IMM]].sub0
+  ; DAG-NEXT:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX3_IMM]].sub2
+  ; DAG-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 8
+  ; DAG-NEXT:   [[S_LSHR_B32_:%[0-9]+]]:sreg_32 = S_LSHR_B32 killed [[COPY4]], killed [[S_MOV_B32_]], implicit-def dead $scc
+  ; DAG-NEXT:   [[COPY6:%[0-9]+]]:sreg_32 = COPY killed [[S_LSHR_B32_]]
+  ; DAG-NEXT:   [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY6]], implicit-def dead $scc
+  ; DAG-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; DAG-NEXT:   S_CMP_LG_U32 [[COPY2]], killed [[S_MOV_B32_1]], implicit-def $scc
+  ; DAG-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; DAG-NEXT:   [[S_CSELECT_B32_:%[0-9]+]]:sreg_32 = S_CSELECT_B32 [[COPY2]], [[S_MOV_B32_2]], implicit $scc
+  ; DAG-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_shared_base
+  ; DAG-NEXT:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub1
+  ; DAG-NEXT:   [[S_CSELECT_B32_1:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[COPY7]], [[S_MOV_B32_2]], implicit $scc
+  ; DAG-NEXT:   [[S_ASHR_I32_:%[0-9]+]]:sreg_32 = S_ASHR_I32 [[COPY5]], 31, implicit-def dead $scc
+  ; DAG-NEXT:   [[COPY8:%[0-9]+]]:sreg_32_xm0 = COPY killed [[S_ASHR_I32_]]
+  ; DAG-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, killed [[COPY8]], %subreg.sub1
+  ; DAG-NEXT:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @gptr, target-flags(amdgpu-gotprel32-hi) @gptr, implicit-def dead $scc
+  ; DAG-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (s64) from got, addrspace 4)
+  ; DAG-NEXT:   [[S_ADD_U64_:%[0-9]+]]:sreg_64 = S_ADD_U64 killed [[S_LOAD_DWORDX2_IMM1]], killed [[REG_SEQUENCE]]
+  ; DAG-NEXT:   S_CMP_EQ_U32 killed [[S_AND_B32_]], 1, implicit-def $scc
+  ; DAG-NEXT:   [[COPY9:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX3_IMM]].sub1
+  ; DAG-NEXT:   [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_private_base
+  ; DAG-NEXT:   [[COPY10:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub1
+  ; DAG-NEXT:   [[COPY11:%[0-9]+]]:sreg_32 = COPY [[S_ADD_U64_]].sub0
+  ; DAG-NEXT:   [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0.lptr
+  ; DAG-NEXT:   [[S_CSELECT_B32_2:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[COPY11]], killed [[S_MOV_B32_3]], implicit $scc
+  ; DAG-NEXT:   [[COPY12:%[0-9]+]]:sreg_32 = COPY [[S_ADD_U64_]].sub1
+  ; DAG-NEXT:   [[S_CSELECT_B32_3:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[COPY12]], killed [[COPY10]], implicit $scc
+  ; DAG-NEXT:   S_CMP_EQ_U32 killed [[S_AND_B32_1]], 1, implicit-def $scc
+  ; DAG-NEXT:   [[S_CSELECT_B32_4:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[S_CSELECT_B32_3]], killed [[S_CSELECT_B32_1]], implicit $scc
+  ; DAG-NEXT:   [[S_CSELECT_B32_5:%[0-9]+]]:sreg_32 = S_CSELECT_B32 killed [[S_CSELECT_B32_2]], killed [[S_CSELECT_B32_]], implicit $scc
+  ; DAG-NEXT:   [[COPY13:%[0-9]+]]:vgpr_32 = COPY killed [[S_CSELECT_B32_5]]
+  ; DAG-NEXT:   [[COPY14:%[0-9]+]]:vgpr_32 = COPY killed [[S_CSELECT_B32_4]]
+  ; DAG-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[COPY13]], %subreg.sub0, killed [[COPY14]], %subreg.sub1
+  ; DAG-NEXT:   [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
+  ; DAG-NEXT:   FLAT_STORE_DWORD killed [[REG_SEQUENCE1]], killed [[COPY15]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.ptr2, !noalias.addrspace !2)
+  ; DAG-NEXT:   S_ENDPGM 0
+  ;
+  ; GIS-LABEL: name: no_alias_addr_space_has_meta
+  ; GIS: bb.1 (%ir-block.0):
+  ; GIS-NEXT:   liveins: $sgpr0_sgpr1
+  ; GIS-NEXT: {{  $}}
+  ; GIS-NEXT:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
+  ; GIS-NEXT:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @gptr, target-flags(amdgpu-gotprel32-hi) @gptr, implicit-def $scc
+  ; GIS-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[SI_PC_ADD_REL_OFFSET]], 0, 0 :: (dereferenceable invariant load (p1) from got, addrspace 4)
+  ; GIS-NEXT:   [[COPY1:%[0-9]+]]:sreg_64 = COPY [[S_LOAD_DWORDX2_IMM]]
+  ; GIS-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]], 36, 0 :: (dereferenceable invariant load (<2 x s32>) from %ir.sptr.kernarg.offset, align 4, addrspace 4)
+  ; GIS-NEXT:   [[COPY2:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub0
+  ; GIS-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub1
+  ; GIS-NEXT:   [[S_LOAD_DWORDX3_IMM:%[0-9]+]]:sgpr_96 = S_LOAD_DWORDX3_IMM [[COPY]], 40, 0 :: (dereferenceable invariant load (<3 x s32>) from %ir.cond2.kernarg.offset.align.down, align 8, addrspace 4)
+  ; GIS-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX3_IMM]].sub0
+  ; GIS-NEXT:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX3_IMM]].sub1
+  ; GIS-NEXT:   [[COPY6:%[0-9]+]]:sreg_32_xexec_hi_and_sreg_32_xm0 = COPY [[S_LOAD_DWORDX3_IMM]].sub2
+  ; GIS-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 8
+  ; GIS-NEXT:   [[S_LSHR_B32_:%[0-9]+]]:sreg_32 = S_LSHR_B32 [[COPY4]], [[S_MOV_B32_]], implicit-def dead $scc
+  ; GIS-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0.lptr
+  ; GIS-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_private_base
+  ; GIS-NEXT:   [[COPY7:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub0
+  ; GIS-NEXT:   [[COPY8:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub1
+  ; GIS-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[COPY8]], %subreg.sub1
+  ; GIS-NEXT:   [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 $src_shared_base
+  ; GIS-NEXT:   [[COPY9:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub0
+  ; GIS-NEXT:   [[COPY10:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_1]].sub1
+  ; GIS-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY10]], %subreg.sub1
+  ; GIS-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
+  ; GIS-NEXT:   [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO 0
+  ; GIS-NEXT:   S_CMP_LG_U32 [[COPY2]], [[S_MOV_B32_2]], implicit-def $scc
+  ; GIS-NEXT:   [[COPY11:%[0-9]+]]:sreg_32 = COPY $scc
+  ; GIS-NEXT:   $scc = COPY [[COPY11]]
+  ; GIS-NEXT:   [[S_CSELECT_B64_:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[REG_SEQUENCE1]], [[S_MOV_B]], implicit $scc
+  ; GIS-NEXT:   [[S_ASHR_I32_:%[0-9]+]]:sreg_32 = S_ASHR_I32 [[COPY6]], 31, implicit-def dead $scc
+  ; GIS-NEXT:   [[COPY12:%[0-9]+]]:sreg_32_xm0 = COPY [[S_ASHR_I32_]]
+  ; GIS-NEXT:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY12]], %subreg.sub1
+  ; GIS-NEXT:   [[COPY13:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
+  ; GIS-NEXT:   [[COPY14:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE2]].sub0
+  ; GIS-NEXT:   [[COPY15:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub1
+  ; GIS-NEXT:   [[COPY16:%[0-9]+]]:sreg_32 = COPY [[REG_SEQUENCE2]].sub1
+  ; GIS-NEXT:   [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY13]], [[COPY14]], implicit-def $scc
+  ; GIS-NEXT:   [[S_ADDC_U32_:%[0-9]+]]:sreg_32 = S_ADDC_U32 [[COPY15]], [[COPY16]], implicit-def dead $scc, implicit $scc
+  ; GIS-NEXT:   [[REG_SEQUENCE3:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_ADDC_U32_]], %subreg.sub1
+  ; GIS-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[COPY3]], 1, implicit-def dead $scc
+  ; GIS-NEXT:   $scc = COPY [[S_AND_B32_]]
+  ; GIS-NEXT:   [[S_CSELECT_B64_1:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[REG_SEQUENCE3]], [[REG_SEQUENCE]], implicit $scc
+  ; GIS-NEXT:   [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LSHR_B32_]], 1, implicit-def dead $scc
+  ; GIS-NEXT:   $scc = COPY [[S_AND_B32_1]]
+  ; GIS-NEXT:   [[S_CSELECT_B64_2:%[0-9]+]]:sreg_64 = S_CSELECT_B64 [[S_CSELECT_B64_1]], [[S_CSELECT_B64_]], implicit $scc
+  ; GIS-NEXT:   [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+  ; GIS-NEXT:   [[COPY18:%[0-9]+]]:vreg_64 = COPY [[S_CSELECT_B64_2]]
+  ; GIS-NEXT:   FLAT_STORE_DWORD [[COPY18]], [[COPY17]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.ptr2, !noalias.addrspace !2)
+  ; GIS-NEXT:   S_ENDPGM 0
+; Build a flat pointer that, depending on %cond1/%cond2, aliases private
+; memory (the addrspace(5) alloca), addrspace(3) memory (%sptr), or
+; addrspace(1) memory (@gptr + %offset), then store through it with
+; !noalias.addrspace !1 attached to the store.
+  %lptr = alloca i32, align 4, addrspace(5)
+  %b = addrspacecast ptr addrspace(5) %lptr to ptr
+  %c = addrspacecast ptr addrspace(3) %sptr to ptr
+  %add_a = getelementptr inbounds i8, ptr addrspacecast (ptr addrspace(1) @gptr to ptr), i32 %offset
+  %ptr = select i1 %cond1, ptr %add_a, ptr %b
+  %ptr2 = select i1 %cond2, ptr %ptr, ptr %c
+  store i32 %val, ptr %ptr2, align 4, !noalias.addrspace !1
+  ret void
+}
+
+attributes #0 = { "amdgpu-agpr-alloc"="0" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+attributes #1 = { "amdgpu-agpr-alloc"="0" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+attributes #2 = { "amdgpu-agpr-alloc"="0" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,10" "uniform-work-group-size"="false" }
+
+!0 = !{i32 2, i32 3, i32 4, i32 5, i32 6, i32 10}
+!1 = !{i32 2, i32 3, i32 4, i32 10}



More information about the llvm-commits mailing list