[llvm] [ASAN][AMDGPU] Make address sanitizer checks more efficient for the divergent target. (PR #72247)

Valery Pykhtin via llvm-commits llvm-commits@lists.llvm.org
Tue Nov 14 04:24:34 PST 2023


https://github.com/vpykhtin created https://github.com/llvm/llvm-project/pull/72247

Address sanitizer checks for the AMDGPU target in non-recovery mode are currently not very efficient, which can be illustrated with the following program:
```
instr_before; 
load ptr1; 
instr_in_the_middle; 
load ptr2; 
instr_after; 
```
ASAN generates the following instrumentation:
```
instr_before; 
if (sanity_check_passed(ptr1)) 
  load ptr1; 
  instr_in_the_middle; 
  if (sanity_check_passed(ptr2)) 
     load ptr2; 
     instr_after; 
  else 
     // ASAN report block 2 
     __asan_report(ptr2); // wave terminates   
     unreachable; 
else 
   // ASAN report block 1 
  __asan_report(ptr1); // wave terminates 
  unreachable; 
```
Each sanitizer check is treated as a non-uniform condition (which is correct: some lanes may pass the check while others fail). This yields the structure above: normal program flow continues in the _then_ blocks, so lanes that pass all sanity checks run to the end of the program, and the wave then terminates at the first reporting _else_ block. For every _else_ block the compiler has to keep the exec mask and the pointer value needed to report the error, consuming tons (megatons!) of registers that stay live until the end of the program. 
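
For reference, the pre-patch instrumentation of a single 4-byte load has roughly the following IR shape (a sketch reconstructed from the existing CHECK lines updated later in this patch; value names are illustrative):
```
declare void @__asan_report_load4(i64)

define void @old_check(ptr %p) sanitize_address {
entry:
  %addr = ptrtoint ptr %p to i64
  %shadow.off = lshr i64 %addr, 3
  %shadow.int = add i64 %shadow.off, 2147450880  ; 0x7fff8000 shadow offset
  %shadow.ptr = inttoptr i64 %shadow.int to ptr
  %shadow = load i8, ptr %shadow.ptr, align 1
  %nonzero = icmp ne i8 %shadow, 0
  br i1 %nonzero, label %slow, label %cont

slow:                              ; divergent: taken per lane
  %low3 = and i64 %addr, 7
  %last64 = add i64 %low3, 3       ; kAccessSize - 1 for a 4-byte access
  %last = trunc i64 %last64 to i8
  %bad = icmp sge i8 %last, %shadow
  br i1 %bad, label %report, label %cont

report:                            ; terminates the wave
  call void @__asan_report_load4(i64 %addr)
  unreachable

cont:
  %val = load i32, ptr %p, align 4
  ret void
}
```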

This patch changes the behavior on a failing sanity check: instead of waiting until the passing lanes reach the end of the program, the error is reported and the wave terminated as soon as any lane violates the check. With this approach the check condition is treated as uniform, and the resulting program looks much like ordinary CPU code:

```
instr_before; 
if (any_lane_violated(sanity_check_passed(ptr1)))
  // ASAN report block 1 
  __asan_report(ptr1); // abort the program 
  unreachable; 
load ptr1; 
instr_in_the_middle; 
if (any_lane_violated(sanity_check_passed(ptr2))) 
  // ASAN report block 2   
  __asan_report(ptr2); // abort the program 
  unreachable; 
load ptr2; 
instr_after; 
```
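
Here `any_lane_violated` is not a real function but the `llvm.amdgcn.ballot` idiom the patch emits (see `genAMDGPUReportBlock` in the diff below): the per-lane failure condition is balloted across the wave, and a non-zero ballot means at least one lane failed. A minimal wave64 sketch:
```
declare i64 @llvm.amdgcn.ballot.i64(i1)

; Uniformly true if %failed is set in any active lane.
define i1 @any_lane_violated(i1 %failed) {
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %failed)
  %any = icmp ne i64 %ballot, 0
  ret i1 %any
}
```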

However, a trick is required to get through the structurizer and some later passes: the ASAN check is generated as in recovery mode, but the reporting function aborts; that is, the standard _unreachable_ instruction isn't used:
```
...
if (any_lane_violated(sanity_check_passed(ptr1)))
  // ASAN report block 1 
  __asan_report(ptr1); // abort the program 
  // pretend we're going to continue the program
load ptr1; 
...
```
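
In IR the pass therefore emits a nested structure (matching the `asan.report` blocks in the new tests): an outer branch on the uniform ballot result and, inside it, a divergent branch on the per-lane condition guarding the report call, which is followed by `llvm.amdgcn.unreachable` rather than an IR terminator so the CFG appears to fall through. A sketch with illustrative names:
```
declare i64 @llvm.amdgcn.ballot.i64(i1)
declare void @llvm.amdgcn.unreachable()
declare void @__asan_report_load4(i64)

define void @report_block(i64 %addr, i1 %failed) {
entry:
  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %failed)
  %any = icmp ne i64 %ballot, 0
  br i1 %any, label %asan.report, label %cont

asan.report:                       ; divergent: only failing lanes report
  br i1 %failed, label %report, label %rejoin

report:
  call void @__asan_report_load4(i64 %addr)
  call void @llvm.amdgcn.unreachable() ; aborts, but is not an IR terminator
  br label %rejoin

rejoin:
  br label %cont

cont:                              ; "pretend we're going to continue"
  ret void
}
```
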
This trick may create some undesirable effects:
1. The register allocator generates a lot of register save/restore code around the __asan_report call. This may bloat the code, since there is a report block for every accessed pointer.
2. Loop-invariant code in report blocks is hoisted into the loop preheader. This could probably be addressed using block frequency information, but most likely it isn't a problem at all.

These problems are to be addressed later.

### Flattening the address sanitizer check 

To simplify the divergent CFG, this patch also changes the instrumentation code from: 

```
  uint64_t address = ptr; 
  sbyte *shadow_address = MemToShadow(address); 
  sbyte shadow_value = *shadow_address; 
  if (shadow_value) { 
    sbyte last_accessed_byte = (address & 7) + kAccessSize - 1; 
    if (last_accessed_byte >= shadow_value) { 
      ReportError(address, kAccessSize, kIsWrite); 
      abort(); 
    } 
  } 
```
to 
```
  uint64_t address = ptr; 
  sbyte *shadow_address = MemToShadow(address); 
  sbyte shadow_value = *shadow_address; 

  sbyte last_accessed_byte = (address & 7) + kAccessSize - 1; 
  if (shadow_value && last_accessed_byte >= shadow_value) { 
    ReportError(address, kAccessSize, kIsWrite); 
    abort(); 
  } 
```
This saves one _if_. The eliminated branch only skipped a handful of instructions anyway, and their latency can be hidden by the load from shadow memory.
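
For a 4-byte access the flattened check boils down to a single straight-line condition (a sketch matching the `and i1` pattern in the updated test checks; names are illustrative):
```
define i1 @flattened_check(ptr %p) {
  %addr = ptrtoint ptr %p to i64
  %shadow.off = lshr i64 %addr, 3
  %shadow.int = add i64 %shadow.off, 2147450880  ; 0x7fff8000 shadow offset
  %shadow.ptr = inttoptr i64 %shadow.int to ptr
  %shadow = load i8, ptr %shadow.ptr, align 1
  %nonzero = icmp ne i8 %shadow, 0
  %low3 = and i64 %addr, 7
  %last64 = add i64 %low3, 3                     ; kAccessSize - 1 == 3
  %last = trunc i64 %last64 to i8
  %ge = icmp sge i8 %last, %shadow
  %failed = and i1 %nonzero, %ge                 ; single flattened condition
  ret i1 %failed
}
```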

From 434c58eb1a10792f9f9ed22c34812323098cd9ae Mon Sep 17 00:00:00 2001
From: Valery Pykhtin <valery.pykhtin@gmail.com>
Date: Tue, 14 Nov 2023 11:42:00 +0100
Subject: [PATCH] [ASAN][AMDGPU] Make address sanitizer checks more efficient
 for the divergent target.

---
 .../Instrumentation/AddressSanitizer.cpp      |  38 +-
 llvm/test/CodeGen/AMDGPU/asan_loop.ll         | 231 +++++++
 llvm/test/CodeGen/AMDGPU/asan_trivial.ll      | 610 ++++++++++++++++++
 .../asan_instrument_constant_address_space.ll | 103 ++-
 .../asan_instrument_generic_address_space.ll  | 260 +++++++-
 .../asan_instrument_global_address_space.ll   | 195 +++++-
 6 files changed, 1377 insertions(+), 60 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/asan_loop.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/asan_trivial.ll

diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 5c2763850ac6540..a88b271ed8e7325 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -174,6 +174,8 @@ const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";
 
 const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
 const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";
+const char kAMDGPUBallotName[] = "llvm.amdgcn.ballot.i64";
+const char kAMDGPUUnreachableName[] = "llvm.amdgcn.unreachable";
 
 // Accesses sizes are powers of two: 1, 2, 4, 8, 16.
 static const size_t kNumberOfAccessSizes = 5;
@@ -692,6 +694,8 @@ struct AddressSanitizer {
                                        Instruction *InsertBefore, Value *Addr,
                                        uint32_t TypeStoreSize, bool IsWrite,
                                        Value *SizeArgument);
+  Instruction *genAMDGPUReportBlock(IRBuilder<> &IRB, Value *Cond,
+                                    bool Recover);
   void instrumentUnusualSizeOrAlignment(Instruction *I,
                                         Instruction *InsertBefore, Value *Addr,
                                         TypeSize TypeStoreSize, bool IsWrite,
@@ -1707,6 +1711,30 @@ Instruction *AddressSanitizer::instrumentAMDGPUAddress(
   return InsertBefore;
 }
 
+Instruction *AddressSanitizer::genAMDGPUReportBlock(IRBuilder<> &IRB,
+                                                    Value *Cond, bool Recover) {
+  Module &M = *IRB.GetInsertBlock()->getModule();
+  Value *ReportCond = Cond;
+  if (!Recover) {
+    auto Ballot = M.getOrInsertFunction(kAMDGPUBallotName, IRB.getInt64Ty(),
+                                        IRB.getInt1Ty());
+    ReportCond = IRB.CreateIsNotNull(IRB.CreateCall(Ballot, {Cond}));
+  }
+
+  auto *Trm =
+      SplitBlockAndInsertIfThen(ReportCond, &*IRB.GetInsertPoint(), false,
+                                MDBuilder(*C).createBranchWeights(1, 100000));
+  Trm->getParent()->setName("asan.report");
+
+  if (Recover)
+    return Trm;
+
+  Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
+  IRB.SetInsertPoint(Trm);
+  return IRB.CreateCall(
+      M.getOrInsertFunction(kAMDGPUUnreachableName, IRB.getVoidTy()), {});
+}
+
 void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
                                          Instruction *InsertBefore, Value *Addr,
                                          MaybeAlign Alignment,
@@ -1758,7 +1786,15 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
   size_t Granularity = 1ULL << Mapping.Scale;
   Instruction *CrashTerm = nullptr;
 
-  if (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity)) {
+  bool GenSlowPath = (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
+
+  if (TargetTriple.isAMDGPU()) {
+    if (GenSlowPath) {
+      auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
+      Cmp = IRB.CreateAnd(Cmp, Cmp2);
+    }
+    CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
+  } else if (GenSlowPath) {
     // We use branch weights for the slow path check, to indicate that the slow
     // path is rarely taken. This seems to be the case for SPEC benchmarks.
     Instruction *CheckTerm = SplitBlockAndInsertIfThen(
diff --git a/llvm/test/CodeGen/AMDGPU/asan_loop.ll b/llvm/test/CodeGen/AMDGPU/asan_loop.ll
new file mode 100644
index 000000000000000..44561b6f4b9a134
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/asan_loop.ll
@@ -0,0 +1,231 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: opt  -passes=asan -S < %s | FileCheck %s --check-prefix=OPT
+; RUN: opt < %s -passes='asan,default<O3>' -o - | llc -O3 -mtriple=amdgcn-hsa-amdhsa -mcpu=gfx90a -o - | FileCheck %s --check-prefix=LLC
+
+; This test contains checks for opt and llc, to update use:
+;   utils/update_test_checks.py --force-update
+;   utils/update_llc_test_checks.py --force-update
+;
+; --force-update allows to override "Assertions have been autogenerated by" guard
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
+target triple = "amdgcn-amd-amdhsa"
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+define protected amdgpu_kernel void @uniform_loop_global(i32 %num, ptr addrspace(1) %ptr1, ptr addrspace(1) %ptr2) sanitize_address {
+; OPT-LABEL: define protected amdgpu_kernel void @uniform_loop_global(
+; OPT-SAME: i32 [[NUM:%.*]], ptr addrspace(1) [[PTR1:%.*]], ptr addrspace(1) [[PTR2:%.*]]) #[[ATTR1:[0-9]+]] {
+; OPT-NEXT:  entry:
+; OPT-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; OPT-NEXT:    br label [[WHILE_COND:%.*]]
+; OPT:       while.cond:
+; OPT-NEXT:    [[C:%.*]] = phi i32 [ [[NUM]], [[ENTRY:%.*]] ], [ [[NEXT_C:%.*]], [[TMP31:%.*]] ]
+; OPT-NEXT:    [[CMP:%.*]] = icmp eq i32 [[C]], 0
+; OPT-NEXT:    br i1 [[CMP]], label [[EXIT:%.*]], label [[WHILE_BODY:%.*]]
+; OPT:       while.body:
+; OPT-NEXT:    [[OFFS32:%.*]] = add i32 [[TID]], [[C]]
+; OPT-NEXT:    [[OFFS:%.*]] = zext i32 [[OFFS32]] to i64
+; OPT-NEXT:    [[PP1:%.*]] = getelementptr inbounds i64, ptr addrspace(1) [[PTR1]], i64 [[OFFS]]
+; OPT-NEXT:    [[TMP0:%.*]] = ptrtoint ptr addrspace(1) [[PP1]] to i64
+; OPT-NEXT:    [[TMP1:%.*]] = lshr i64 [[TMP0]], 3
+; OPT-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], 2147450880
+; OPT-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; OPT-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
+; OPT-NEXT:    [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
+; OPT-NEXT:    [[TMP6:%.*]] = and i64 [[TMP0]], 7
+; OPT-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 3
+; OPT-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i8
+; OPT-NEXT:    [[TMP9:%.*]] = icmp sge i8 [[TMP8]], [[TMP4]]
+; OPT-NEXT:    [[TMP10:%.*]] = and i1 [[TMP5]], [[TMP9]]
+; OPT-NEXT:    [[TMP11:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP10]])
+; OPT-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP11]], 0
+; OPT-NEXT:    br i1 [[TMP12]], label [[ASAN_REPORT:%.*]], label [[TMP15:%.*]], !prof [[PROF0:![0-9]+]]
+; OPT:       asan.report:
+; OPT-NEXT:    br i1 [[TMP10]], label [[TMP13:%.*]], label [[TMP14:%.*]]
+; OPT:       13:
+; OPT-NEXT:    call void @__asan_report_load4(i64 [[TMP0]]) #[[ATTR5:[0-9]+]]
+; OPT-NEXT:    call void @llvm.amdgcn.unreachable()
+; OPT-NEXT:    br label [[TMP14]]
+; OPT:       14:
+; OPT-NEXT:    br label [[TMP15]]
+; OPT:       15:
+; OPT-NEXT:    [[VAL:%.*]] = load i32, ptr addrspace(1) [[PP1]], align 4
+; OPT-NEXT:    [[SUM:%.*]] = add i32 [[VAL]], 42
+; OPT-NEXT:    [[PP2:%.*]] = getelementptr inbounds i64, ptr addrspace(1) [[PTR2]], i64 [[OFFS]]
+; OPT-NEXT:    [[TMP16:%.*]] = ptrtoint ptr addrspace(1) [[PP2]] to i64
+; OPT-NEXT:    [[TMP17:%.*]] = lshr i64 [[TMP16]], 3
+; OPT-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 2147450880
+; OPT-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; OPT-NEXT:    [[TMP20:%.*]] = load i8, ptr [[TMP19]], align 1
+; OPT-NEXT:    [[TMP21:%.*]] = icmp ne i8 [[TMP20]], 0
+; OPT-NEXT:    [[TMP22:%.*]] = and i64 [[TMP16]], 7
+; OPT-NEXT:    [[TMP23:%.*]] = add i64 [[TMP22]], 3
+; OPT-NEXT:    [[TMP24:%.*]] = trunc i64 [[TMP23]] to i8
+; OPT-NEXT:    [[TMP25:%.*]] = icmp sge i8 [[TMP24]], [[TMP20]]
+; OPT-NEXT:    [[TMP26:%.*]] = and i1 [[TMP21]], [[TMP25]]
+; OPT-NEXT:    [[TMP27:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP26]])
+; OPT-NEXT:    [[TMP28:%.*]] = icmp ne i64 [[TMP27]], 0
+; OPT-NEXT:    br i1 [[TMP28]], label [[ASAN_REPORT1:%.*]], label [[TMP31]], !prof [[PROF0]]
+; OPT:       asan.report1:
+; OPT-NEXT:    br i1 [[TMP26]], label [[TMP29:%.*]], label [[TMP30:%.*]]
+; OPT:       29:
+; OPT-NEXT:    call void @__asan_report_store4(i64 [[TMP16]]) #[[ATTR5]]
+; OPT-NEXT:    call void @llvm.amdgcn.unreachable()
+; OPT-NEXT:    br label [[TMP30]]
+; OPT:       30:
+; OPT-NEXT:    br label [[TMP31]]
+; OPT:       31:
+; OPT-NEXT:    store i32 [[SUM]], ptr addrspace(1) [[PP2]], align 4
+; OPT-NEXT:    [[NEXT_C]] = sub i32 [[C]], 1
+; OPT-NEXT:    br label [[WHILE_COND]]
+; OPT:       exit:
+; OPT-NEXT:    ret void
+;
+; LLC-LABEL: uniform_loop_global:
+; LLC:       ; %bb.0: ; %entry
+; LLC-NEXT:    s_load_dword s54, s[8:9], 0x0
+; LLC-NEXT:    s_add_u32 flat_scratch_lo, s12, s17
+; LLC-NEXT:    s_addc_u32 flat_scratch_hi, s13, 0
+; LLC-NEXT:    s_add_u32 s0, s0, s17
+; LLC-NEXT:    s_addc_u32 s1, s1, 0
+; LLC-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-NEXT:    s_cmp_eq_u32 s54, 0
+; LLC-NEXT:    s_mov_b32 s32, 0
+; LLC-NEXT:    s_cbranch_scc1 .LBB0_11
+; LLC-NEXT:  ; %bb.1: ; %while.body.preheader
+; LLC-NEXT:    s_mov_b64 s[40:41], s[4:5]
+; LLC-NEXT:    s_getpc_b64 s[4:5]
+; LLC-NEXT:    s_add_u32 s4, s4, __asan_report_load4@gotpcrel32@lo+4
+; LLC-NEXT:    s_addc_u32 s5, s5, __asan_report_load4@gotpcrel32@hi+12
+; LLC-NEXT:    s_load_dwordx2 s[48:49], s[4:5], 0x0
+; LLC-NEXT:    s_getpc_b64 s[4:5]
+; LLC-NEXT:    s_add_u32 s4, s4, __asan_report_store4@gotpcrel32@lo+4
+; LLC-NEXT:    s_addc_u32 s5, s5, __asan_report_store4@gotpcrel32@hi+12
+; LLC-NEXT:    s_load_dwordx4 s[44:47], s[8:9], 0x8
+; LLC-NEXT:    s_load_dwordx2 s[50:51], s[4:5], 0x0
+; LLC-NEXT:    v_mov_b32_e32 v44, v0
+; LLC-NEXT:    s_mov_b32 s33, s16
+; LLC-NEXT:    s_mov_b32 s42, s15
+; LLC-NEXT:    s_mov_b64 s[34:35], s[8:9]
+; LLC-NEXT:    s_mov_b32 s43, s14
+; LLC-NEXT:    s_mov_b64 s[36:37], s[10:11]
+; LLC-NEXT:    s_mov_b64 s[38:39], s[6:7]
+; LLC-NEXT:    v_and_b32_e32 v45, 0x3ff, v44
+; LLC-NEXT:    v_mov_b32_e32 v47, 0
+; LLC-NEXT:    s_branch .LBB0_4
+; LLC-NEXT:  .LBB0_2: ; %Flow
+; LLC-NEXT:    ; in Loop: Header=BB0_4 Depth=1
+; LLC-NEXT:    s_or_b64 exec, exec, s[52:53]
+; LLC-NEXT:  .LBB0_3: ; %while.cond
+; LLC-NEXT:    ; in Loop: Header=BB0_4 Depth=1
+; LLC-NEXT:    s_add_i32 s54, s54, -1
+; LLC-NEXT:    v_add_u32_e32 v0, 42, v46
+; LLC-NEXT:    s_cmp_lg_u32 s54, 0
+; LLC-NEXT:    global_store_dword v[42:43], v0, off
+; LLC-NEXT:    s_cbranch_scc0 .LBB0_11
+; LLC-NEXT:  .LBB0_4: ; %while.body
+; LLC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; LLC-NEXT:    v_add_u32_e32 v46, s54, v45
+; LLC-NEXT:    v_lshlrev_b64 v[42:43], 3, v[46:47]
+; LLC-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-NEXT:    v_mov_b32_e32 v0, s45
+; LLC-NEXT:    v_add_co_u32_e32 v40, vcc, s44, v42
+; LLC-NEXT:    v_addc_co_u32_e32 v41, vcc, v0, v43, vcc
+; LLC-NEXT:    v_lshrrev_b64 v[0:1], 3, v[40:41]
+; LLC-NEXT:    v_add_co_u32_e32 v0, vcc, 0x7fff8000, v0
+; LLC-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; LLC-NEXT:    flat_load_sbyte v0, v[0:1]
+; LLC-NEXT:    v_and_b32_e32 v1, 7, v40
+; LLC-NEXT:    v_add_u16_e32 v1, 3, v1
+; LLC-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; LLC-NEXT:    v_cmp_ne_u16_sdwa s[4:5], v0, v47 src0_sel:BYTE_0 src1_sel:DWORD
+; LLC-NEXT:    v_cmp_ge_i16_e32 vcc, v1, v0
+; LLC-NEXT:    s_and_b64 vcc, s[4:5], vcc
+; LLC-NEXT:    s_cbranch_vccz .LBB0_8
+; LLC-NEXT:  ; %bb.5: ; %asan.report
+; LLC-NEXT:    ; in Loop: Header=BB0_4 Depth=1
+; LLC-NEXT:    s_and_saveexec_b64 s[52:53], vcc
+; LLC-NEXT:    s_cbranch_execz .LBB0_7
+; LLC-NEXT:  ; %bb.6: ; in Loop: Header=BB0_4 Depth=1
+; LLC-NEXT:    s_add_u32 s8, s34, 24
+; LLC-NEXT:    s_addc_u32 s9, s35, 0
+; LLC-NEXT:    s_mov_b64 s[4:5], s[40:41]
+; LLC-NEXT:    s_mov_b64 s[6:7], s[38:39]
+; LLC-NEXT:    s_mov_b64 s[10:11], s[36:37]
+; LLC-NEXT:    s_mov_b32 s12, s43
+; LLC-NEXT:    s_mov_b32 s13, s42
+; LLC-NEXT:    s_mov_b32 s14, s33
+; LLC-NEXT:    v_mov_b32_e32 v31, v44
+; LLC-NEXT:    v_mov_b32_e32 v0, v40
+; LLC-NEXT:    v_mov_b32_e32 v1, v41
+; LLC-NEXT:    s_swappc_b64 s[30:31], s[48:49]
+; LLC-NEXT:    ; divergent unreachable
+; LLC-NEXT:  .LBB0_7: ; %Flow4
+; LLC-NEXT:    ; in Loop: Header=BB0_4 Depth=1
+; LLC-NEXT:    s_or_b64 exec, exec, s[52:53]
+; LLC-NEXT:  .LBB0_8: ; in Loop: Header=BB0_4 Depth=1
+; LLC-NEXT:    v_mov_b32_e32 v0, s47
+; LLC-NEXT:    v_add_co_u32_e32 v42, vcc, s46, v42
+; LLC-NEXT:    v_addc_co_u32_e32 v43, vcc, v0, v43, vcc
+; LLC-NEXT:    v_lshrrev_b64 v[0:1], 3, v[42:43]
+; LLC-NEXT:    v_add_co_u32_e32 v0, vcc, 0x7fff8000, v0
+; LLC-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; LLC-NEXT:    flat_load_sbyte v2, v[0:1]
+; LLC-NEXT:    global_load_dword v46, v[40:41], off
+; LLC-NEXT:    v_and_b32_e32 v0, 7, v42
+; LLC-NEXT:    v_add_u16_e32 v0, 3, v0
+; LLC-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; LLC-NEXT:    v_cmp_ne_u16_sdwa s[4:5], v2, v47 src0_sel:BYTE_0 src1_sel:DWORD
+; LLC-NEXT:    v_cmp_ge_i16_e32 vcc, v0, v2
+; LLC-NEXT:    s_and_b64 vcc, s[4:5], vcc
+; LLC-NEXT:    s_cbranch_vccz .LBB0_3
+; LLC-NEXT:  ; %bb.9: ; %asan.report1
+; LLC-NEXT:    ; in Loop: Header=BB0_4 Depth=1
+; LLC-NEXT:    s_and_saveexec_b64 s[52:53], vcc
+; LLC-NEXT:    s_cbranch_execz .LBB0_2
+; LLC-NEXT:  ; %bb.10: ; in Loop: Header=BB0_4 Depth=1
+; LLC-NEXT:    s_add_u32 s8, s34, 24
+; LLC-NEXT:    s_addc_u32 s9, s35, 0
+; LLC-NEXT:    s_mov_b64 s[4:5], s[40:41]
+; LLC-NEXT:    s_mov_b64 s[6:7], s[38:39]
+; LLC-NEXT:    s_mov_b64 s[10:11], s[36:37]
+; LLC-NEXT:    s_mov_b32 s12, s43
+; LLC-NEXT:    s_mov_b32 s13, s42
+; LLC-NEXT:    s_mov_b32 s14, s33
+; LLC-NEXT:    v_mov_b32_e32 v31, v44
+; LLC-NEXT:    v_mov_b32_e32 v0, v42
+; LLC-NEXT:    v_mov_b32_e32 v1, v43
+; LLC-NEXT:    s_swappc_b64 s[30:31], s[50:51]
+; LLC-NEXT:    ; divergent unreachable
+; LLC-NEXT:    s_branch .LBB0_2
+; LLC-NEXT:  .LBB0_11: ; %exit
+; LLC-NEXT:    s_endpgm
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  br label %while.cond
+
+while.cond:
+  %c = phi i32 [%num, %entry], [%next_c, %while.body]
+  %cmp = icmp eq i32 %c, 0
+  br i1 %cmp, label %exit, label %while.body
+
+while.body:
+  %offs32 = add i32 %tid, %c
+  %offs = zext i32 %offs32 to i64
+
+  %pp1 = getelementptr inbounds i64, ptr addrspace(1) %ptr1, i64 %offs
+  %val = load i32, ptr addrspace(1) %pp1, align 4
+
+  %sum = add i32 %val, 42
+
+  %pp2 = getelementptr inbounds i64, ptr addrspace(1) %ptr2, i64 %offs
+  store i32 %sum, ptr addrspace(1) %pp2, align 4
+
+  %next_c = sub i32 %c, 1
+  br label %while.cond
+
+exit:
+  ret void
+}
+
+attributes #0 = { nounwind readnone }
diff --git a/llvm/test/CodeGen/AMDGPU/asan_trivial.ll b/llvm/test/CodeGen/AMDGPU/asan_trivial.ll
new file mode 100644
index 000000000000000..c3b191b405dba97
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/asan_trivial.ll
@@ -0,0 +1,610 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: opt  -passes=asan -S < %s | FileCheck %s --check-prefix=OPT
+; RUN: opt < %s -passes='asan,default<O3>' -o - | llc -O3 -mtriple=amdgcn-hsa-amdhsa -mcpu=gfx90a -o - | FileCheck %s --check-prefix=LLC-W64
+; RUN: opt < %s -passes='asan,default<O3>' -o - | llc -mtriple=amdgcn-hsa-amdhsa -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -mattr=+wavefrontsize32,-wavefrontsize64 -o - | FileCheck %s --check-prefix=LLC-W32
+
+; This test contains checks for opt and llc, to update use:
+;   utils/update_test_checks.py --force-update
+;   utils/update_llc_test_checks.py --force-update
+;
+; --force-update allows to override "Assertions have been autogenerated by" guard
+target triple = "amdgcn-amd-amdhsa"
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+define protected amdgpu_kernel void @global_loadstore_uniform(ptr addrspace(1) %ptr) sanitize_address {
+; OPT-LABEL: define protected amdgpu_kernel void @global_loadstore_uniform(
+; OPT-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1:[0-9]+]] {
+; OPT-NEXT:  entry:
+; OPT-NEXT:    [[TMP0:%.*]] = ptrtoint ptr addrspace(1) [[PTR]] to i64
+; OPT-NEXT:    [[TMP1:%.*]] = lshr i64 [[TMP0]], 3
+; OPT-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], 2147450880
+; OPT-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; OPT-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
+; OPT-NEXT:    [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
+; OPT-NEXT:    [[TMP6:%.*]] = and i64 [[TMP0]], 7
+; OPT-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 3
+; OPT-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i8
+; OPT-NEXT:    [[TMP9:%.*]] = icmp sge i8 [[TMP8]], [[TMP4]]
+; OPT-NEXT:    [[TMP10:%.*]] = and i1 [[TMP5]], [[TMP9]]
+; OPT-NEXT:    [[TMP11:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP10]])
+; OPT-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP11]], 0
+; OPT-NEXT:    br i1 [[TMP12]], label [[ASAN_REPORT:%.*]], label [[TMP15:%.*]], !prof [[PROF0:![0-9]+]]
+; OPT:       asan.report:
+; OPT-NEXT:    br i1 [[TMP10]], label [[TMP13:%.*]], label [[TMP14:%.*]]
+; OPT:       13:
+; OPT-NEXT:    call void @__asan_report_load4(i64 [[TMP0]]) #[[ATTR5:[0-9]+]]
+; OPT-NEXT:    call void @llvm.amdgcn.unreachable()
+; OPT-NEXT:    br label [[TMP14]]
+; OPT:       14:
+; OPT-NEXT:    br label [[TMP15]]
+; OPT:       15:
+; OPT-NEXT:    [[VAL:%.*]] = load volatile i32, ptr addrspace(1) [[PTR]], align 4
+; OPT-NEXT:    store volatile i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
+; OPT-NEXT:    ret void
+;
+; LLC-W64-LABEL: global_loadstore_uniform:
+; LLC-W64:       ; %bb.0: ; %entry
+; LLC-W64-NEXT:    s_load_dwordx2 s[34:35], s[8:9], 0x0
+; LLC-W64-NEXT:    s_add_u32 flat_scratch_lo, s12, s17
+; LLC-W64-NEXT:    s_addc_u32 flat_scratch_hi, s13, 0
+; LLC-W64-NEXT:    s_add_u32 s0, s0, s17
+; LLC-W64-NEXT:    s_addc_u32 s1, s1, 0
+; LLC-W64-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W64-NEXT:    s_lshr_b64 s[12:13], s[34:35], 3
+; LLC-W64-NEXT:    v_mov_b32_e32 v1, s12
+; LLC-W64-NEXT:    v_add_co_u32_e32 v2, vcc, 0x7fff8000, v1
+; LLC-W64-NEXT:    v_mov_b32_e32 v1, s13
+; LLC-W64-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
+; LLC-W64-NEXT:    flat_load_sbyte v1, v[2:3]
+; LLC-W64-NEXT:    v_and_b32_e64 v2, s34, 7
+; LLC-W64-NEXT:    v_mov_b32_e32 v40, 0
+; LLC-W64-NEXT:    v_add_u16_e32 v2, 3, v2
+; LLC-W64-NEXT:    s_mov_b32 s32, 0
+; LLC-W64-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; LLC-W64-NEXT:    v_cmp_ne_u16_sdwa s[12:13], v1, v40 src0_sel:BYTE_0 src1_sel:DWORD
+; LLC-W64-NEXT:    v_cmp_ge_i16_e32 vcc, v2, v1
+; LLC-W64-NEXT:    s_and_b64 vcc, s[12:13], vcc
+; LLC-W64-NEXT:    s_cbranch_vccz .LBB0_4
+; LLC-W64-NEXT:  ; %bb.1: ; %asan.report
+; LLC-W64-NEXT:    s_and_saveexec_b64 s[36:37], vcc
+; LLC-W64-NEXT:    s_cbranch_execz .LBB0_3
+; LLC-W64-NEXT:  ; %bb.2:
+; LLC-W64-NEXT:    s_add_u32 s8, s8, 8
+; LLC-W64-NEXT:    s_addc_u32 s9, s9, 0
+; LLC-W64-NEXT:    s_getpc_b64 s[12:13]
+; LLC-W64-NEXT:    s_add_u32 s12, s12, __asan_report_load4@gotpcrel32@lo+4
+; LLC-W64-NEXT:    s_addc_u32 s13, s13, __asan_report_load4@gotpcrel32@hi+12
+; LLC-W64-NEXT:    s_load_dwordx2 s[18:19], s[12:13], 0x0
+; LLC-W64-NEXT:    s_mov_b32 s12, s14
+; LLC-W64-NEXT:    s_mov_b32 s13, s15
+; LLC-W64-NEXT:    s_mov_b32 s14, s16
+; LLC-W64-NEXT:    v_mov_b32_e32 v31, v0
+; LLC-W64-NEXT:    v_mov_b32_e32 v0, s34
+; LLC-W64-NEXT:    v_mov_b32_e32 v1, s35
+; LLC-W64-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W64-NEXT:    s_swappc_b64 s[30:31], s[18:19]
+; LLC-W64-NEXT:    ; divergent unreachable
+; LLC-W64-NEXT:  .LBB0_3: ; %Flow
+; LLC-W64-NEXT:    s_or_b64 exec, exec, s[36:37]
+; LLC-W64-NEXT:  .LBB0_4:
+; LLC-W64-NEXT:    global_load_dword v0, v40, s[34:35] glc
+; LLC-W64-NEXT:    s_waitcnt vmcnt(0)
+; LLC-W64-NEXT:    global_store_dword v40, v0, s[34:35]
+; LLC-W64-NEXT:    s_waitcnt vmcnt(0)
+; LLC-W64-NEXT:    s_endpgm
+;
+; LLC-W32-LABEL: global_loadstore_uniform:
+; LLC-W32:       ; %bb.0: ; %entry
+; LLC-W32-NEXT:    s_load_b64 s[34:35], s[4:5], 0x0
+; LLC-W32-NEXT:    s_mov_b64 s[10:11], s[6:7]
+; LLC-W32-NEXT:    s_mov_b32 s9, 0
+; LLC-W32-NEXT:    s_mov_b32 s32, 0
+; LLC-W32-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W32-NEXT:    s_lshr_b64 s[6:7], s[34:35], 3
+; LLC-W32-NEXT:    v_add_co_u32 v1, s6, 0x7fff8000, s6
+; LLC-W32-NEXT:    v_add_co_ci_u32_e64 v2, null, 0, s7, s6
+; LLC-W32-NEXT:    flat_load_i8 v1, v[1:2]
+; LLC-W32-NEXT:    v_and_b32_e64 v2, s34, 7
+; LLC-W32-NEXT:    v_add_nc_u16 v2, v2, 3
+; LLC-W32-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; LLC-W32-NEXT:    v_and_b32_e32 v3, 0xff, v1
+; LLC-W32-NEXT:    v_cmp_ge_i16_e32 vcc_lo, v2, v1
+; LLC-W32-NEXT:    v_cmp_ne_u16_e64 s6, 0, v3
+; LLC-W32-NEXT:    s_and_b32 s6, s6, vcc_lo
+; LLC-W32-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s6
+; LLC-W32-NEXT:    v_cmp_ne_u32_e64 s8, 0, v1
+; LLC-W32-NEXT:    s_cmp_eq_u64 s[8:9], 0
+; LLC-W32-NEXT:    s_cbranch_scc1 .LBB0_4
+; LLC-W32-NEXT:  ; %bb.1: ; %asan.report
+; LLC-W32-NEXT:    s_and_saveexec_b32 s33, s6
+; LLC-W32-NEXT:    s_cbranch_execz .LBB0_3
+; LLC-W32-NEXT:  ; %bb.2:
+; LLC-W32-NEXT:    s_add_u32 s8, s4, 8
+; LLC-W32-NEXT:    s_addc_u32 s9, s5, 0
+; LLC-W32-NEXT:    s_getpc_b64 s[4:5]
+; LLC-W32-NEXT:    s_add_u32 s4, s4, __asan_report_load4@gotpcrel32@lo+4
+; LLC-W32-NEXT:    s_addc_u32 s5, s5, __asan_report_load4@gotpcrel32@hi+12
+; LLC-W32-NEXT:    v_dual_mov_b32 v31, v0 :: v_dual_mov_b32 v0, s34
+; LLC-W32-NEXT:    s_load_b64 s[16:17], s[4:5], 0x0
+; LLC-W32-NEXT:    v_mov_b32_e32 v1, s35
+; LLC-W32-NEXT:    s_mov_b64 s[4:5], s[0:1]
+; LLC-W32-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; LLC-W32-NEXT:    s_mov_b32 s12, s13
+; LLC-W32-NEXT:    s_mov_b32 s13, s14
+; LLC-W32-NEXT:    s_mov_b32 s14, s15
+; LLC-W32-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W32-NEXT:    s_swappc_b64 s[30:31], s[16:17]
+; LLC-W32-NEXT:    ; divergent unreachable
+; LLC-W32-NEXT:  .LBB0_3: ; %Flow
+; LLC-W32-NEXT:    s_or_b32 exec_lo, exec_lo, s33
+; LLC-W32-NEXT:  .LBB0_4:
+; LLC-W32-NEXT:    v_mov_b32_e32 v0, 0
+; LLC-W32-NEXT:    global_load_b32 v1, v0, s[34:35] glc dlc
+; LLC-W32-NEXT:    s_waitcnt vmcnt(0)
+; LLC-W32-NEXT:    global_store_b32 v0, v1, s[34:35] dlc
+; LLC-W32-NEXT:    s_waitcnt_vscnt null, 0x0
+; LLC-W32-NEXT:    s_nop 0
+; LLC-W32-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; LLC-W32-NEXT:    s_endpgm
+entry:
+  %val = load volatile i32, ptr addrspace(1) %ptr, align 4
+  store volatile i32 %val, ptr addrspace(1) %ptr, align 4
+  ret void
+}
+
+define protected amdgpu_kernel void @generic_loadstore_uniform(ptr addrspace(0) %ptr) sanitize_address {
+; OPT-LABEL: define protected amdgpu_kernel void @generic_loadstore_uniform(
+; OPT-SAME: ptr [[PTR:%.*]]) #[[ATTR1]] {
+; OPT-NEXT:  entry:
+; OPT-NEXT:    [[TMP0:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[PTR]])
+; OPT-NEXT:    [[TMP1:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR]])
+; OPT-NEXT:    [[TMP2:%.*]] = or i1 [[TMP0]], [[TMP1]]
+; OPT-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP2]], true
+; OPT-NEXT:    br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP21:%.*]]
+; OPT:       4:
+; OPT-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[PTR]] to i64
+; OPT-NEXT:    [[TMP6:%.*]] = lshr i64 [[TMP5]], 3
+; OPT-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 2147450880
+; OPT-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; OPT-NEXT:    [[TMP9:%.*]] = load i8, ptr [[TMP8]], align 1
+; OPT-NEXT:    [[TMP10:%.*]] = icmp ne i8 [[TMP9]], 0
+; OPT-NEXT:    [[TMP11:%.*]] = and i64 [[TMP5]], 7
+; OPT-NEXT:    [[TMP12:%.*]] = add i64 [[TMP11]], 3
+; OPT-NEXT:    [[TMP13:%.*]] = trunc i64 [[TMP12]] to i8
+; OPT-NEXT:    [[TMP14:%.*]] = icmp sge i8 [[TMP13]], [[TMP9]]
+; OPT-NEXT:    [[TMP15:%.*]] = and i1 [[TMP10]], [[TMP14]]
+; OPT-NEXT:    [[TMP16:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP15]])
+; OPT-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP16]], 0
+; OPT-NEXT:    br i1 [[TMP17]], label [[ASAN_REPORT:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
+; OPT:       asan.report:
+; OPT-NEXT:    br i1 [[TMP15]], label [[TMP18:%.*]], label [[TMP19:%.*]]
+; OPT:       18:
+; OPT-NEXT:    call void @__asan_report_load4(i64 [[TMP5]]) #[[ATTR5]]
+; OPT-NEXT:    call void @llvm.amdgcn.unreachable()
+; OPT-NEXT:    br label [[TMP19]]
+; OPT:       19:
+; OPT-NEXT:    br label [[TMP20]]
+; OPT:       20:
+; OPT-NEXT:    br label [[TMP21]]
+; OPT:       21:
+; OPT-NEXT:    [[VAL:%.*]] = load volatile i32, ptr [[PTR]], align 4
+; OPT-NEXT:    store volatile i32 [[VAL]], ptr [[PTR]], align 4
+; OPT-NEXT:    ret void
+;
+; LLC-W64-LABEL: generic_loadstore_uniform:
+; LLC-W64:       ; %bb.0: ; %entry
+; LLC-W64-NEXT:    s_load_dwordx2 s[34:35], s[8:9], 0x0
+; LLC-W64-NEXT:    s_add_u32 flat_scratch_lo, s12, s17
+; LLC-W64-NEXT:    s_addc_u32 flat_scratch_hi, s13, 0
+; LLC-W64-NEXT:    s_add_u32 s0, s0, s17
+; LLC-W64-NEXT:    s_addc_u32 s1, s1, 0
+; LLC-W64-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W64-NEXT:    s_lshr_b64 s[12:13], s[34:35], 3
+; LLC-W64-NEXT:    v_mov_b32_e32 v1, s12
+; LLC-W64-NEXT:    v_add_co_u32_e32 v2, vcc, 0x7fff8000, v1
+; LLC-W64-NEXT:    v_mov_b32_e32 v1, s13
+; LLC-W64-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
+; LLC-W64-NEXT:    flat_load_sbyte v1, v[2:3]
+; LLC-W64-NEXT:    v_and_b32_e64 v3, s34, 7
+; LLC-W64-NEXT:    v_mov_b32_e32 v2, 0
+; LLC-W64-NEXT:    v_add_u16_e32 v3, 3, v3
+; LLC-W64-NEXT:    s_mov_b32 s32, 0
+; LLC-W64-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; LLC-W64-NEXT:    v_cmp_ne_u16_sdwa s[12:13], v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+; LLC-W64-NEXT:    v_cmp_ge_i16_e32 vcc, v3, v1
+; LLC-W64-NEXT:    s_and_b64 vcc, s[12:13], vcc
+; LLC-W64-NEXT:    s_cbranch_vccz .LBB1_4
+; LLC-W64-NEXT:  ; %bb.1: ; %asan.report
+; LLC-W64-NEXT:    s_and_saveexec_b64 s[36:37], vcc
+; LLC-W64-NEXT:    s_cbranch_execz .LBB1_3
+; LLC-W64-NEXT:  ; %bb.2:
+; LLC-W64-NEXT:    s_add_u32 s8, s8, 8
+; LLC-W64-NEXT:    s_addc_u32 s9, s9, 0
+; LLC-W64-NEXT:    s_getpc_b64 s[12:13]
+; LLC-W64-NEXT:    s_add_u32 s12, s12, __asan_report_load4@gotpcrel32@lo+4
+; LLC-W64-NEXT:    s_addc_u32 s13, s13, __asan_report_load4@gotpcrel32@hi+12
+; LLC-W64-NEXT:    s_load_dwordx2 s[18:19], s[12:13], 0x0
+; LLC-W64-NEXT:    s_mov_b32 s12, s14
+; LLC-W64-NEXT:    s_mov_b32 s13, s15
+; LLC-W64-NEXT:    s_mov_b32 s14, s16
+; LLC-W64-NEXT:    v_mov_b32_e32 v31, v0
+; LLC-W64-NEXT:    v_mov_b32_e32 v0, s34
+; LLC-W64-NEXT:    v_mov_b32_e32 v1, s35
+; LLC-W64-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W64-NEXT:    s_swappc_b64 s[30:31], s[18:19]
+; LLC-W64-NEXT:    ; divergent unreachable
+; LLC-W64-NEXT:  .LBB1_3: ; %Flow
+; LLC-W64-NEXT:    s_or_b64 exec, exec, s[36:37]
+; LLC-W64-NEXT:  .LBB1_4:
+; LLC-W64-NEXT:    v_pk_mov_b32 v[0:1], s[34:35], s[34:35] op_sel:[0,1]
+; LLC-W64-NEXT:    flat_load_dword v2, v[0:1] glc
+; LLC-W64-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; LLC-W64-NEXT:    flat_store_dword v[0:1], v2
+; LLC-W64-NEXT:    s_waitcnt vmcnt(0)
+; LLC-W64-NEXT:    s_endpgm
+;
+; LLC-W32-LABEL: generic_loadstore_uniform:
+; LLC-W32:       ; %bb.0: ; %entry
+; LLC-W32-NEXT:    s_load_b64 s[34:35], s[4:5], 0x0
+; LLC-W32-NEXT:    s_mov_b64 s[10:11], s[6:7]
+; LLC-W32-NEXT:    s_mov_b32 s9, 0
+; LLC-W32-NEXT:    s_mov_b32 s32, 0
+; LLC-W32-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W32-NEXT:    s_lshr_b64 s[6:7], s[34:35], 3
+; LLC-W32-NEXT:    v_add_co_u32 v1, s6, 0x7fff8000, s6
+; LLC-W32-NEXT:    v_add_co_ci_u32_e64 v2, null, 0, s7, s6
+; LLC-W32-NEXT:    flat_load_i8 v1, v[1:2]
+; LLC-W32-NEXT:    v_and_b32_e64 v2, s34, 7
+; LLC-W32-NEXT:    v_add_nc_u16 v2, v2, 3
+; LLC-W32-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; LLC-W32-NEXT:    v_and_b32_e32 v3, 0xff, v1
+; LLC-W32-NEXT:    v_cmp_ge_i16_e32 vcc_lo, v2, v1
+; LLC-W32-NEXT:    v_cmp_ne_u16_e64 s6, 0, v3
+; LLC-W32-NEXT:    s_and_b32 s6, s6, vcc_lo
+; LLC-W32-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s6
+; LLC-W32-NEXT:    v_cmp_ne_u32_e64 s8, 0, v1
+; LLC-W32-NEXT:    s_cmp_eq_u64 s[8:9], 0
+; LLC-W32-NEXT:    s_cbranch_scc1 .LBB1_4
+; LLC-W32-NEXT:  ; %bb.1: ; %asan.report
+; LLC-W32-NEXT:    s_and_saveexec_b32 s33, s6
+; LLC-W32-NEXT:    s_cbranch_execz .LBB1_3
+; LLC-W32-NEXT:  ; %bb.2:
+; LLC-W32-NEXT:    s_add_u32 s8, s4, 8
+; LLC-W32-NEXT:    s_addc_u32 s9, s5, 0
+; LLC-W32-NEXT:    s_getpc_b64 s[4:5]
+; LLC-W32-NEXT:    s_add_u32 s4, s4, __asan_report_load4@gotpcrel32@lo+4
+; LLC-W32-NEXT:    s_addc_u32 s5, s5, __asan_report_load4@gotpcrel32@hi+12
+; LLC-W32-NEXT:    v_dual_mov_b32 v31, v0 :: v_dual_mov_b32 v0, s34
+; LLC-W32-NEXT:    s_load_b64 s[16:17], s[4:5], 0x0
+; LLC-W32-NEXT:    v_mov_b32_e32 v1, s35
+; LLC-W32-NEXT:    s_mov_b64 s[4:5], s[0:1]
+; LLC-W32-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; LLC-W32-NEXT:    s_mov_b32 s12, s13
+; LLC-W32-NEXT:    s_mov_b32 s13, s14
+; LLC-W32-NEXT:    s_mov_b32 s14, s15
+; LLC-W32-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W32-NEXT:    s_swappc_b64 s[30:31], s[16:17]
+; LLC-W32-NEXT:    ; divergent unreachable
+; LLC-W32-NEXT:  .LBB1_3: ; %Flow
+; LLC-W32-NEXT:    s_or_b32 exec_lo, exec_lo, s33
+; LLC-W32-NEXT:  .LBB1_4:
+; LLC-W32-NEXT:    v_dual_mov_b32 v0, s34 :: v_dual_mov_b32 v1, s35
+; LLC-W32-NEXT:    flat_load_b32 v2, v[0:1] glc dlc
+; LLC-W32-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; LLC-W32-NEXT:    flat_store_b32 v[0:1], v2 dlc
+; LLC-W32-NEXT:    s_waitcnt_vscnt null, 0x0
+; LLC-W32-NEXT:    s_endpgm
+entry:
+  %val = load volatile i32, ptr addrspace(0) %ptr, align 4
+  store volatile i32 %val, ptr addrspace(0) %ptr, align 4
+  ret void
+}
+
+define protected amdgpu_kernel void @global_store_nonuniform(ptr addrspace(1) %ptr) sanitize_address {
+; OPT-LABEL: define protected amdgpu_kernel void @global_store_nonuniform(
+; OPT-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
+; OPT-NEXT:  entry:
+; OPT-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; OPT-NEXT:    [[TID64:%.*]] = zext i32 [[TID]] to i64
+; OPT-NEXT:    [[PP1:%.*]] = getelementptr inbounds i64, ptr addrspace(1) [[PTR]], i64 [[TID64]]
+; OPT-NEXT:    [[TMP0:%.*]] = ptrtoint ptr addrspace(1) [[PP1]] to i64
+; OPT-NEXT:    [[TMP1:%.*]] = lshr i64 [[TMP0]], 3
+; OPT-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], 2147450880
+; OPT-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; OPT-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
+; OPT-NEXT:    [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
+; OPT-NEXT:    [[TMP6:%.*]] = and i64 [[TMP0]], 7
+; OPT-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 3
+; OPT-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i8
+; OPT-NEXT:    [[TMP9:%.*]] = icmp sge i8 [[TMP8]], [[TMP4]]
+; OPT-NEXT:    [[TMP10:%.*]] = and i1 [[TMP5]], [[TMP9]]
+; OPT-NEXT:    [[TMP11:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP10]])
+; OPT-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP11]], 0
+; OPT-NEXT:    br i1 [[TMP12]], label [[ASAN_REPORT:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
+; OPT:       asan.report:
+; OPT-NEXT:    br i1 [[TMP10]], label [[TMP13:%.*]], label [[TMP14:%.*]]
+; OPT:       13:
+; OPT-NEXT:    call void @__asan_report_store4(i64 [[TMP0]]) #[[ATTR5]]
+; OPT-NEXT:    call void @llvm.amdgcn.unreachable()
+; OPT-NEXT:    br label [[TMP14]]
+; OPT:       14:
+; OPT-NEXT:    br label [[TMP15]]
+; OPT:       15:
+; OPT-NEXT:    store i32 42, ptr addrspace(1) [[PP1]], align 4
+; OPT-NEXT:    ret void
+;
+; LLC-W64-LABEL: global_store_nonuniform:
+; LLC-W64:       ; %bb.0: ; %entry
+; LLC-W64-NEXT:    s_add_u32 flat_scratch_lo, s12, s17
+; LLC-W64-NEXT:    s_addc_u32 flat_scratch_hi, s13, 0
+; LLC-W64-NEXT:    s_load_dwordx2 s[12:13], s[8:9], 0x0
+; LLC-W64-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
+; LLC-W64-NEXT:    v_lshlrev_b32_e32 v1, 3, v1
+; LLC-W64-NEXT:    s_add_u32 s0, s0, s17
+; LLC-W64-NEXT:    s_addc_u32 s1, s1, 0
+; LLC-W64-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W64-NEXT:    v_mov_b32_e32 v2, s13
+; LLC-W64-NEXT:    v_add_co_u32_e32 v40, vcc, s12, v1
+; LLC-W64-NEXT:    v_addc_co_u32_e32 v41, vcc, 0, v2, vcc
+; LLC-W64-NEXT:    v_lshrrev_b64 v[2:3], 3, v[40:41]
+; LLC-W64-NEXT:    s_mov_b32 s12, 0x7fff8000
+; LLC-W64-NEXT:    v_add_co_u32_e32 v2, vcc, s12, v2
+; LLC-W64-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; LLC-W64-NEXT:    flat_load_sbyte v1, v[2:3]
+; LLC-W64-NEXT:    v_and_b32_e32 v3, 7, v40
+; LLC-W64-NEXT:    v_mov_b32_e32 v2, 0
+; LLC-W64-NEXT:    v_add_u16_e32 v3, 3, v3
+; LLC-W64-NEXT:    s_mov_b32 s32, 0
+; LLC-W64-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; LLC-W64-NEXT:    v_cmp_ne_u16_sdwa s[12:13], v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+; LLC-W64-NEXT:    v_cmp_ge_i16_e32 vcc, v3, v1
+; LLC-W64-NEXT:    s_and_b64 vcc, s[12:13], vcc
+; LLC-W64-NEXT:    s_cbranch_vccz .LBB2_4
+; LLC-W64-NEXT:  ; %bb.1: ; %asan.report
+; LLC-W64-NEXT:    s_and_saveexec_b64 s[34:35], vcc
+; LLC-W64-NEXT:    s_cbranch_execz .LBB2_3
+; LLC-W64-NEXT:  ; %bb.2:
+; LLC-W64-NEXT:    s_add_u32 s8, s8, 8
+; LLC-W64-NEXT:    s_addc_u32 s9, s9, 0
+; LLC-W64-NEXT:    s_getpc_b64 s[12:13]
+; LLC-W64-NEXT:    s_add_u32 s12, s12, __asan_report_store4@gotpcrel32@lo+4
+; LLC-W64-NEXT:    s_addc_u32 s13, s13, __asan_report_store4@gotpcrel32@hi+12
+; LLC-W64-NEXT:    s_load_dwordx2 s[18:19], s[12:13], 0x0
+; LLC-W64-NEXT:    s_mov_b32 s12, s14
+; LLC-W64-NEXT:    s_mov_b32 s13, s15
+; LLC-W64-NEXT:    s_mov_b32 s14, s16
+; LLC-W64-NEXT:    v_mov_b32_e32 v31, v0
+; LLC-W64-NEXT:    v_mov_b32_e32 v0, v40
+; LLC-W64-NEXT:    v_mov_b32_e32 v1, v41
+; LLC-W64-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W64-NEXT:    s_swappc_b64 s[30:31], s[18:19]
+; LLC-W64-NEXT:    ; divergent unreachable
+; LLC-W64-NEXT:  .LBB2_3: ; %Flow
+; LLC-W64-NEXT:    s_or_b64 exec, exec, s[34:35]
+; LLC-W64-NEXT:  .LBB2_4:
+; LLC-W64-NEXT:    v_mov_b32_e32 v0, 42
+; LLC-W64-NEXT:    global_store_dword v[40:41], v0, off
+; LLC-W64-NEXT:    s_endpgm
+;
+; LLC-W32-LABEL: global_store_nonuniform:
+; LLC-W32:       ; %bb.0: ; %entry
+; LLC-W32-NEXT:    s_mov_b64 s[10:11], s[6:7]
+; LLC-W32-NEXT:    s_load_b64 s[6:7], s[4:5], 0x0
+; LLC-W32-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
+; LLC-W32-NEXT:    s_mov_b32 s9, 0
+; LLC-W32-NEXT:    s_mov_b32 s32, 0
+; LLC-W32-NEXT:    v_lshlrev_b32_e32 v1, 3, v1
+; LLC-W32-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W32-NEXT:    v_add_co_u32 v40, s6, s6, v1
+; LLC-W32-NEXT:    v_add_co_ci_u32_e64 v41, null, s7, 0, s6
+; LLC-W32-NEXT:    v_lshrrev_b64 v[1:2], 3, v[40:41]
+; LLC-W32-NEXT:    v_add_co_u32 v1, vcc_lo, 0x7fff8000, v1
+; LLC-W32-NEXT:    v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo
+; LLC-W32-NEXT:    flat_load_i8 v1, v[1:2]
+; LLC-W32-NEXT:    v_and_b32_e32 v2, 7, v40
+; LLC-W32-NEXT:    v_add_nc_u16 v2, v2, 3
+; LLC-W32-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; LLC-W32-NEXT:    v_and_b32_e32 v3, 0xff, v1
+; LLC-W32-NEXT:    v_cmp_ge_i16_e32 vcc_lo, v2, v1
+; LLC-W32-NEXT:    v_cmp_ne_u16_e64 s6, 0, v3
+; LLC-W32-NEXT:    s_and_b32 s6, s6, vcc_lo
+; LLC-W32-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s6
+; LLC-W32-NEXT:    v_cmp_ne_u32_e64 s8, 0, v1
+; LLC-W32-NEXT:    s_cmp_eq_u64 s[8:9], 0
+; LLC-W32-NEXT:    s_cbranch_scc1 .LBB2_4
+; LLC-W32-NEXT:  ; %bb.1: ; %asan.report
+; LLC-W32-NEXT:    s_and_saveexec_b32 s33, s6
+; LLC-W32-NEXT:    s_cbranch_execz .LBB2_3
+; LLC-W32-NEXT:  ; %bb.2:
+; LLC-W32-NEXT:    s_add_u32 s8, s4, 8
+; LLC-W32-NEXT:    s_addc_u32 s9, s5, 0
+; LLC-W32-NEXT:    s_getpc_b64 s[4:5]
+; LLC-W32-NEXT:    s_add_u32 s4, s4, __asan_report_store4@gotpcrel32@lo+4
+; LLC-W32-NEXT:    s_addc_u32 s5, s5, __asan_report_store4@gotpcrel32@hi+12
+; LLC-W32-NEXT:    v_mov_b32_e32 v31, v0
+; LLC-W32-NEXT:    s_load_b64 s[16:17], s[4:5], 0x0
+; LLC-W32-NEXT:    v_dual_mov_b32 v0, v40 :: v_dual_mov_b32 v1, v41
+; LLC-W32-NEXT:    s_mov_b64 s[4:5], s[0:1]
+; LLC-W32-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; LLC-W32-NEXT:    s_mov_b32 s12, s13
+; LLC-W32-NEXT:    s_mov_b32 s13, s14
+; LLC-W32-NEXT:    s_mov_b32 s14, s15
+; LLC-W32-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W32-NEXT:    s_swappc_b64 s[30:31], s[16:17]
+; LLC-W32-NEXT:    ; divergent unreachable
+; LLC-W32-NEXT:  .LBB2_3: ; %Flow
+; LLC-W32-NEXT:    s_or_b32 exec_lo, exec_lo, s33
+; LLC-W32-NEXT:  .LBB2_4:
+; LLC-W32-NEXT:    v_mov_b32_e32 v0, 42
+; LLC-W32-NEXT:    global_store_b32 v[40:41], v0, off
+; LLC-W32-NEXT:    s_nop 0
+; LLC-W32-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; LLC-W32-NEXT:    s_endpgm
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid64 = zext i32 %tid to i64
+
+  %pp1 = getelementptr inbounds i64, ptr addrspace(1) %ptr, i64 %tid64
+  store i32 42, ptr addrspace(1) %pp1, align 4
+  ret void
+}
+
+define protected amdgpu_kernel void @generic_store_nonuniform(ptr addrspace(0) %ptr) sanitize_address {
+; OPT-LABEL: define protected amdgpu_kernel void @generic_store_nonuniform(
+; OPT-SAME: ptr [[PTR:%.*]]) #[[ATTR1]] {
+; OPT-NEXT:  entry:
+; OPT-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; OPT-NEXT:    [[TID64:%.*]] = zext i32 [[TID]] to i64
+; OPT-NEXT:    [[PP1:%.*]] = getelementptr inbounds i64, ptr [[PTR]], i64 [[TID64]]
+; OPT-NEXT:    [[TMP0:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[PP1]])
+; OPT-NEXT:    [[TMP1:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PP1]])
+; OPT-NEXT:    [[TMP2:%.*]] = or i1 [[TMP0]], [[TMP1]]
+; OPT-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP2]], true
+; OPT-NEXT:    br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP21:%.*]]
+; OPT:       4:
+; OPT-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[PP1]] to i64
+; OPT-NEXT:    [[TMP6:%.*]] = lshr i64 [[TMP5]], 3
+; OPT-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 2147450880
+; OPT-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; OPT-NEXT:    [[TMP9:%.*]] = load i8, ptr [[TMP8]], align 1
+; OPT-NEXT:    [[TMP10:%.*]] = icmp ne i8 [[TMP9]], 0
+; OPT-NEXT:    [[TMP11:%.*]] = and i64 [[TMP5]], 7
+; OPT-NEXT:    [[TMP12:%.*]] = add i64 [[TMP11]], 3
+; OPT-NEXT:    [[TMP13:%.*]] = trunc i64 [[TMP12]] to i8
+; OPT-NEXT:    [[TMP14:%.*]] = icmp sge i8 [[TMP13]], [[TMP9]]
+; OPT-NEXT:    [[TMP15:%.*]] = and i1 [[TMP10]], [[TMP14]]
+; OPT-NEXT:    [[TMP16:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP15]])
+; OPT-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP16]], 0
+; OPT-NEXT:    br i1 [[TMP17]], label [[ASAN_REPORT:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
+; OPT:       asan.report:
+; OPT-NEXT:    br i1 [[TMP15]], label [[TMP18:%.*]], label [[TMP19:%.*]]
+; OPT:       18:
+; OPT-NEXT:    call void @__asan_report_store4(i64 [[TMP5]]) #[[ATTR5]]
+; OPT-NEXT:    call void @llvm.amdgcn.unreachable()
+; OPT-NEXT:    br label [[TMP19]]
+; OPT:       19:
+; OPT-NEXT:    br label [[TMP20]]
+; OPT:       20:
+; OPT-NEXT:    br label [[TMP21]]
+; OPT:       21:
+; OPT-NEXT:    store i32 42, ptr [[PP1]], align 4
+; OPT-NEXT:    ret void
+;
+; LLC-W64-LABEL: generic_store_nonuniform:
+; LLC-W64:       ; %bb.0: ; %entry
+; LLC-W64-NEXT:    s_add_u32 flat_scratch_lo, s12, s17
+; LLC-W64-NEXT:    s_addc_u32 flat_scratch_hi, s13, 0
+; LLC-W64-NEXT:    s_load_dwordx2 s[12:13], s[8:9], 0x0
+; LLC-W64-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
+; LLC-W64-NEXT:    v_lshlrev_b32_e32 v1, 3, v1
+; LLC-W64-NEXT:    s_add_u32 s0, s0, s17
+; LLC-W64-NEXT:    s_addc_u32 s1, s1, 0
+; LLC-W64-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W64-NEXT:    v_mov_b32_e32 v2, s13
+; LLC-W64-NEXT:    v_add_co_u32_e32 v40, vcc, s12, v1
+; LLC-W64-NEXT:    v_addc_co_u32_e32 v41, vcc, 0, v2, vcc
+; LLC-W64-NEXT:    v_lshrrev_b64 v[2:3], 3, v[40:41]
+; LLC-W64-NEXT:    s_mov_b32 s12, 0x7fff8000
+; LLC-W64-NEXT:    v_add_co_u32_e32 v2, vcc, s12, v2
+; LLC-W64-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; LLC-W64-NEXT:    flat_load_sbyte v1, v[2:3]
+; LLC-W64-NEXT:    v_and_b32_e32 v3, 7, v40
+; LLC-W64-NEXT:    v_mov_b32_e32 v2, 0
+; LLC-W64-NEXT:    v_add_u16_e32 v3, 3, v3
+; LLC-W64-NEXT:    s_mov_b32 s32, 0
+; LLC-W64-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; LLC-W64-NEXT:    v_cmp_ne_u16_sdwa s[12:13], v1, v2 src0_sel:BYTE_0 src1_sel:DWORD
+; LLC-W64-NEXT:    v_cmp_ge_i16_e32 vcc, v3, v1
+; LLC-W64-NEXT:    s_and_b64 vcc, s[12:13], vcc
+; LLC-W64-NEXT:    s_cbranch_vccz .LBB3_4
+; LLC-W64-NEXT:  ; %bb.1: ; %asan.report
+; LLC-W64-NEXT:    s_and_saveexec_b64 s[34:35], vcc
+; LLC-W64-NEXT:    s_cbranch_execz .LBB3_3
+; LLC-W64-NEXT:  ; %bb.2:
+; LLC-W64-NEXT:    s_add_u32 s8, s8, 8
+; LLC-W64-NEXT:    s_addc_u32 s9, s9, 0
+; LLC-W64-NEXT:    s_getpc_b64 s[12:13]
+; LLC-W64-NEXT:    s_add_u32 s12, s12, __asan_report_store4@gotpcrel32@lo+4
+; LLC-W64-NEXT:    s_addc_u32 s13, s13, __asan_report_store4@gotpcrel32@hi+12
+; LLC-W64-NEXT:    s_load_dwordx2 s[18:19], s[12:13], 0x0
+; LLC-W64-NEXT:    s_mov_b32 s12, s14
+; LLC-W64-NEXT:    s_mov_b32 s13, s15
+; LLC-W64-NEXT:    s_mov_b32 s14, s16
+; LLC-W64-NEXT:    v_mov_b32_e32 v31, v0
+; LLC-W64-NEXT:    v_mov_b32_e32 v0, v40
+; LLC-W64-NEXT:    v_mov_b32_e32 v1, v41
+; LLC-W64-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W64-NEXT:    s_swappc_b64 s[30:31], s[18:19]
+; LLC-W64-NEXT:    ; divergent unreachable
+; LLC-W64-NEXT:  .LBB3_3: ; %Flow
+; LLC-W64-NEXT:    s_or_b64 exec, exec, s[34:35]
+; LLC-W64-NEXT:  .LBB3_4:
+; LLC-W64-NEXT:    v_mov_b32_e32 v0, 42
+; LLC-W64-NEXT:    global_store_dword v[40:41], v0, off
+; LLC-W64-NEXT:    s_endpgm
+;
+; LLC-W32-LABEL: generic_store_nonuniform:
+; LLC-W32:       ; %bb.0: ; %entry
+; LLC-W32-NEXT:    s_mov_b64 s[10:11], s[6:7]
+; LLC-W32-NEXT:    s_load_b64 s[6:7], s[4:5], 0x0
+; LLC-W32-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
+; LLC-W32-NEXT:    s_mov_b32 s9, 0
+; LLC-W32-NEXT:    s_mov_b32 s32, 0
+; LLC-W32-NEXT:    v_lshlrev_b32_e32 v1, 3, v1
+; LLC-W32-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W32-NEXT:    v_add_co_u32 v40, s6, s6, v1
+; LLC-W32-NEXT:    v_add_co_ci_u32_e64 v41, null, s7, 0, s6
+; LLC-W32-NEXT:    v_lshrrev_b64 v[1:2], 3, v[40:41]
+; LLC-W32-NEXT:    v_add_co_u32 v1, vcc_lo, 0x7fff8000, v1
+; LLC-W32-NEXT:    v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo
+; LLC-W32-NEXT:    flat_load_i8 v1, v[1:2]
+; LLC-W32-NEXT:    v_and_b32_e32 v2, 7, v40
+; LLC-W32-NEXT:    v_add_nc_u16 v2, v2, 3
+; LLC-W32-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; LLC-W32-NEXT:    v_and_b32_e32 v3, 0xff, v1
+; LLC-W32-NEXT:    v_cmp_ge_i16_e32 vcc_lo, v2, v1
+; LLC-W32-NEXT:    v_cmp_ne_u16_e64 s6, 0, v3
+; LLC-W32-NEXT:    s_and_b32 s6, s6, vcc_lo
+; LLC-W32-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s6
+; LLC-W32-NEXT:    v_cmp_ne_u32_e64 s8, 0, v1
+; LLC-W32-NEXT:    s_cmp_eq_u64 s[8:9], 0
+; LLC-W32-NEXT:    s_cbranch_scc1 .LBB3_4
+; LLC-W32-NEXT:  ; %bb.1: ; %asan.report
+; LLC-W32-NEXT:    s_and_saveexec_b32 s33, s6
+; LLC-W32-NEXT:    s_cbranch_execz .LBB3_3
+; LLC-W32-NEXT:  ; %bb.2:
+; LLC-W32-NEXT:    s_add_u32 s8, s4, 8
+; LLC-W32-NEXT:    s_addc_u32 s9, s5, 0
+; LLC-W32-NEXT:    s_getpc_b64 s[4:5]
+; LLC-W32-NEXT:    s_add_u32 s4, s4, __asan_report_store4@gotpcrel32@lo+4
+; LLC-W32-NEXT:    s_addc_u32 s5, s5, __asan_report_store4@gotpcrel32@hi+12
+; LLC-W32-NEXT:    v_mov_b32_e32 v31, v0
+; LLC-W32-NEXT:    s_load_b64 s[16:17], s[4:5], 0x0
+; LLC-W32-NEXT:    v_dual_mov_b32 v0, v40 :: v_dual_mov_b32 v1, v41
+; LLC-W32-NEXT:    s_mov_b64 s[4:5], s[0:1]
+; LLC-W32-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; LLC-W32-NEXT:    s_mov_b32 s12, s13
+; LLC-W32-NEXT:    s_mov_b32 s13, s14
+; LLC-W32-NEXT:    s_mov_b32 s14, s15
+; LLC-W32-NEXT:    s_waitcnt lgkmcnt(0)
+; LLC-W32-NEXT:    s_swappc_b64 s[30:31], s[16:17]
+; LLC-W32-NEXT:    ; divergent unreachable
+; LLC-W32-NEXT:  .LBB3_3: ; %Flow
+; LLC-W32-NEXT:    s_or_b32 exec_lo, exec_lo, s33
+; LLC-W32-NEXT:  .LBB3_4:
+; LLC-W32-NEXT:    v_mov_b32_e32 v0, 42
+; LLC-W32-NEXT:    global_store_b32 v[40:41], v0, off
+; LLC-W32-NEXT:    s_nop 0
+; LLC-W32-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; LLC-W32-NEXT:    s_endpgm
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid64 = zext i32 %tid to i64
+
+  %pp1 = getelementptr inbounds i64, ptr addrspace(0) %ptr, i64 %tid64
+  store i32 42, ptr addrspace(0) %pp1, align 4
+  ret void
+}
+
+attributes #0 = { nounwind readnone }
diff --git a/llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_constant_address_space.ll b/llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_constant_address_space.ll
index 911e8021a7361d9..36f0918bf5da81e 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_constant_address_space.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_constant_address_space.ll
@@ -1,9 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
 ; RUN: opt < %s -passes=asan -S | FileCheck %s
+; RUN: opt < %s -passes=asan -asan-recover -S | FileCheck %s --check-prefix=RECOV
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8"
 target triple = "amdgcn-amd-amdhsa"
 
 @x = addrspace(4) global [2 x i32] zeroinitializer, align 4
+ at x8 = addrspace(4) global [2 x i64] zeroinitializer, align 8
 
 define protected amdgpu_kernel void @constant_load(i64 %i) sanitize_address {
 ; CHECK-LABEL: define protected amdgpu_kernel void @constant_load(
@@ -16,23 +18,102 @@ define protected amdgpu_kernel void @constant_load(i64 %i) sanitize_address {
 ; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
 ; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
-; CHECK-NEXT:    br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP12:%.*]], !prof [[PROF1:![0-9]+]]
-; CHECK:       6:
-; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP0]], 7
-; CHECK-NEXT:    [[TMP8:%.*]] = add i64 [[TMP7]], 3
-; CHECK-NEXT:    [[TMP9:%.*]] = trunc i64 [[TMP8]] to i8
-; CHECK-NEXT:    [[TMP10:%.*]] = icmp sge i8 [[TMP9]], [[TMP4]]
-; CHECK-NEXT:    br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP12]]
-; CHECK:       11:
-; CHECK-NEXT:    call void @__asan_report_load4(i64 [[TMP0]]) #[[ATTR3:[0-9]+]]
-; CHECK-NEXT:    unreachable
-; CHECK:       12:
+; CHECK-NEXT:    [[TMP6:%.*]] = and i64 [[TMP0]], 7
+; CHECK-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 3
+; CHECK-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i8
+; CHECK-NEXT:    [[TMP9:%.*]] = icmp sge i8 [[TMP8]], [[TMP4]]
+; CHECK-NEXT:    [[TMP10:%.*]] = and i1 [[TMP5]], [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP10]])
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP11]], 0
+; CHECK-NEXT:    br i1 [[TMP12]], label [[ASAN_REPORT:%.*]], label [[TMP15:%.*]], !prof [[PROF2:![0-9]+]]
+; CHECK:       asan.report:
+; CHECK-NEXT:    br i1 [[TMP10]], label [[TMP13:%.*]], label [[TMP14:%.*]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__asan_report_load4(i64 [[TMP0]]) #[[ATTR5:[0-9]+]]
+; CHECK-NEXT:    call void @llvm.amdgcn.unreachable()
+; CHECK-NEXT:    br label [[TMP14]]
+; CHECK:       14:
+; CHECK-NEXT:    br label [[TMP15]]
+; CHECK:       15:
 ; CHECK-NEXT:    [[Q:%.*]] = load i32, ptr addrspace(4) [[A]], align 4
 ; CHECK-NEXT:    ret void
 ;
+; RECOV-LABEL: define protected amdgpu_kernel void @constant_load(
+; RECOV-SAME: i64 [[I:%.*]]) #[[ATTR0:[0-9]+]] {
+; RECOV-NEXT:  entry:
+; RECOV-NEXT:    [[A:%.*]] = getelementptr inbounds [2 x i32], ptr addrspace(4) @x, i64 0, i64 [[I]]
+; RECOV-NEXT:    [[TMP0:%.*]] = ptrtoint ptr addrspace(4) [[A]] to i64
+; RECOV-NEXT:    [[TMP1:%.*]] = lshr i64 [[TMP0]], 3
+; RECOV-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], 2147450880
+; RECOV-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; RECOV-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
+; RECOV-NEXT:    [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
+; RECOV-NEXT:    [[TMP6:%.*]] = and i64 [[TMP0]], 7
+; RECOV-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 3
+; RECOV-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i8
+; RECOV-NEXT:    [[TMP9:%.*]] = icmp sge i8 [[TMP8]], [[TMP4]]
+; RECOV-NEXT:    [[TMP10:%.*]] = and i1 [[TMP5]], [[TMP9]]
+; RECOV-NEXT:    br i1 [[TMP10]], label [[ASAN_REPORT:%.*]], label [[TMP11:%.*]], !prof [[PROF2:![0-9]+]]
+; RECOV:       asan.report:
+; RECOV-NEXT:    call void @__asan_report_load4_noabort(i64 [[TMP0]]) #[[ATTR5:[0-9]+]]
+; RECOV-NEXT:    br label [[TMP11]]
+; RECOV:       11:
+; RECOV-NEXT:    [[Q:%.*]] = load i32, ptr addrspace(4) [[A]], align 4
+; RECOV-NEXT:    ret void
+;
 entry:
 
   %a = getelementptr inbounds [2 x i32], ptr  addrspace(4) @x, i64 0, i64 %i
   %q = load i32, ptr addrspace(4) %a, align 4
   ret void
 }
+
+define protected amdgpu_kernel void @constant_load_8(i64 %i) sanitize_address {
+; CHECK-LABEL: define protected amdgpu_kernel void @constant_load_8(
+; CHECK-SAME: i64 [[I:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds [2 x i64], ptr addrspace(4) @x8, i64 0, i64 [[I]]
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr addrspace(4) [[A]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[TMP0]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], 2147450880
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
+; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP5]])
+; CHECK-NEXT:    [[TMP7:%.*]] = icmp ne i64 [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[TMP7]], label [[ASAN_REPORT:%.*]], label [[TMP10:%.*]], !prof [[PROF2]]
+; CHECK:       asan.report:
+; CHECK-NEXT:    br i1 [[TMP5]], label [[TMP8:%.*]], label [[TMP9:%.*]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__asan_report_load8(i64 [[TMP0]]) #[[ATTR5]]
+; CHECK-NEXT:    call void @llvm.amdgcn.unreachable()
+; CHECK-NEXT:    br label [[TMP9]]
+; CHECK:       9:
+; CHECK-NEXT:    br label [[TMP10]]
+; CHECK:       10:
+; CHECK-NEXT:    [[Q:%.*]] = load i64, ptr addrspace(4) [[A]], align 8
+; CHECK-NEXT:    ret void
+;
+; RECOV-LABEL: define protected amdgpu_kernel void @constant_load_8(
+; RECOV-SAME: i64 [[I:%.*]]) #[[ATTR0]] {
+; RECOV-NEXT:  entry:
+; RECOV-NEXT:    [[A:%.*]] = getelementptr inbounds [2 x i64], ptr addrspace(4) @x8, i64 0, i64 [[I]]
+; RECOV-NEXT:    [[TMP0:%.*]] = ptrtoint ptr addrspace(4) [[A]] to i64
+; RECOV-NEXT:    [[TMP1:%.*]] = lshr i64 [[TMP0]], 3
+; RECOV-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], 2147450880
+; RECOV-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; RECOV-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
+; RECOV-NEXT:    [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
+; RECOV-NEXT:    br i1 [[TMP5]], label [[ASAN_REPORT:%.*]], label [[TMP6:%.*]], !prof [[PROF2]]
+; RECOV:       asan.report:
+; RECOV-NEXT:    call void @__asan_report_load8_noabort(i64 [[TMP0]]) #[[ATTR5]]
+; RECOV-NEXT:    br label [[TMP6]]
+; RECOV:       6:
+; RECOV-NEXT:    [[Q:%.*]] = load i64, ptr addrspace(4) [[A]], align 8
+; RECOV-NEXT:    ret void
+;
+entry:
+  %a = getelementptr inbounds [2 x i64], ptr  addrspace(4) @x8, i64 0, i64 %i
+  %q = load i64, ptr addrspace(4) %a, align 8
+  ret void
+}
diff --git a/llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_generic_address_space.ll b/llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_generic_address_space.ll
index 34b7f04592e25cc..b4dba5b16e5709c 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_generic_address_space.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_generic_address_space.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
 ; RUN: opt < %s -passes=asan -S | FileCheck %s
+; RUN: opt < %s -passes=asan -asan-recover -S | FileCheck %s --check-prefix=RECOV
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8"
 target triple = "amdgcn-amd-amdhsa"
 
@@ -12,7 +13,7 @@ define protected amdgpu_kernel void @generic_store(ptr addrspace(1) %p, i32 %i)
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[Q]])
 ; CHECK-NEXT:    [[TMP2:%.*]] = or i1 [[TMP0]], [[TMP1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP2]], true
-; CHECK-NEXT:    br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP18:%.*]]
+; CHECK-NEXT:    br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP21:%.*]]
 ; CHECK:       4:
 ; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[Q]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = lshr i64 [[TMP5]], 3
@@ -20,22 +21,59 @@ define protected amdgpu_kernel void @generic_store(ptr addrspace(1) %p, i32 %i)
 ; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
 ; CHECK-NEXT:    [[TMP9:%.*]] = load i8, ptr [[TMP8]], align 1
 ; CHECK-NEXT:    [[TMP10:%.*]] = icmp ne i8 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP17:%.*]], !prof [[PROF0:![0-9]+]]
-; CHECK:       11:
-; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP5]], 7
-; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP12]], 3
-; CHECK-NEXT:    [[TMP14:%.*]] = trunc i64 [[TMP13]] to i8
-; CHECK-NEXT:    [[TMP15:%.*]] = icmp sge i8 [[TMP14]], [[TMP9]]
-; CHECK-NEXT:    br i1 [[TMP15]], label [[TMP16:%.*]], label [[TMP17]]
-; CHECK:       16:
-; CHECK-NEXT:    call void @__asan_report_store4(i64 [[TMP5]]) #[[ATTR3:[0-9]+]]
-; CHECK-NEXT:    unreachable
-; CHECK:       17:
-; CHECK-NEXT:    br label [[TMP18]]
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP5]], 7
+; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP11]], 3
+; CHECK-NEXT:    [[TMP13:%.*]] = trunc i64 [[TMP12]] to i8
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp sge i8 [[TMP13]], [[TMP9]]
+; CHECK-NEXT:    [[TMP15:%.*]] = and i1 [[TMP10]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP15]])
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP16]], 0
+; CHECK-NEXT:    br i1 [[TMP17]], label [[ASAN_REPORT:%.*]], label [[TMP20:%.*]], !prof [[PROF0:![0-9]+]]
+; CHECK:       asan.report:
+; CHECK-NEXT:    br i1 [[TMP15]], label [[TMP18:%.*]], label [[TMP19:%.*]]
 ; CHECK:       18:
+; CHECK-NEXT:    call void @__asan_report_store4(i64 [[TMP5]]) #[[ATTR5:[0-9]+]]
+; CHECK-NEXT:    call void @llvm.amdgcn.unreachable()
+; CHECK-NEXT:    br label [[TMP19]]
+; CHECK:       19:
+; CHECK-NEXT:    br label [[TMP20]]
+; CHECK:       20:
+; CHECK-NEXT:    br label [[TMP21]]
+; CHECK:       21:
 ; CHECK-NEXT:    store i32 0, ptr [[Q]], align 4
 ; CHECK-NEXT:    ret void
 ;
+; RECOV-LABEL: define protected amdgpu_kernel void @generic_store(
+; RECOV-SAME: ptr addrspace(1) [[P:%.*]], i32 [[I:%.*]]) #[[ATTR0:[0-9]+]] {
+; RECOV-NEXT:  entry:
+; RECOV-NEXT:    [[Q:%.*]] = addrspacecast ptr addrspace(1) [[P]] to ptr
+; RECOV-NEXT:    [[TMP0:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[Q]])
+; RECOV-NEXT:    [[TMP1:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[Q]])
+; RECOV-NEXT:    [[TMP2:%.*]] = or i1 [[TMP0]], [[TMP1]]
+; RECOV-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP2]], true
+; RECOV-NEXT:    br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP17:%.*]]
+; RECOV:       4:
+; RECOV-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[Q]] to i64
+; RECOV-NEXT:    [[TMP6:%.*]] = lshr i64 [[TMP5]], 3
+; RECOV-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 2147450880
+; RECOV-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; RECOV-NEXT:    [[TMP9:%.*]] = load i8, ptr [[TMP8]], align 1
+; RECOV-NEXT:    [[TMP10:%.*]] = icmp ne i8 [[TMP9]], 0
+; RECOV-NEXT:    [[TMP11:%.*]] = and i64 [[TMP5]], 7
+; RECOV-NEXT:    [[TMP12:%.*]] = add i64 [[TMP11]], 3
+; RECOV-NEXT:    [[TMP13:%.*]] = trunc i64 [[TMP12]] to i8
+; RECOV-NEXT:    [[TMP14:%.*]] = icmp sge i8 [[TMP13]], [[TMP9]]
+; RECOV-NEXT:    [[TMP15:%.*]] = and i1 [[TMP10]], [[TMP14]]
+; RECOV-NEXT:    br i1 [[TMP15]], label [[ASAN_REPORT:%.*]], label [[TMP16:%.*]], !prof [[PROF0:![0-9]+]]
+; RECOV:       asan.report:
+; RECOV-NEXT:    call void @__asan_report_store4_noabort(i64 [[TMP5]]) #[[ATTR5:[0-9]+]]
+; RECOV-NEXT:    br label [[TMP16]]
+; RECOV:       16:
+; RECOV-NEXT:    br label [[TMP17]]
+; RECOV:       17:
+; RECOV-NEXT:    store i32 0, ptr [[Q]], align 4
+; RECOV-NEXT:    ret void
+;
 entry:
 
   %q = addrspacecast ptr addrspace(1) %p to ptr
@@ -52,7 +90,7 @@ define protected amdgpu_kernel void @generic_load(ptr addrspace(1) %p, i32 %i) s
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[Q]])
 ; CHECK-NEXT:    [[TMP2:%.*]] = or i1 [[TMP0]], [[TMP1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP2]], true
-; CHECK-NEXT:    br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP18:%.*]]
+; CHECK-NEXT:    br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP21:%.*]]
 ; CHECK:       4:
 ; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[Q]] to i64
 ; CHECK-NEXT:    [[TMP6:%.*]] = lshr i64 [[TMP5]], 3
@@ -60,25 +98,195 @@ define protected amdgpu_kernel void @generic_load(ptr addrspace(1) %p, i32 %i) s
 ; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
 ; CHECK-NEXT:    [[TMP9:%.*]] = load i8, ptr [[TMP8]], align 1
 ; CHECK-NEXT:    [[TMP10:%.*]] = icmp ne i8 [[TMP9]], 0
-; CHECK-NEXT:    br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
-; CHECK:       11:
-; CHECK-NEXT:    [[TMP12:%.*]] = and i64 [[TMP5]], 7
-; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[TMP12]], 3
-; CHECK-NEXT:    [[TMP14:%.*]] = trunc i64 [[TMP13]] to i8
-; CHECK-NEXT:    [[TMP15:%.*]] = icmp sge i8 [[TMP14]], [[TMP9]]
-; CHECK-NEXT:    br i1 [[TMP15]], label [[TMP16:%.*]], label [[TMP17]]
-; CHECK:       16:
-; CHECK-NEXT:    call void @__asan_report_load4(i64 [[TMP5]]) #[[ATTR3]]
-; CHECK-NEXT:    unreachable
-; CHECK:       17:
-; CHECK-NEXT:    br label [[TMP18]]
+; CHECK-NEXT:    [[TMP11:%.*]] = and i64 [[TMP5]], 7
+; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[TMP11]], 3
+; CHECK-NEXT:    [[TMP13:%.*]] = trunc i64 [[TMP12]] to i8
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp sge i8 [[TMP13]], [[TMP9]]
+; CHECK-NEXT:    [[TMP15:%.*]] = and i1 [[TMP10]], [[TMP14]]
+; CHECK-NEXT:    [[TMP16:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP15]])
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ne i64 [[TMP16]], 0
+; CHECK-NEXT:    br i1 [[TMP17]], label [[ASAN_REPORT:%.*]], label [[TMP20:%.*]], !prof [[PROF0]]
+; CHECK:       asan.report:
+; CHECK-NEXT:    br i1 [[TMP15]], label [[TMP18:%.*]], label [[TMP19:%.*]]
 ; CHECK:       18:
+; CHECK-NEXT:    call void @__asan_report_load4(i64 [[TMP5]]) #[[ATTR5]]
+; CHECK-NEXT:    call void @llvm.amdgcn.unreachable()
+; CHECK-NEXT:    br label [[TMP19]]
+; CHECK:       19:
+; CHECK-NEXT:    br label [[TMP20]]
+; CHECK:       20:
+; CHECK-NEXT:    br label [[TMP21]]
+; CHECK:       21:
 ; CHECK-NEXT:    [[R:%.*]] = load i32, ptr [[Q]], align 4
 ; CHECK-NEXT:    ret void
 ;
+; RECOV-LABEL: define protected amdgpu_kernel void @generic_load(
+; RECOV-SAME: ptr addrspace(1) [[P:%.*]], i32 [[I:%.*]]) #[[ATTR0]] {
+; RECOV-NEXT:  entry:
+; RECOV-NEXT:    [[Q:%.*]] = addrspacecast ptr addrspace(1) [[P]] to ptr
+; RECOV-NEXT:    [[TMP0:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[Q]])
+; RECOV-NEXT:    [[TMP1:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[Q]])
+; RECOV-NEXT:    [[TMP2:%.*]] = or i1 [[TMP0]], [[TMP1]]
+; RECOV-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP2]], true
+; RECOV-NEXT:    br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP17:%.*]]
+; RECOV:       4:
+; RECOV-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[Q]] to i64
+; RECOV-NEXT:    [[TMP6:%.*]] = lshr i64 [[TMP5]], 3
+; RECOV-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 2147450880
+; RECOV-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; RECOV-NEXT:    [[TMP9:%.*]] = load i8, ptr [[TMP8]], align 1
+; RECOV-NEXT:    [[TMP10:%.*]] = icmp ne i8 [[TMP9]], 0
+; RECOV-NEXT:    [[TMP11:%.*]] = and i64 [[TMP5]], 7
+; RECOV-NEXT:    [[TMP12:%.*]] = add i64 [[TMP11]], 3
+; RECOV-NEXT:    [[TMP13:%.*]] = trunc i64 [[TMP12]] to i8
+; RECOV-NEXT:    [[TMP14:%.*]] = icmp sge i8 [[TMP13]], [[TMP9]]
+; RECOV-NEXT:    [[TMP15:%.*]] = and i1 [[TMP10]], [[TMP14]]
+; RECOV-NEXT:    br i1 [[TMP15]], label [[ASAN_REPORT:%.*]], label [[TMP16:%.*]], !prof [[PROF0]]
+; RECOV:       asan.report:
+; RECOV-NEXT:    call void @__asan_report_load4_noabort(i64 [[TMP5]]) #[[ATTR5]]
+; RECOV-NEXT:    br label [[TMP16]]
+; RECOV:       16:
+; RECOV-NEXT:    br label [[TMP17]]
+; RECOV:       17:
+; RECOV-NEXT:    [[R:%.*]] = load i32, ptr [[Q]], align 4
+; RECOV-NEXT:    ret void
+;
 entry:
 
   %q = addrspacecast ptr addrspace(1) %p to ptr
   %r = load i32, ptr %q, align 4
   ret void
 }
+
+define protected amdgpu_kernel void @generic_store_8(ptr addrspace(1) %p) sanitize_address {
+; CHECK-LABEL: define protected amdgpu_kernel void @generic_store_8(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[Q:%.*]] = addrspacecast ptr addrspace(1) [[P]] to ptr
+; CHECK-NEXT:    [[TMP0:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[Q]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[Q]])
+; CHECK-NEXT:    [[TMP2:%.*]] = or i1 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP2]], true
+; CHECK-NEXT:    br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP16:%.*]]
+; CHECK:       4:
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[Q]] to i64
+; CHECK-NEXT:    [[TMP6:%.*]] = lshr i64 [[TMP5]], 3
+; CHECK-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 2147450880
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    [[TMP9:%.*]] = load i8, ptr [[TMP8]], align 1
+; CHECK-NEXT:    [[TMP10:%.*]] = icmp ne i8 [[TMP9]], 0
+; CHECK-NEXT:    [[TMP11:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP10]])
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP11]], 0
+; CHECK-NEXT:    br i1 [[TMP12]], label [[ASAN_REPORT:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
+; CHECK:       asan.report:
+; CHECK-NEXT:    br i1 [[TMP10]], label [[TMP13:%.*]], label [[TMP14:%.*]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__asan_report_store8(i64 [[TMP5]]) #[[ATTR5]]
+; CHECK-NEXT:    call void @llvm.amdgcn.unreachable()
+; CHECK-NEXT:    br label [[TMP14]]
+; CHECK:       14:
+; CHECK-NEXT:    br label [[TMP15]]
+; CHECK:       15:
+; CHECK-NEXT:    br label [[TMP16]]
+; CHECK:       16:
+; CHECK-NEXT:    store i64 0, ptr [[Q]], align 8
+; CHECK-NEXT:    ret void
+;
+; RECOV-LABEL: define protected amdgpu_kernel void @generic_store_8(
+; RECOV-SAME: ptr addrspace(1) [[P:%.*]]) #[[ATTR0]] {
+; RECOV-NEXT:  entry:
+; RECOV-NEXT:    [[Q:%.*]] = addrspacecast ptr addrspace(1) [[P]] to ptr
+; RECOV-NEXT:    [[TMP0:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[Q]])
+; RECOV-NEXT:    [[TMP1:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[Q]])
+; RECOV-NEXT:    [[TMP2:%.*]] = or i1 [[TMP0]], [[TMP1]]
+; RECOV-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP2]], true
+; RECOV-NEXT:    br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP12:%.*]]
+; RECOV:       4:
+; RECOV-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[Q]] to i64
+; RECOV-NEXT:    [[TMP6:%.*]] = lshr i64 [[TMP5]], 3
+; RECOV-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 2147450880
+; RECOV-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; RECOV-NEXT:    [[TMP9:%.*]] = load i8, ptr [[TMP8]], align 1
+; RECOV-NEXT:    [[TMP10:%.*]] = icmp ne i8 [[TMP9]], 0
+; RECOV-NEXT:    br i1 [[TMP10]], label [[ASAN_REPORT:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; RECOV:       asan.report:
+; RECOV-NEXT:    call void @__asan_report_store8_noabort(i64 [[TMP5]]) #[[ATTR5]]
+; RECOV-NEXT:    br label [[TMP11]]
+; RECOV:       11:
+; RECOV-NEXT:    br label [[TMP12]]
+; RECOV:       12:
+; RECOV-NEXT:    store i64 0, ptr [[Q]], align 8
+; RECOV-NEXT:    ret void
+;
+entry:
+  %q = addrspacecast ptr addrspace(1) %p to ptr
+  store i64 0, ptr %q, align 8
+  ret void
+}
+
+define protected amdgpu_kernel void @generic_load_8(ptr addrspace(1) %p) sanitize_address {
+; CHECK-LABEL: define protected amdgpu_kernel void @generic_load_8(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[Q:%.*]] = addrspacecast ptr addrspace(1) [[P]] to ptr
+; CHECK-NEXT:    [[TMP0:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[Q]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[Q]])
+; CHECK-NEXT:    [[TMP2:%.*]] = or i1 [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP2]], true
+; CHECK-NEXT:    br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP16:%.*]]
+; CHECK:       4:
+; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[Q]] to i64
+; CHECK-NEXT:    [[TMP6:%.*]] = lshr i64 [[TMP5]], 3
+; CHECK-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 2147450880
+; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; CHECK-NEXT:    [[TMP9:%.*]] = load i8, ptr [[TMP8]], align 1
+; CHECK-NEXT:    [[TMP10:%.*]] = icmp ne i8 [[TMP9]], 0
+; CHECK-NEXT:    [[TMP11:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP10]])
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP11]], 0
+; CHECK-NEXT:    br i1 [[TMP12]], label [[ASAN_REPORT:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
+; CHECK:       asan.report:
+; CHECK-NEXT:    br i1 [[TMP10]], label [[TMP13:%.*]], label [[TMP14:%.*]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__asan_report_load8(i64 [[TMP5]]) #[[ATTR5]]
+; CHECK-NEXT:    call void @llvm.amdgcn.unreachable()
+; CHECK-NEXT:    br label [[TMP14]]
+; CHECK:       14:
+; CHECK-NEXT:    br label [[TMP15]]
+; CHECK:       15:
+; CHECK-NEXT:    br label [[TMP16]]
+; CHECK:       16:
+; CHECK-NEXT:    [[R:%.*]] = load i64, ptr [[Q]], align 8
+; CHECK-NEXT:    ret void
+;
+; RECOV-LABEL: define protected amdgpu_kernel void @generic_load_8(
+; RECOV-SAME: ptr addrspace(1) [[P:%.*]]) #[[ATTR0]] {
+; RECOV-NEXT:  entry:
+; RECOV-NEXT:    [[Q:%.*]] = addrspacecast ptr addrspace(1) [[P]] to ptr
+; RECOV-NEXT:    [[TMP0:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[Q]])
+; RECOV-NEXT:    [[TMP1:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[Q]])
+; RECOV-NEXT:    [[TMP2:%.*]] = or i1 [[TMP0]], [[TMP1]]
+; RECOV-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP2]], true
+; RECOV-NEXT:    br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP12:%.*]]
+; RECOV:       4:
+; RECOV-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[Q]] to i64
+; RECOV-NEXT:    [[TMP6:%.*]] = lshr i64 [[TMP5]], 3
+; RECOV-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 2147450880
+; RECOV-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
+; RECOV-NEXT:    [[TMP9:%.*]] = load i8, ptr [[TMP8]], align 1
+; RECOV-NEXT:    [[TMP10:%.*]] = icmp ne i8 [[TMP9]], 0
+; RECOV-NEXT:    br i1 [[TMP10]], label [[ASAN_REPORT:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; RECOV:       asan.report:
+; RECOV-NEXT:    call void @__asan_report_load8_noabort(i64 [[TMP5]]) #[[ATTR5]]
+; RECOV-NEXT:    br label [[TMP11]]
+; RECOV:       11:
+; RECOV-NEXT:    br label [[TMP12]]
+; RECOV:       12:
+; RECOV-NEXT:    [[R:%.*]] = load i64, ptr [[Q]], align 8
+; RECOV-NEXT:    ret void
+;
+entry:
+
+  %q = addrspacecast ptr addrspace(1) %p to ptr
+  %r = load i64, ptr %q, align 8
+  ret void
+}
diff --git a/llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_global_address_space.ll b/llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_global_address_space.ll
index d8708e7448355bd..14832678a8e9cf3 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_global_address_space.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/AMDGPU/asan_instrument_global_address_space.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
 ; RUN: opt < %s -passes=asan -S | FileCheck %s
+; RUN: opt < %s -passes=asan -asan-recover -S | FileCheck %s --check-prefix=RECOV
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8"
 target triple = "amdgcn-amd-amdhsa"
 
@@ -13,20 +14,48 @@ define protected amdgpu_kernel void @global_store(ptr addrspace(1) %p, i32 %i) s
 ; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
 ; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
-; CHECK-NEXT:    br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP12:%.*]], !prof [[PROF0:![0-9]+]]
-; CHECK:       6:
-; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP0]], 7
-; CHECK-NEXT:    [[TMP8:%.*]] = add i64 [[TMP7]], 3
-; CHECK-NEXT:    [[TMP9:%.*]] = trunc i64 [[TMP8]] to i8
-; CHECK-NEXT:    [[TMP10:%.*]] = icmp sge i8 [[TMP9]], [[TMP4]]
-; CHECK-NEXT:    br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP12]]
-; CHECK:       11:
-; CHECK-NEXT:    call void @__asan_report_store4(i64 [[TMP0]]) #[[ATTR3:[0-9]+]]
-; CHECK-NEXT:    unreachable
-; CHECK:       12:
+; CHECK-NEXT:    [[TMP6:%.*]] = and i64 [[TMP0]], 7
+; CHECK-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 3
+; CHECK-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i8
+; CHECK-NEXT:    [[TMP9:%.*]] = icmp sge i8 [[TMP8]], [[TMP4]]
+; CHECK-NEXT:    [[TMP10:%.*]] = and i1 [[TMP5]], [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP10]])
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP11]], 0
+; CHECK-NEXT:    br i1 [[TMP12]], label [[ASAN_REPORT:%.*]], label [[TMP15:%.*]], !prof [[PROF0:![0-9]+]]
+; CHECK:       asan.report:
+; CHECK-NEXT:    br i1 [[TMP10]], label [[TMP13:%.*]], label [[TMP14:%.*]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__asan_report_store4(i64 [[TMP0]]) #[[ATTR5:[0-9]+]]
+; CHECK-NEXT:    call void @llvm.amdgcn.unreachable()
+; CHECK-NEXT:    br label [[TMP14]]
+; CHECK:       14:
+; CHECK-NEXT:    br label [[TMP15]]
+; CHECK:       15:
 ; CHECK-NEXT:    store i32 0, ptr addrspace(1) [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
+; RECOV-LABEL: define protected amdgpu_kernel void @global_store(
+; RECOV-SAME: ptr addrspace(1) [[P:%.*]], i32 [[I:%.*]]) #[[ATTR0:[0-9]+]] {
+; RECOV-NEXT:  entry:
+; RECOV-NEXT:    [[TMP0:%.*]] = ptrtoint ptr addrspace(1) [[P]] to i64
+; RECOV-NEXT:    [[TMP1:%.*]] = lshr i64 [[TMP0]], 3
+; RECOV-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], 2147450880
+; RECOV-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; RECOV-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
+; RECOV-NEXT:    [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
+; RECOV-NEXT:    [[TMP6:%.*]] = and i64 [[TMP0]], 7
+; RECOV-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 3
+; RECOV-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i8
+; RECOV-NEXT:    [[TMP9:%.*]] = icmp sge i8 [[TMP8]], [[TMP4]]
+; RECOV-NEXT:    [[TMP10:%.*]] = and i1 [[TMP5]], [[TMP9]]
+; RECOV-NEXT:    br i1 [[TMP10]], label [[ASAN_REPORT:%.*]], label [[TMP11:%.*]], !prof [[PROF0:![0-9]+]]
+; RECOV:       asan.report:
+; RECOV-NEXT:    call void @__asan_report_store4_noabort(i64 [[TMP0]]) #[[ATTR5:[0-9]+]]
+; RECOV-NEXT:    br label [[TMP11]]
+; RECOV:       11:
+; RECOV-NEXT:    store i32 0, ptr addrspace(1) [[P]], align 4
+; RECOV-NEXT:    ret void
+;
 entry:
 
   store i32 0, ptr addrspace(1) %p, align 4
@@ -43,22 +72,144 @@ define protected amdgpu_kernel void @global_load(ptr addrspace(1) %p, i32 %i) sa
 ; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
 ; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
-; CHECK-NEXT:    br i1 [[TMP5]], label [[TMP6:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
-; CHECK:       6:
-; CHECK-NEXT:    [[TMP7:%.*]] = and i64 [[TMP0]], 7
-; CHECK-NEXT:    [[TMP8:%.*]] = add i64 [[TMP7]], 3
-; CHECK-NEXT:    [[TMP9:%.*]] = trunc i64 [[TMP8]] to i8
-; CHECK-NEXT:    [[TMP10:%.*]] = icmp sge i8 [[TMP9]], [[TMP4]]
-; CHECK-NEXT:    br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP12]]
-; CHECK:       11:
-; CHECK-NEXT:    call void @__asan_report_load4(i64 [[TMP0]]) #[[ATTR3]]
-; CHECK-NEXT:    unreachable
-; CHECK:       12:
+; CHECK-NEXT:    [[TMP6:%.*]] = and i64 [[TMP0]], 7
+; CHECK-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 3
+; CHECK-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i8
+; CHECK-NEXT:    [[TMP9:%.*]] = icmp sge i8 [[TMP8]], [[TMP4]]
+; CHECK-NEXT:    [[TMP10:%.*]] = and i1 [[TMP5]], [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP10]])
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp ne i64 [[TMP11]], 0
+; CHECK-NEXT:    br i1 [[TMP12]], label [[ASAN_REPORT:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
+; CHECK:       asan.report:
+; CHECK-NEXT:    br i1 [[TMP10]], label [[TMP13:%.*]], label [[TMP14:%.*]]
+; CHECK:       13:
+; CHECK-NEXT:    call void @__asan_report_load4(i64 [[TMP0]]) #[[ATTR5]]
+; CHECK-NEXT:    call void @llvm.amdgcn.unreachable()
+; CHECK-NEXT:    br label [[TMP14]]
+; CHECK:       14:
+; CHECK-NEXT:    br label [[TMP15]]
+; CHECK:       15:
 ; CHECK-NEXT:    [[Q:%.*]] = load i32, ptr addrspace(1) [[P]], align 4
 ; CHECK-NEXT:    ret void
 ;
+; RECOV-LABEL: define protected amdgpu_kernel void @global_load(
+; RECOV-SAME: ptr addrspace(1) [[P:%.*]], i32 [[I:%.*]]) #[[ATTR0]] {
+; RECOV-NEXT:  entry:
+; RECOV-NEXT:    [[TMP0:%.*]] = ptrtoint ptr addrspace(1) [[P]] to i64
+; RECOV-NEXT:    [[TMP1:%.*]] = lshr i64 [[TMP0]], 3
+; RECOV-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], 2147450880
+; RECOV-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; RECOV-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
+; RECOV-NEXT:    [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
+; RECOV-NEXT:    [[TMP6:%.*]] = and i64 [[TMP0]], 7
+; RECOV-NEXT:    [[TMP7:%.*]] = add i64 [[TMP6]], 3
+; RECOV-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i8
+; RECOV-NEXT:    [[TMP9:%.*]] = icmp sge i8 [[TMP8]], [[TMP4]]
+; RECOV-NEXT:    [[TMP10:%.*]] = and i1 [[TMP5]], [[TMP9]]
+; RECOV-NEXT:    br i1 [[TMP10]], label [[ASAN_REPORT:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
+; RECOV:       asan.report:
+; RECOV-NEXT:    call void @__asan_report_load4_noabort(i64 [[TMP0]]) #[[ATTR5]]
+; RECOV-NEXT:    br label [[TMP11]]
+; RECOV:       11:
+; RECOV-NEXT:    [[Q:%.*]] = load i32, ptr addrspace(1) [[P]], align 4
+; RECOV-NEXT:    ret void
+;
 entry:
 
   %q = load i32, ptr addrspace(1) %p, align 4
   ret void
 }
+
+define protected amdgpu_kernel void @global_store_8(ptr addrspace(1) %p) sanitize_address {
+; CHECK-LABEL: define protected amdgpu_kernel void @global_store_8(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr addrspace(1) [[P]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[TMP0]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], 2147450880
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
+; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP5]])
+; CHECK-NEXT:    [[TMP7:%.*]] = icmp ne i64 [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[TMP7]], label [[ASAN_REPORT:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK:       asan.report:
+; CHECK-NEXT:    br i1 [[TMP5]], label [[TMP8:%.*]], label [[TMP9:%.*]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__asan_report_store8(i64 [[TMP0]]) #[[ATTR5]]
+; CHECK-NEXT:    call void @llvm.amdgcn.unreachable()
+; CHECK-NEXT:    br label [[TMP9]]
+; CHECK:       9:
+; CHECK-NEXT:    br label [[TMP10]]
+; CHECK:       10:
+; CHECK-NEXT:    store i64 0, ptr addrspace(1) [[P]], align 8
+; CHECK-NEXT:    ret void
+;
+; RECOV-LABEL: define protected amdgpu_kernel void @global_store_8(
+; RECOV-SAME: ptr addrspace(1) [[P:%.*]]) #[[ATTR0]] {
+; RECOV-NEXT:  entry:
+; RECOV-NEXT:    [[TMP0:%.*]] = ptrtoint ptr addrspace(1) [[P]] to i64
+; RECOV-NEXT:    [[TMP1:%.*]] = lshr i64 [[TMP0]], 3
+; RECOV-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], 2147450880
+; RECOV-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; RECOV-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
+; RECOV-NEXT:    [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
+; RECOV-NEXT:    br i1 [[TMP5]], label [[ASAN_REPORT:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
+; RECOV:       asan.report:
+; RECOV-NEXT:    call void @__asan_report_store8_noabort(i64 [[TMP0]]) #[[ATTR5]]
+; RECOV-NEXT:    br label [[TMP6]]
+; RECOV:       6:
+; RECOV-NEXT:    store i64 0, ptr addrspace(1) [[P]], align 8
+; RECOV-NEXT:    ret void
+;
+entry:
+  store i64 0, ptr addrspace(1) %p, align 8
+  ret void
+}
+
+define protected amdgpu_kernel void @global_load_8(ptr addrspace(1) %p) sanitize_address {
+; CHECK-LABEL: define protected amdgpu_kernel void @global_load_8(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr addrspace(1) [[P]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[TMP0]], 3
+; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], 2147450880
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
+; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP5]])
+; CHECK-NEXT:    [[TMP7:%.*]] = icmp ne i64 [[TMP6]], 0
+; CHECK-NEXT:    br i1 [[TMP7]], label [[ASAN_REPORT:%.*]], label [[TMP10:%.*]], !prof [[PROF0]]
+; CHECK:       asan.report:
+; CHECK-NEXT:    br i1 [[TMP5]], label [[TMP8:%.*]], label [[TMP9:%.*]]
+; CHECK:       8:
+; CHECK-NEXT:    call void @__asan_report_load8(i64 [[TMP0]]) #[[ATTR5]]
+; CHECK-NEXT:    call void @llvm.amdgcn.unreachable()
+; CHECK-NEXT:    br label [[TMP9]]
+; CHECK:       9:
+; CHECK-NEXT:    br label [[TMP10]]
+; CHECK:       10:
+; CHECK-NEXT:    [[Q:%.*]] = load i64, ptr addrspace(1) [[P]], align 8
+; CHECK-NEXT:    ret void
+;
+; RECOV-LABEL: define protected amdgpu_kernel void @global_load_8(
+; RECOV-SAME: ptr addrspace(1) [[P:%.*]]) #[[ATTR0]] {
+; RECOV-NEXT:  entry:
+; RECOV-NEXT:    [[TMP0:%.*]] = ptrtoint ptr addrspace(1) [[P]] to i64
+; RECOV-NEXT:    [[TMP1:%.*]] = lshr i64 [[TMP0]], 3
+; RECOV-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], 2147450880
+; RECOV-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; RECOV-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP3]], align 1
+; RECOV-NEXT:    [[TMP5:%.*]] = icmp ne i8 [[TMP4]], 0
+; RECOV-NEXT:    br i1 [[TMP5]], label [[ASAN_REPORT:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
+; RECOV:       asan.report:
+; RECOV-NEXT:    call void @__asan_report_load8_noabort(i64 [[TMP0]]) #[[ATTR5]]
+; RECOV-NEXT:    br label [[TMP6]]
+; RECOV:       6:
+; RECOV-NEXT:    [[Q:%.*]] = load i64, ptr addrspace(1) [[P]], align 8
+; RECOV-NEXT:    ret void
+;
+entry:
+  %q = load i64, ptr addrspace(1) %p, align 8
+  ret void
+}