[llvm] [AMDGPU] Improve detection of non-null addrspacecast operands (PR #82311)
Pierre van Houtryve via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 27 03:33:49 PST 2024
https://github.com/Pierre-vh updated https://github.com/llvm/llvm-project/pull/82311
From 1f44df2ed791651473235cf995b94fe4f542ebe6 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Tue, 20 Feb 2024 08:02:46 +0100
Subject: [PATCH 1/3] [AMDGPU] Add codegen-prepare-addrspacecast-non-null
---
.../codegen-prepare-addrspacecast-non-null.ll | 349 ++++++++++++++++++
1 file changed, 349 insertions(+)
create mode 100644 llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll
diff --git a/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll b/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll
new file mode 100644
index 00000000000000..a7c48955b043f1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll
@@ -0,0 +1,349 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -mtriple=amdgcn-- -amdgpu-codegenprepare -S < %s | FileCheck -check-prefix=OPT %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s --check-prefixes=ASM,DAGISEL-ASM
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -global-isel -mcpu=gfx900 < %s | FileCheck %s --check-prefixes=ASM,GISEL-ASM
+
+; Tests that we can avoid nullptr checks for addrspacecasts between priv/local and flat.
+;
+; Whenever a testcase is successful, we should see the addrspacecast replaced with the intrinsic
+; and the resulting code should have no select/cndmask null check for the pointer.
+
+define void @local_to_flat_nonnull_arg(ptr addrspace(3) nonnull %ptr) {
+; OPT-LABEL: define void @local_to_flat_nonnull_arg(
+; OPT-SAME: ptr addrspace(3) nonnull [[PTR:%.*]]) {
+; OPT-NEXT: [[X:%.*]] = addrspacecast ptr addrspace(3) [[PTR]] to ptr
+; OPT-NEXT: store volatile i32 7, ptr [[X]], align 4
+; OPT-NEXT: ret void
+;
+; DAGISEL-ASM-LABEL: local_to_flat_nonnull_arg:
+; DAGISEL-ASM: ; %bb.0:
+; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-ASM-NEXT: s_mov_b64 s[4:5], src_shared_base
+; DAGISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
+; DAGISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
+; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; DAGISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
+; DAGISEL-ASM-NEXT: flat_store_dword v[0:1], v2
+; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; DAGISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+; GISEL-ASM-LABEL: local_to_flat_nonnull_arg:
+; GISEL-ASM: ; %bb.0:
+; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-ASM-NEXT: s_mov_b64 s[4:5], src_shared_base
+; GISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
+; GISEL-ASM-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GISEL-ASM-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
+; GISEL-ASM-NEXT: flat_store_dword v[0:1], v2
+; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+ %x = addrspacecast ptr addrspace(3) %ptr to ptr
+ store volatile i32 7, ptr %x
+ ret void
+}
+
+define void @private_to_flat_nonnull_arg(ptr addrspace(5) nonnull %ptr) {
+; OPT-LABEL: define void @private_to_flat_nonnull_arg(
+; OPT-SAME: ptr addrspace(5) nonnull [[PTR:%.*]]) {
+; OPT-NEXT: [[X:%.*]] = addrspacecast ptr addrspace(5) [[PTR]] to ptr
+; OPT-NEXT: store volatile i32 7, ptr [[X]], align 4
+; OPT-NEXT: ret void
+;
+; DAGISEL-ASM-LABEL: private_to_flat_nonnull_arg:
+; DAGISEL-ASM: ; %bb.0:
+; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-ASM-NEXT: s_mov_b64 s[4:5], src_private_base
+; DAGISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
+; DAGISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
+; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; DAGISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
+; DAGISEL-ASM-NEXT: flat_store_dword v[0:1], v2
+; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; DAGISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+; GISEL-ASM-LABEL: private_to_flat_nonnull_arg:
+; GISEL-ASM: ; %bb.0:
+; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-ASM-NEXT: s_mov_b64 s[4:5], src_private_base
+; GISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
+; GISEL-ASM-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GISEL-ASM-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; GISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
+; GISEL-ASM-NEXT: flat_store_dword v[0:1], v2
+; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+ %x = addrspacecast ptr addrspace(5) %ptr to ptr
+ store volatile i32 7, ptr %x
+ ret void
+}
+
+define void @flat_to_local_nonnull_arg(ptr nonnull %ptr) {
+; OPT-LABEL: define void @flat_to_local_nonnull_arg(
+; OPT-SAME: ptr nonnull [[PTR:%.*]]) {
+; OPT-NEXT: [[X:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(3)
+; OPT-NEXT: store volatile i32 7, ptr addrspace(3) [[X]], align 4
+; OPT-NEXT: ret void
+;
+; ASM-LABEL: flat_to_local_nonnull_arg:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; ASM-NEXT: v_mov_b32_e32 v1, 7
+; ASM-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
+; ASM-NEXT: ds_write_b32 v0, v1
+; ASM-NEXT: s_waitcnt lgkmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
+ %x = addrspacecast ptr %ptr to ptr addrspace(3)
+ store volatile i32 7, ptr addrspace(3) %x
+ ret void
+}
+
+define void @flat_to_private_nonnull_arg(ptr nonnull %ptr) {
+; OPT-LABEL: define void @flat_to_private_nonnull_arg(
+; OPT-SAME: ptr nonnull [[PTR:%.*]]) {
+; OPT-NEXT: [[X:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
+; OPT-NEXT: store volatile i32 7, ptr addrspace(5) [[X]], align 4
+; OPT-NEXT: ret void
+;
+; ASM-LABEL: flat_to_private_nonnull_arg:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; ASM-NEXT: v_mov_b32_e32 v1, 7
+; ASM-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
+; ASM-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; ASM-NEXT: s_waitcnt vmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
+ %x = addrspacecast ptr %ptr to ptr addrspace(5)
+ store volatile i32 7, ptr addrspace(5) %x
+ ret void
+}
+
+define void @private_alloca_to_flat(ptr %ptr) {
+; OPT-LABEL: define void @private_alloca_to_flat(
+; OPT-SAME: ptr [[PTR:%.*]]) {
+; OPT-NEXT: [[ALLOCA:%.*]] = alloca i8, align 1, addrspace(5)
+; OPT-NEXT: [[X:%.*]] = addrspacecast ptr addrspace(5) [[ALLOCA]] to ptr
+; OPT-NEXT: store volatile i32 7, ptr [[X]], align 4
+; OPT-NEXT: ret void
+;
+; DAGISEL-ASM-LABEL: private_alloca_to_flat:
+; DAGISEL-ASM: ; %bb.0:
+; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-ASM-NEXT: s_mov_b64 s[4:5], src_private_base
+; DAGISEL-ASM-NEXT: v_lshrrev_b32_e64 v0, 6, s32
+; DAGISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
+; DAGISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
+; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; DAGISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
+; DAGISEL-ASM-NEXT: flat_store_dword v[0:1], v2
+; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; DAGISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+; GISEL-ASM-LABEL: private_alloca_to_flat:
+; GISEL-ASM: ; %bb.0:
+; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-ASM-NEXT: s_mov_b64 s[4:5], src_private_base
+; GISEL-ASM-NEXT: v_lshrrev_b32_e64 v0, 6, s32
+; GISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
+; GISEL-ASM-NEXT: flat_store_dword v[0:1], v2
+; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+ %alloca = alloca i8, addrspace(5)
+ %x = addrspacecast ptr addrspace(5) %alloca to ptr
+ store volatile i32 7, ptr %x
+ ret void
+}
+
+ at lds = internal unnamed_addr addrspace(3) global i8 undef, align 4
+
+define void @knownbits_on_flat_to_priv(ptr %ptr) {
+; OPT-LABEL: define void @knownbits_on_flat_to_priv(
+; OPT-SAME: ptr [[PTR:%.*]]) {
+; OPT-NEXT: [[PTR_INT:%.*]] = ptrtoint ptr [[PTR]] to i64
+; OPT-NEXT: [[PTR_OR:%.*]] = or i64 [[PTR_INT]], 15
+; OPT-NEXT: [[KB_PTR:%.*]] = inttoptr i64 [[PTR_OR]] to ptr
+; OPT-NEXT: [[X:%.*]] = addrspacecast ptr [[KB_PTR]] to ptr addrspace(5)
+; OPT-NEXT: store volatile i32 7, ptr addrspace(5) [[X]], align 4
+; OPT-NEXT: ret void
+;
+; DAGISEL-ASM-LABEL: knownbits_on_flat_to_priv:
+; DAGISEL-ASM: ; %bb.0:
+; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-ASM-NEXT: v_or_b32_e32 v0, 15, v0
+; DAGISEL-ASM-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
+; DAGISEL-ASM-NEXT: v_mov_b32_e32 v1, 7
+; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
+; DAGISEL-ASM-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0)
+; DAGISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+; GISEL-ASM-LABEL: knownbits_on_flat_to_priv:
+; GISEL-ASM: ; %bb.0:
+; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-ASM-NEXT: v_or_b32_e32 v0, 15, v0
+; GISEL-ASM-NEXT: v_mov_b32_e32 v1, 7
+; GISEL-ASM-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; GISEL-ASM-NEXT: s_waitcnt vmcnt(0)
+; GISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+ %ptr.int = ptrtoint ptr %ptr to i64
+ %ptr.or = or i64 %ptr.int, 15 ; set some low bits
+ %kb.ptr = inttoptr i64 %ptr.or to ptr
+ %x = addrspacecast ptr %kb.ptr to ptr addrspace(5)
+ store volatile i32 7, ptr addrspace(5) %x
+ ret void
+}
+
+define void @knownbits_on_priv_to_flat(ptr addrspace(5) %ptr) {
+; OPT-LABEL: define void @knownbits_on_priv_to_flat(
+; OPT-SAME: ptr addrspace(5) [[PTR:%.*]]) {
+; OPT-NEXT: [[PTR_INT:%.*]] = ptrtoint ptr addrspace(5) [[PTR]] to i32
+; OPT-NEXT: [[PTR_OR:%.*]] = and i32 [[PTR_INT]], 65535
+; OPT-NEXT: [[KB_PTR:%.*]] = inttoptr i32 [[PTR_OR]] to ptr addrspace(5)
+; OPT-NEXT: [[X:%.*]] = addrspacecast ptr addrspace(5) [[KB_PTR]] to ptr
+; OPT-NEXT: store volatile i32 7, ptr [[X]], align 4
+; OPT-NEXT: ret void
+;
+; DAGISEL-ASM-LABEL: knownbits_on_priv_to_flat:
+; DAGISEL-ASM: ; %bb.0:
+; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-ASM-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; DAGISEL-ASM-NEXT: s_mov_b64 s[4:5], src_private_base
+; DAGISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
+; DAGISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
+; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
+; DAGISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
+; DAGISEL-ASM-NEXT: flat_store_dword v[0:1], v2
+; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; DAGISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+; GISEL-ASM-LABEL: knownbits_on_priv_to_flat:
+; GISEL-ASM: ; %bb.0:
+; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-ASM-NEXT: s_mov_b64 s[4:5], src_private_base
+; GISEL-ASM-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
+; GISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
+; GISEL-ASM-NEXT: flat_store_dword v[0:1], v2
+; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+ %ptr.int = ptrtoint ptr addrspace(5) %ptr to i32
+ %ptr.or = and i32 %ptr.int, 65535 ; ensure low bits are zeroes
+ %kb.ptr = inttoptr i32 %ptr.or to ptr addrspace(5)
+ %x = addrspacecast ptr addrspace(5) %kb.ptr to ptr
+ store volatile i32 7, ptr %x
+ ret void
+}
+
+; This would recurse infinitely, so we give up once we notice it.
+define void @recursive_phis(i1 %cond, ptr addrspace(5) %ptr) {
+; OPT-LABEL: define void @recursive_phis(
+; OPT-SAME: i1 [[COND:%.*]], ptr addrspace(5) [[PTR:%.*]]) {
+; OPT-NEXT: entry:
+; OPT-NEXT: [[ALLOCA:%.*]] = alloca i8, align 1, addrspace(5)
+; OPT-NEXT: br i1 [[COND]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; OPT: then:
+; OPT-NEXT: [[PTR_INT:%.*]] = ptrtoint ptr addrspace(5) [[PTR]] to i32
+; OPT-NEXT: [[PTR_OR:%.*]] = and i32 [[PTR_INT]], 65535
+; OPT-NEXT: [[KB_PTR:%.*]] = inttoptr i32 [[PTR_OR]] to ptr addrspace(5)
+; OPT-NEXT: br label [[FINALLY:%.*]]
+; OPT: else:
+; OPT-NEXT: [[OTHER_PHI:%.*]] = phi ptr addrspace(5) [ [[ALLOCA]], [[ENTRY:%.*]] ], [ [[PHI_PTR:%.*]], [[FINALLY]] ]
+; OPT-NEXT: br label [[FINALLY]]
+; OPT: finally:
+; OPT-NEXT: [[PHI_PTR]] = phi ptr addrspace(5) [ [[KB_PTR]], [[THEN]] ], [ [[OTHER_PHI]], [[ELSE]] ]
+; OPT-NEXT: [[X:%.*]] = addrspacecast ptr addrspace(5) [[PHI_PTR]] to ptr
+; OPT-NEXT: store volatile i32 7, ptr [[X]], align 4
+; OPT-NEXT: br i1 [[COND]], label [[ELSE]], label [[END:%.*]]
+; OPT: end:
+; OPT-NEXT: ret void
+;
+; DAGISEL-ASM-LABEL: recursive_phis:
+; DAGISEL-ASM: ; %bb.0: ; %entry
+; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-ASM-NEXT: v_and_b32_e32 v0, 1, v0
+; DAGISEL-ASM-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; DAGISEL-ASM-NEXT: v_lshrrev_b32_e64 v0, 6, s32
+; DAGISEL-ASM-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; DAGISEL-ASM-NEXT: ; %bb.1: ; %then
+; DAGISEL-ASM-NEXT: v_and_b32_e32 v0, 0xffff, v1
+; DAGISEL-ASM-NEXT: ; %bb.2: ; %finallyendcf.split
+; DAGISEL-ASM-NEXT: s_or_b64 exec, exec, s[4:5]
+; DAGISEL-ASM-NEXT: s_mov_b64 s[8:9], src_private_base
+; DAGISEL-ASM-NEXT: s_xor_b64 s[6:7], vcc, -1
+; DAGISEL-ASM-NEXT: s_mov_b64 s[4:5], 0
+; DAGISEL-ASM-NEXT: v_mov_b32_e32 v1, s9
+; DAGISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
+; DAGISEL-ASM-NEXT: .LBB7_3: ; %finally
+; DAGISEL-ASM-NEXT: ; =>This Inner Loop Header: Depth=1
+; DAGISEL-ASM-NEXT: s_and_b64 s[8:9], exec, s[6:7]
+; DAGISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
+; DAGISEL-ASM-NEXT: s_or_b64 s[4:5], s[8:9], s[4:5]
+; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v4, 0, v1, vcc
+; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v3, 0, v0, vcc
+; DAGISEL-ASM-NEXT: flat_store_dword v[3:4], v2
+; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0)
+; DAGISEL-ASM-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; DAGISEL-ASM-NEXT: s_cbranch_execnz .LBB7_3
+; DAGISEL-ASM-NEXT: ; %bb.4: ; %end
+; DAGISEL-ASM-NEXT: s_or_b64 exec, exec, s[4:5]
+; DAGISEL-ASM-NEXT: s_waitcnt lgkmcnt(0)
+; DAGISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-ASM-LABEL: recursive_phis:
+; GISEL-ASM: ; %bb.0: ; %entry
+; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-ASM-NEXT: v_and_b32_e32 v0, 1, v0
+; GISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GISEL-ASM-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GISEL-ASM-NEXT: v_lshrrev_b32_e64 v0, 6, s32
+; GISEL-ASM-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; GISEL-ASM-NEXT: ; %bb.1: ; %then
+; GISEL-ASM-NEXT: v_and_b32_e32 v0, 0xffff, v1
+; GISEL-ASM-NEXT: ; %bb.2: ; %finallyendcf.split
+; GISEL-ASM-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-ASM-NEXT: s_mov_b64 s[8:9], src_private_base
+; GISEL-ASM-NEXT: s_mov_b64 s[6:7], 0
+; GISEL-ASM-NEXT: v_mov_b32_e32 v1, s9
+; GISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
+; GISEL-ASM-NEXT: .LBB7_3: ; %finally
+; GISEL-ASM-NEXT: ; =>This Inner Loop Header: Depth=1
+; GISEL-ASM-NEXT: s_and_b64 s[8:9], exec, s[4:5]
+; GISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
+; GISEL-ASM-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7]
+; GISEL-ASM-NEXT: v_cndmask_b32_e32 v3, 0, v0, vcc
+; GISEL-ASM-NEXT: v_cndmask_b32_e32 v4, 0, v1, vcc
+; GISEL-ASM-NEXT: flat_store_dword v[3:4], v2
+; GISEL-ASM-NEXT: s_waitcnt vmcnt(0)
+; GISEL-ASM-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GISEL-ASM-NEXT: s_cbranch_execnz .LBB7_3
+; GISEL-ASM-NEXT: ; %bb.4: ; %end
+; GISEL-ASM-NEXT: s_or_b64 exec, exec, s[6:7]
+; GISEL-ASM-NEXT: s_waitcnt lgkmcnt(0)
+; GISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %alloca = alloca i8, addrspace(5)
+ br i1 %cond, label %then, label %else
+
+then:
+ %ptr.int = ptrtoint ptr addrspace(5) %ptr to i32
+ %ptr.or = and i32 %ptr.int, 65535 ; ensure only lower 16 bits can be set.
+ %kb.ptr = inttoptr i32 %ptr.or to ptr addrspace(5)
+ br label %finally
+
+else:
+ %other.phi = phi ptr addrspace(5) [%alloca, %entry], [%phi.ptr, %finally]
+ br label %finally
+
+finally:
+ %phi.ptr = phi ptr addrspace(5) [%kb.ptr, %then], [%other.phi, %else]
+ %x = addrspacecast ptr addrspace(5) %phi.ptr to ptr
+ store volatile i32 7, ptr %x
+ br i1 %cond, label %else, label %end
+
+end:
+ ret void
+}
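For reference, a rough IR-level sketch (not part of the patch) of the null
check that a plain local-to-flat cast expands to when the source may be null;
the v_cmp_ne_u32/v_cndmask pairs in the checks above are the hardware form of
this select. Local/private use all-ones (-1) as their null value while flat
uses 0; %cvt is a hypothetical name for the aperture-combined pointer:

  ; %x = addrspacecast ptr addrspace(3) %ptr to ptr, conceptually:
  %nonnull = icmp ne ptr addrspace(3) %ptr, inttoptr (i32 -1 to ptr addrspace(3))
  ; %cvt = %ptr merged with the src_shared_base aperture (elided)
  %x = select i1 %nonnull, ptr %cvt, ptr null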
From 1e027eb5253aa24ec479a23c37c3a2edd47a3bbd Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Tue, 20 Feb 2024 08:12:00 +0100
Subject: [PATCH 2/3] [AMDGPU] Improve detection of non-null addrspacecast
operands
Use IR analysis to infer when an addrspacecast operand is non-null, then
lower it to an intrinsic that instruction selection can use to skip the
null check.
Solves SWDEV-316445
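In IR terms, the transformation looks like this (a sketch based on the test
updates below; the intrinsic is overloaded and mangled with the destination
and source pointer types):

  ; before
  %x = addrspacecast ptr addrspace(3) %ptr to ptr
  ; after AMDGPUCodeGenPrepare, when %ptr is known non-null
  %x = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p3(ptr addrspace(3) %ptr)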
---
llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 7 +
.../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 82 ++++++++
.../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 21 +-
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 59 ++++--
llvm/lib/Target/AMDGPU/SIISelLowering.h | 4 +
.../codegen-prepare-addrspacecast-non-null.ll | 194 ++++++------------
6 files changed, 218 insertions(+), 149 deletions(-)
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 202fa4e8f4ea81..b8899784aba71f 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -3196,4 +3196,11 @@ def int_amdgcn_fdiv_fast : DefaultAttrsIntrinsic<
[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
[IntrNoMem, IntrSpeculatable]
>;
+
+/// Emit an addrspacecast without null pointer checking.
+/// Should only be inserted by a pass that has analyzed the addrspacecast's
+/// src and proven it to be non-null.
+def int_amdgcn_addrspacecast_nonnull : DefaultAttrsIntrinsic<
+ [llvm_anyptr_ty], [llvm_anyptr_ty],
+ [IntrNoMem, IntrSpeculatable]
+>;
}
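Since both the result and operand types are llvm_anyptr_ty, each cast
direction gets its own overload; the four instances exercised by the test
are:

  declare ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p3(ptr addrspace(3))
  declare ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5))
  declare ptr addrspace(3) @llvm.amdgcn.addrspacecast.nonnull.p3.p0(ptr)
  declare ptr addrspace(5) @llvm.amdgcn.addrspacecast.nonnull.p5.p0(ptr)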
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 1c75c5a47c9d27..e5beae49f38503 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -51,6 +51,12 @@ static cl::opt<bool> Widen16BitOps(
cl::ReallyHidden,
cl::init(true));
+static cl::opt<bool> LowerAddrSpaceCast(
+ "amdgpu-codegenprepare-addrspacecast",
+ cl::desc("Detect non-null addrspacecast source and lower them early to "
+ "avoid the null pointer check"),
+ cl::ReallyHidden, cl::init(true));
+
static cl::opt<bool>
BreakLargePHIs("amdgpu-codegenprepare-break-large-phis",
cl::desc("Break large PHI nodes for DAGISel"),
@@ -99,6 +105,7 @@ class AMDGPUCodeGenPrepareImpl
: public InstVisitor<AMDGPUCodeGenPrepareImpl, bool> {
public:
const GCNSubtarget *ST = nullptr;
+ const AMDGPUTargetMachine *TM = nullptr;
const TargetLibraryInfo *TLInfo = nullptr;
AssumptionCache *AC = nullptr;
DominatorTree *DT = nullptr;
@@ -310,6 +317,7 @@ class AMDGPUCodeGenPrepareImpl
bool visitICmpInst(ICmpInst &I);
bool visitSelectInst(SelectInst &I);
bool visitPHINode(PHINode &I);
+ bool visitAddrSpaceCastInst(AddrSpaceCastInst &I);
bool visitIntrinsicInst(IntrinsicInst &I);
bool visitBitreverseIntrinsicInst(IntrinsicInst &I);
@@ -2013,6 +2021,78 @@ bool AMDGPUCodeGenPrepareImpl::visitPHINode(PHINode &I) {
return true;
}
+bool AMDGPUCodeGenPrepareImpl::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
+ if (!LowerAddrSpaceCast)
+ return false;
+
+ // Check if this can be lowered to an amdgcn.addrspacecast.nonnull.
+ // This is only worthwhile for casts between priv/local and flat.
+ const unsigned SrcAS = I.getSrcAddressSpace();
+ const unsigned DstAS = I.getDestAddressSpace();
+
+ bool CanLower = false;
+ if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
+ CanLower = (DstAS == AMDGPUAS::LOCAL_ADDRESS ||
+ DstAS == AMDGPUAS::PRIVATE_ADDRESS);
+ else if (DstAS == AMDGPUAS::FLAT_ADDRESS)
+ CanLower = (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
+ SrcAS == AMDGPUAS::PRIVATE_ADDRESS);
+ if (!CanLower)
+ return false;
+
+ // Check the Src operand, and look through Phis.
+ SmallVector<Value *, 4> WorkList;
+ DenseSet<const PHINode *> SeenPHIs;
+ WorkList.push_back(I.getOperand(0));
+ while (!WorkList.empty()) {
+ Value *Cur = getUnderlyingObject(WorkList.pop_back_val());
+
+ // Pointer cannot be null if it's a block address, GV or alloca.
+ // NOTE: We don't support extern_weak, but if we did, we'd need to check for
+ // it as the symbol could be null in such cases.
+ if (isa<BlockAddress>(Cur) || isa<GlobalValue>(Cur) || isa<AllocaInst>(Cur))
+ continue;
+
+ // Check nonnull arguments.
+ if (const auto *Arg = dyn_cast<Argument>(Cur); Arg && Arg->hasNonNullAttr())
+ continue;
+
+ // TODO: Calls that return nonnull?
+
+ // Look through PHIs - add all incoming values to the queue.
+ if (const auto *Phi = dyn_cast<PHINode>(Cur)) {
+ auto [It, Inserted] = SeenPHIs.insert(Phi);
+ if (!Inserted)
+ return false; // infinite recursion
+
+ for (auto &Inc : Phi->incoming_values())
+ WorkList.push_back(Inc.get());
+ continue;
+ }
+
+ // For all other things, use KnownBits.
+ // We either use 0 or all bits set to indicate null, so check whether the
+ // value can be zero or all ones.
+ auto SrcPtrKB =
+ computeKnownBits(Cur, *DL).trunc(DL->getPointerSizeInBits(SrcAS));
+ const auto NullVal = TM->getNullPointerValue(SrcAS);
+ assert((NullVal == 0 || NullVal == -1) &&
+ "don't know how to check for this null value!");
+ if (NullVal ? !SrcPtrKB.getMaxValue().isAllOnes() : SrcPtrKB.isNonZero())
+ continue;
+
+ // Value is unknown so we can't lower.
+ return false;
+ }
+
+ IRBuilder<> B(&I);
+ auto *Intrin = B.CreateIntrinsic(
+ I.getType(), Intrinsic::amdgcn_addrspacecast_nonnull, {I.getOperand(0)});
+ I.replaceAllUsesWith(Intrin);
+ I.eraseFromParent();
+ return true;
+}
+
bool AMDGPUCodeGenPrepareImpl::visitIntrinsicInst(IntrinsicInst &I) {
switch (I.getIntrinsicID()) {
case Intrinsic::bitreverse:
@@ -2196,6 +2276,7 @@ bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
return false;
const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
+ Impl.TM = &TM;
Impl.TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
Impl.ST = &TM.getSubtarget<GCNSubtarget>(F);
Impl.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
@@ -2214,6 +2295,7 @@ PreservedAnalyses AMDGPUCodeGenPreparePass::run(Function &F,
AMDGPUCodeGenPrepareImpl Impl;
Impl.Mod = F.getParent();
Impl.DL = &Impl.Mod->getDataLayout();
+ Impl.TM = static_cast<const AMDGPUTargetMachine *>(&TM);
Impl.TLInfo = &FAM.getResult<TargetLibraryAnalysis>(F);
Impl.ST = &TM.getSubtarget<GCNSubtarget>(F);
Impl.AC = &FAM.getResult<AssumptionAnalysis>(F);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 0d3b158a0df731..aa9ccb7cf75262 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2247,10 +2247,16 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
MachineIRBuilder &B) const {
MachineFunction &MF = B.getMF();
+ // MI can either be a G_ADDRSPACE_CAST or a
+ // G_INTRINSIC @llvm.amdgcn.addrspacecast.nonnull
+ assert(MI.getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
+ (isa<GIntrinsic>(MI) && cast<GIntrinsic>(MI).getIntrinsicID() ==
+ Intrinsic::amdgcn_addrspacecast_nonnull));
+
const LLT S32 = LLT::scalar(32);
Register Dst = MI.getOperand(0).getReg();
- Register Src = MI.getOperand(1).getReg();
-
+ Register Src = isa<GIntrinsic>(MI) ? MI.getOperand(2).getReg()
+ : MI.getOperand(1).getReg();
LLT DstTy = MRI.getType(Dst);
LLT SrcTy = MRI.getType(Src);
unsigned DestAS = DstTy.getAddressSpace();
@@ -2263,6 +2269,11 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
const AMDGPUTargetMachine &TM
= static_cast<const AMDGPUTargetMachine &>(MF.getTarget());
+ // For llvm.amdgcn.addrspacecast.nonnull we can always assume non-null; for
+ // G_ADDRSPACE_CAST we need to guess.
+ const bool IsKnownNonNull =
+ isa<GIntrinsic>(MI) ? true : isKnownNonNull(Src, MRI, TM, SrcAS);
+
if (TM.isNoopAddrSpaceCast(SrcAS, DestAS)) {
MI.setDesc(B.getTII().get(TargetOpcode::G_BITCAST));
return true;
@@ -2271,7 +2282,7 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
if (SrcAS == AMDGPUAS::FLAT_ADDRESS &&
(DestAS == AMDGPUAS::LOCAL_ADDRESS ||
DestAS == AMDGPUAS::PRIVATE_ADDRESS)) {
- if (isKnownNonNull(Src, MRI, TM, SrcAS)) {
+ if (IsKnownNonNull) {
// Extract low 32-bits of the pointer.
B.buildExtract(Dst, Src, 0);
MI.eraseFromParent();
@@ -2308,7 +2319,7 @@ bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
// avoid the ptrtoint?
auto BuildPtr = B.buildMergeLikeInstr(DstTy, {SrcAsInt, ApertureReg});
- if (isKnownNonNull(Src, MRI, TM, SrcAS)) {
+ if (IsKnownNonNull) {
B.buildCopy(Dst, BuildPtr);
MI.eraseFromParent();
return true;
@@ -7021,6 +7032,8 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
return false;
}
+ case Intrinsic::amdgcn_addrspacecast_nonnull:
+ return legalizeAddrSpaceCast(MI, MRI, B);
case Intrinsic::amdgcn_make_buffer_rsrc:
return legalizePointerAsRsrcIntrin(MI, MRI, B);
case Intrinsic::amdgcn_kernarg_segment_ptr:
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 7be670f8e76c37..d2d761e36ee084 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1415,6 +1415,24 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
}
}
+void SITargetLowering::CollectTargetIntrinsicOperands(
+ const CallInst &I, SmallVectorImpl<SDValue> &Ops, SelectionDAG &DAG) const {
+ switch (cast<IntrinsicInst>(I).getIntrinsicID()) {
+ case Intrinsic::amdgcn_addrspacecast_nonnull: {
+ // The DAG's ValueType loses the addrspaces.
+ // Add them as 2 extra Constant operands "from" and "to".
+ unsigned SrcAS =
+ I.getOperand(0)->getType()->getScalarType()->getPointerAddressSpace();
+ unsigned DstAS = I.getType()->getScalarType()->getPointerAddressSpace();
+ Ops.push_back(DAG.getTargetConstant(SrcAS, SDLoc(), MVT::i32));
+ Ops.push_back(DAG.getTargetConstant(DstAS, SDLoc(), MVT::i32));
+ break;
+ }
+ default:
+ break;
+ }
+}
+
bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
SmallVectorImpl<Value*> &Ops,
Type *&AccessTy) const {
@@ -6649,24 +6667,37 @@ static bool isKnownNonNull(SDValue Val, SelectionDAG &DAG,
SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
SelectionDAG &DAG) const {
SDLoc SL(Op);
- const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
-
- SDValue Src = ASC->getOperand(0);
- SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
- unsigned SrcAS = ASC->getSrcAddressSpace();
const AMDGPUTargetMachine &TM =
static_cast<const AMDGPUTargetMachine &>(getTargetMachine());
+ unsigned DestAS, SrcAS;
+ SDValue Src;
+ bool KnownNonNull;
+ if (const auto *ASC = dyn_cast<AddrSpaceCastSDNode>(Op)) {
+ SrcAS = ASC->getSrcAddressSpace();
+ Src = ASC->getOperand(0);
+ DestAS = ASC->getDestAddressSpace();
+ KnownNonNull = isKnownNonNull(Op, DAG, TM, SrcAS);
+ } else {
+ assert(Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
+ Op.getConstantOperandVal(0) ==
+ Intrinsic::amdgcn_addrspacecast_nonnull);
+ Src = Op->getOperand(1);
+ SrcAS = Op->getConstantOperandVal(2);
+ DestAS = Op->getConstantOperandVal(3);
+ KnownNonNull = true;
+ }
+
+ SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);
+
// flat -> local/private
if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
- unsigned DestAS = ASC->getDestAddressSpace();
-
if (DestAS == AMDGPUAS::LOCAL_ADDRESS ||
DestAS == AMDGPUAS::PRIVATE_ADDRESS) {
SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
- if (isKnownNonNull(Src, DAG, TM, SrcAS))
+ if (KnownNonNull)
return Ptr;
unsigned NullVal = TM.getNullPointerValue(DestAS);
@@ -6679,16 +6710,16 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
}
// local/private -> flat
- if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
+ if (DestAS == AMDGPUAS::FLAT_ADDRESS) {
if (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
SrcAS == AMDGPUAS::PRIVATE_ADDRESS) {
- SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
+ SDValue Aperture = getSegmentAperture(SrcAS, SL, DAG);
SDValue CvtPtr =
DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);
CvtPtr = DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr);
- if (isKnownNonNull(Src, DAG, TM, SrcAS))
+ if (KnownNonNull)
return CvtPtr;
unsigned NullVal = TM.getNullPointerValue(SrcAS);
@@ -6711,7 +6742,7 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
}
- if (ASC->getDestAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
+ if (DestAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
Src.getValueType() == MVT::i64)
return DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
@@ -6722,7 +6753,7 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
DAG.getContext()->diagnose(InvalidAddrSpaceCast);
- return DAG.getUNDEF(ASC->getValueType(0));
+ return DAG.getUNDEF(Op->getValueType(0));
}
// This lowers an INSERT_SUBVECTOR by extracting the individual elements from
@@ -8339,6 +8370,8 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
Op.getOperand(3), Op.getOperand(4), Op.getOperand(5),
IndexKeyi32, Op.getOperand(7)});
}
+ case Intrinsic::amdgcn_addrspacecast_nonnull:
+ return lowerADDRSPACECAST(Op, DAG);
default:
if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index e436c23af5bcac..6ec9e068c49042 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -305,6 +305,10 @@ class SITargetLowering final : public AMDGPUTargetLowering {
MachineFunction &MF,
unsigned IntrinsicID) const override;
+ void CollectTargetIntrinsicOperands(const CallInst &I,
+ SmallVectorImpl<SDValue> &Ops,
+ SelectionDAG &DAG) const override;
+
bool getAddrModeArguments(IntrinsicInst * /*I*/,
SmallVectorImpl<Value*> &/*Ops*/,
Type *&/*AccessTy*/) const override;
diff --git a/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll b/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll
index a7c48955b043f1..b5bf369c7dd0c4 100644
--- a/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll
+++ b/llvm/test/CodeGen/AMDGPU/codegen-prepare-addrspacecast-non-null.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: opt -mtriple=amdgcn-- -amdgpu-codegenprepare -S < %s | FileCheck -check-prefix=OPT %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s --check-prefixes=ASM,DAGISEL-ASM
; RUN: llc -mtriple=amdgcn-amd-amdhsa -global-isel -mcpu=gfx900 < %s | FileCheck %s --check-prefixes=ASM,GISEL-ASM
@@ -11,34 +11,19 @@
define void @local_to_flat_nonnull_arg(ptr addrspace(3) nonnull %ptr) {
; OPT-LABEL: define void @local_to_flat_nonnull_arg(
; OPT-SAME: ptr addrspace(3) nonnull [[PTR:%.*]]) {
-; OPT-NEXT: [[X:%.*]] = addrspacecast ptr addrspace(3) [[PTR]] to ptr
-; OPT-NEXT: store volatile i32 7, ptr [[X]], align 4
+; OPT-NEXT: [[TMP1:%.*]] = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p3(ptr addrspace(3) [[PTR]])
+; OPT-NEXT: store volatile i32 7, ptr [[TMP1]], align 4
; OPT-NEXT: ret void
;
-; DAGISEL-ASM-LABEL: local_to_flat_nonnull_arg:
-; DAGISEL-ASM: ; %bb.0:
-; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-ASM-NEXT: s_mov_b64 s[4:5], src_shared_base
-; DAGISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
-; DAGISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
-; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; DAGISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
-; DAGISEL-ASM-NEXT: flat_store_dword v[0:1], v2
-; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; DAGISEL-ASM-NEXT: s_setpc_b64 s[30:31]
-; GISEL-ASM-LABEL: local_to_flat_nonnull_arg:
-; GISEL-ASM: ; %bb.0:
-; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-ASM-NEXT: s_mov_b64 s[4:5], src_shared_base
-; GISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
-; GISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
-; GISEL-ASM-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; GISEL-ASM-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; GISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
-; GISEL-ASM-NEXT: flat_store_dword v[0:1], v2
-; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+; ASM-LABEL: local_to_flat_nonnull_arg:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_mov_b64 s[4:5], src_shared_base
+; ASM-NEXT: v_mov_b32_e32 v1, s5
+; ASM-NEXT: v_mov_b32_e32 v2, 7
+; ASM-NEXT: flat_store_dword v[0:1], v2
+; ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
%x = addrspacecast ptr addrspace(3) %ptr to ptr
store volatile i32 7, ptr %x
ret void
@@ -47,34 +32,19 @@ define void @local_to_flat_nonnull_arg(ptr addrspace(3) nonnull %ptr) {
define void @private_to_flat_nonnull_arg(ptr addrspace(5) nonnull %ptr) {
; OPT-LABEL: define void @private_to_flat_nonnull_arg(
; OPT-SAME: ptr addrspace(5) nonnull [[PTR:%.*]]) {
-; OPT-NEXT: [[X:%.*]] = addrspacecast ptr addrspace(5) [[PTR]] to ptr
-; OPT-NEXT: store volatile i32 7, ptr [[X]], align 4
+; OPT-NEXT: [[TMP1:%.*]] = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) [[PTR]])
+; OPT-NEXT: store volatile i32 7, ptr [[TMP1]], align 4
; OPT-NEXT: ret void
;
-; DAGISEL-ASM-LABEL: private_to_flat_nonnull_arg:
-; DAGISEL-ASM: ; %bb.0:
-; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-ASM-NEXT: s_mov_b64 s[4:5], src_private_base
-; DAGISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
-; DAGISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
-; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; DAGISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
-; DAGISEL-ASM-NEXT: flat_store_dword v[0:1], v2
-; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; DAGISEL-ASM-NEXT: s_setpc_b64 s[30:31]
-; GISEL-ASM-LABEL: private_to_flat_nonnull_arg:
-; GISEL-ASM: ; %bb.0:
-; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-ASM-NEXT: s_mov_b64 s[4:5], src_private_base
-; GISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
-; GISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
-; GISEL-ASM-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; GISEL-ASM-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; GISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
-; GISEL-ASM-NEXT: flat_store_dword v[0:1], v2
-; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+; ASM-LABEL: private_to_flat_nonnull_arg:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_mov_b64 s[4:5], src_private_base
+; ASM-NEXT: v_mov_b32_e32 v1, s5
+; ASM-NEXT: v_mov_b32_e32 v2, 7
+; ASM-NEXT: flat_store_dword v[0:1], v2
+; ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
%x = addrspacecast ptr addrspace(5) %ptr to ptr
store volatile i32 7, ptr %x
ret void
@@ -83,16 +53,14 @@ define void @private_to_flat_nonnull_arg(ptr addrspace(5) nonnull %ptr) {
define void @flat_to_local_nonnull_arg(ptr nonnull %ptr) {
; OPT-LABEL: define void @flat_to_local_nonnull_arg(
; OPT-SAME: ptr nonnull [[PTR:%.*]]) {
-; OPT-NEXT: [[X:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(3)
-; OPT-NEXT: store volatile i32 7, ptr addrspace(3) [[X]], align 4
+; OPT-NEXT: [[TMP1:%.*]] = call ptr addrspace(3) @llvm.amdgcn.addrspacecast.nonnull.p3.p0(ptr [[PTR]])
+; OPT-NEXT: store volatile i32 7, ptr addrspace(3) [[TMP1]], align 4
; OPT-NEXT: ret void
;
; ASM-LABEL: flat_to_local_nonnull_arg:
; ASM: ; %bb.0:
; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; ASM-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; ASM-NEXT: v_mov_b32_e32 v1, 7
-; ASM-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; ASM-NEXT: ds_write_b32 v0, v1
; ASM-NEXT: s_waitcnt lgkmcnt(0)
; ASM-NEXT: s_setpc_b64 s[30:31]
@@ -104,16 +72,14 @@ define void @flat_to_local_nonnull_arg(ptr nonnull %ptr) {
define void @flat_to_private_nonnull_arg(ptr nonnull %ptr) {
; OPT-LABEL: define void @flat_to_private_nonnull_arg(
; OPT-SAME: ptr nonnull [[PTR:%.*]]) {
-; OPT-NEXT: [[X:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
-; OPT-NEXT: store volatile i32 7, ptr addrspace(5) [[X]], align 4
+; OPT-NEXT: [[TMP1:%.*]] = call ptr addrspace(5) @llvm.amdgcn.addrspacecast.nonnull.p5.p0(ptr [[PTR]])
+; OPT-NEXT: store volatile i32 7, ptr addrspace(5) [[TMP1]], align 4
; OPT-NEXT: ret void
;
; ASM-LABEL: flat_to_private_nonnull_arg:
; ASM: ; %bb.0:
; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; ASM-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; ASM-NEXT: v_mov_b32_e32 v1, 7
-; ASM-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; ASM-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
; ASM-NEXT: s_waitcnt vmcnt(0)
; ASM-NEXT: s_setpc_b64 s[30:31]
@@ -126,33 +92,20 @@ define void @private_alloca_to_flat(ptr %ptr) {
; OPT-LABEL: define void @private_alloca_to_flat(
; OPT-SAME: ptr [[PTR:%.*]]) {
; OPT-NEXT: [[ALLOCA:%.*]] = alloca i8, align 1, addrspace(5)
-; OPT-NEXT: [[X:%.*]] = addrspacecast ptr addrspace(5) [[ALLOCA]] to ptr
-; OPT-NEXT: store volatile i32 7, ptr [[X]], align 4
+; OPT-NEXT: [[TMP1:%.*]] = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) [[ALLOCA]])
+; OPT-NEXT: store volatile i32 7, ptr [[TMP1]], align 4
; OPT-NEXT: ret void
;
-; DAGISEL-ASM-LABEL: private_alloca_to_flat:
-; DAGISEL-ASM: ; %bb.0:
-; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-ASM-NEXT: s_mov_b64 s[4:5], src_private_base
-; DAGISEL-ASM-NEXT: v_lshrrev_b32_e64 v0, 6, s32
-; DAGISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
-; DAGISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
-; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; DAGISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
-; DAGISEL-ASM-NEXT: flat_store_dword v[0:1], v2
-; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; DAGISEL-ASM-NEXT: s_setpc_b64 s[30:31]
-; GISEL-ASM-LABEL: private_alloca_to_flat:
-; GISEL-ASM: ; %bb.0:
-; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-ASM-NEXT: s_mov_b64 s[4:5], src_private_base
-; GISEL-ASM-NEXT: v_lshrrev_b32_e64 v0, 6, s32
-; GISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
-; GISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
-; GISEL-ASM-NEXT: flat_store_dword v[0:1], v2
-; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+; ASM-LABEL: private_alloca_to_flat:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_mov_b64 s[4:5], src_private_base
+; ASM-NEXT: v_lshrrev_b32_e64 v0, 6, s32
+; ASM-NEXT: v_mov_b32_e32 v1, s5
+; ASM-NEXT: v_mov_b32_e32 v2, 7
+; ASM-NEXT: flat_store_dword v[0:1], v2
+; ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
%alloca = alloca i8, addrspace(5)
%x = addrspacecast ptr addrspace(5) %alloca to ptr
store volatile i32 7, ptr %x
@@ -167,28 +120,18 @@ define void @knownbits_on_flat_to_priv(ptr %ptr) {
; OPT-NEXT: [[PTR_INT:%.*]] = ptrtoint ptr [[PTR]] to i64
; OPT-NEXT: [[PTR_OR:%.*]] = or i64 [[PTR_INT]], 15
; OPT-NEXT: [[KB_PTR:%.*]] = inttoptr i64 [[PTR_OR]] to ptr
-; OPT-NEXT: [[X:%.*]] = addrspacecast ptr [[KB_PTR]] to ptr addrspace(5)
-; OPT-NEXT: store volatile i32 7, ptr addrspace(5) [[X]], align 4
+; OPT-NEXT: [[TMP1:%.*]] = call ptr addrspace(5) @llvm.amdgcn.addrspacecast.nonnull.p5.p0(ptr [[KB_PTR]])
+; OPT-NEXT: store volatile i32 7, ptr addrspace(5) [[TMP1]], align 4
; OPT-NEXT: ret void
;
-; DAGISEL-ASM-LABEL: knownbits_on_flat_to_priv:
-; DAGISEL-ASM: ; %bb.0:
-; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-ASM-NEXT: v_or_b32_e32 v0, 15, v0
-; DAGISEL-ASM-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; DAGISEL-ASM-NEXT: v_mov_b32_e32 v1, 7
-; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
-; DAGISEL-ASM-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
-; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0)
-; DAGISEL-ASM-NEXT: s_setpc_b64 s[30:31]
-; GISEL-ASM-LABEL: knownbits_on_flat_to_priv:
-; GISEL-ASM: ; %bb.0:
-; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-ASM-NEXT: v_or_b32_e32 v0, 15, v0
-; GISEL-ASM-NEXT: v_mov_b32_e32 v1, 7
-; GISEL-ASM-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
-; GISEL-ASM-NEXT: s_waitcnt vmcnt(0)
-; GISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+; ASM-LABEL: knownbits_on_flat_to_priv:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: v_or_b32_e32 v0, 15, v0
+; ASM-NEXT: v_mov_b32_e32 v1, 7
+; ASM-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; ASM-NEXT: s_waitcnt vmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
%ptr.int = ptrtoint ptr %ptr to i64
%ptr.or = or i64 %ptr.int, 15 ; set some low bits
%kb.ptr = inttoptr i64 %ptr.or to ptr
@@ -203,35 +146,22 @@ define void @knownbits_on_priv_to_flat(ptr addrspace(5) %ptr) {
; OPT-NEXT: [[PTR_INT:%.*]] = ptrtoint ptr addrspace(5) [[PTR]] to i32
; OPT-NEXT: [[PTR_OR:%.*]] = and i32 [[PTR_INT]], 65535
; OPT-NEXT: [[KB_PTR:%.*]] = inttoptr i32 [[PTR_OR]] to ptr addrspace(5)
-; OPT-NEXT: [[X:%.*]] = addrspacecast ptr addrspace(5) [[KB_PTR]] to ptr
-; OPT-NEXT: store volatile i32 7, ptr [[X]], align 4
+; OPT-NEXT: [[TMP1:%.*]] = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) [[KB_PTR]])
+; OPT-NEXT: store volatile i32 7, ptr [[TMP1]], align 4
; OPT-NEXT: ret void
;
-; DAGISEL-ASM-LABEL: knownbits_on_priv_to_flat:
-; DAGISEL-ASM: ; %bb.0:
-; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-ASM-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; DAGISEL-ASM-NEXT: s_mov_b64 s[4:5], src_private_base
-; DAGISEL-ASM-NEXT: v_cmp_ne_u32_e32 vcc, -1, v0
-; DAGISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
-; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; DAGISEL-ASM-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; DAGISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
-; DAGISEL-ASM-NEXT: flat_store_dword v[0:1], v2
-; DAGISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; DAGISEL-ASM-NEXT: s_setpc_b64 s[30:31]
-; GISEL-ASM-LABEL: knownbits_on_priv_to_flat:
-; GISEL-ASM: ; %bb.0:
-; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-ASM-NEXT: s_mov_b64 s[4:5], src_private_base
-; GISEL-ASM-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GISEL-ASM-NEXT: v_mov_b32_e32 v1, s5
-; GISEL-ASM-NEXT: v_mov_b32_e32 v2, 7
-; GISEL-ASM-NEXT: flat_store_dword v[0:1], v2
-; GISEL-ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GISEL-ASM-NEXT: s_setpc_b64 s[30:31]
+; ASM-LABEL: knownbits_on_priv_to_flat:
+; ASM: ; %bb.0:
+; ASM-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_mov_b64 s[4:5], src_private_base
+; ASM-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; ASM-NEXT: v_mov_b32_e32 v1, s5
+; ASM-NEXT: v_mov_b32_e32 v2, 7
+; ASM-NEXT: flat_store_dword v[0:1], v2
+; ASM-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; ASM-NEXT: s_setpc_b64 s[30:31]
%ptr.int = ptrtoint ptr addrspace(5) %ptr to i32
- %ptr.or = and i32 %ptr.int, 65535 ; ensure low bits are zeroes
+ %ptr.or = and i32 %ptr.int, 65535 ; ensure only lower 16 bits can be set.
%kb.ptr = inttoptr i32 %ptr.or to ptr addrspace(5)
%x = addrspacecast ptr addrspace(5) %kb.ptr to ptr
store volatile i32 7, ptr %x
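The two knownbits tests rely on the null-value asymmetry sketched earlier
(flat null is 0, private/local null is all-ones). A minimal sketch of the
reasoning, with hypothetical value names:

  %flat.int = or i64 %i, 15      ; some bits known set => cannot be 0 => flat ptr non-null
  %priv.int = and i32 %j, 65535  ; high bits known clear => cannot be -1 => private ptr non-null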
From aa1e90763607efb2cb1a25d19a1b157745a39ae7 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Tue, 27 Feb 2024 12:33:36 +0100
Subject: [PATCH 3/3] comments
---
.../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 67 ++++++++++---------
1 file changed, 34 insertions(+), 33 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index e5beae49f38503..7e46e290886140 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -51,12 +51,6 @@ static cl::opt<bool> Widen16BitOps(
cl::ReallyHidden,
cl::init(true));
-static cl::opt<bool> LowerAddrSpaceCast(
- "amdgpu-codegenprepare-addrspacecast",
- cl::desc("Detect non-null addrspacecast source and lower them early to "
- "avoid the null pointer check"),
- cl::ReallyHidden, cl::init(true));
-
static cl::opt<bool>
BreakLargePHIs("amdgpu-codegenprepare-break-large-phis",
cl::desc("Break large PHI nodes for DAGISel"),
@@ -2021,10 +2015,39 @@ bool AMDGPUCodeGenPrepareImpl::visitPHINode(PHINode &I) {
return true;
}
-bool AMDGPUCodeGenPrepareImpl::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
- if (!LowerAddrSpaceCast)
- return false;
+/// \param V Value to check
+/// \param DL DataLayout
+/// \param TM TargetMachine (TODO: remove once DL contains nullptr values)
+/// \param AS Target Address Space
+/// \return true if \p V cannot be the null value of \p AS, false otherwise.
+static bool isPtrKnownNeverNull(Value *V, const DataLayout &DL,
+ const AMDGPUTargetMachine &TM, unsigned AS) {
+ // Pointer cannot be null if it's a block address, GV or alloca.
+ // NOTE: We don't support extern_weak, but if we did, we'd need to check for
+ // it as the symbol could be null in such cases.
+ if (isa<BlockAddress>(V) || isa<GlobalValue>(V) || isa<AllocaInst>(V))
+ return true;
+ // Check nonnull arguments.
+ if (const auto *Arg = dyn_cast<Argument>(V); Arg && Arg->hasNonNullAttr())
+ return true;
+
+ // TODO: Calls that return nonnull?
+
+ // For all other things, use KnownBits.
+ // We either use 0 or all bits set to indicate null, so check whether the
+ // value can be zero or all ones.
+ //
+ // TODO: Use ValueTracking's isKnownNeverNull if it becomes aware that some
+ // address spaces have non-zero null values.
+ auto SrcPtrKB = computeKnownBits(V, DL).trunc(DL.getPointerSizeInBits(AS));
+ const auto NullVal = TM.getNullPointerValue(AS);
+ assert((NullVal == 0 || NullVal == -1) &&
+ "don't know how to check for this null value!");
+ return NullVal ? !SrcPtrKB.getMaxValue().isAllOnes() : SrcPtrKB.isNonZero();
+}
+
+bool AMDGPUCodeGenPrepareImpl::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
 // Check if this can be lowered to an amdgcn.addrspacecast.nonnull.
 // This is only worthwhile for casts between priv/local and flat.
const unsigned SrcAS = I.getSrcAddressSpace();
@@ -2040,25 +2063,13 @@ bool AMDGPUCodeGenPrepareImpl::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
if (!CanLower)
return false;
- // Check the Src operand, and look through Phis.
+ // Check the Src operand, looking through any PHIs.
SmallVector<Value *, 4> WorkList;
DenseSet<const PHINode *> SeenPHIs;
WorkList.push_back(I.getOperand(0));
while (!WorkList.empty()) {
Value *Cur = getUnderlyingObject(WorkList.pop_back_val());
- // Pointer cannot be null if it's a block address, GV or alloca.
- // NOTE: We don't support extern_weak, but if we did, we'd need to check for
- // it as the symbol could be null in such cases.
- if (isa<BlockAddress>(Cur) || isa<GlobalValue>(Cur) || isa<AllocaInst>(Cur))
- continue;
-
- // Check nonnull arguments.
- if (const auto *Arg = dyn_cast<Argument>(Cur); Arg && Arg->hasNonNullAttr())
- continue;
-
- // TODO: Calls that return nonnull?
-
// Look through PHIs - add all incoming values to the queue.
if (const auto *Phi = dyn_cast<PHINode>(Cur)) {
auto [It, Inserted] = SeenPHIs.insert(Phi);
@@ -2070,18 +2081,8 @@ bool AMDGPUCodeGenPrepareImpl::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
continue;
}
- // For all other things, use KnownBits.
- // We either use 0 or all bits set to indicate null, so check whether the
- // value can be zero or all ones.
- auto SrcPtrKB =
- computeKnownBits(Cur, *DL).trunc(DL->getPointerSizeInBits(SrcAS));
- const auto NullVal = TM->getNullPointerValue(SrcAS);
- assert((NullVal == 0 || NullVal == -1) &&
- "don't know how to check for this null value!");
- if (NullVal ? !SrcPtrKB.getMaxValue().isAllOnes() : SrcPtrKB.isNonZero())
+ if (isPtrKnownNeverNull(Cur, *DL, *TM, SrcAS))
continue;
-
- // Value is unknown so we can't lower.
return false;
}