[llvm] [CodeGenPrepare] Consider target memory intrinsics as memory use (PR #159638)

Jeffrey Byrnes via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 18 12:44:08 PDT 2025


https://github.com/jrbyrnes created https://github.com/llvm/llvm-project/pull/159638

When deciding whether to sink address instructions into their uses, we check that doing so is profitable. The profitability check is based on the types of the address instruction's users: if any user is not a memory instruction, we do not fold.

However, this check was not considering target intrinsics, which may themselves be loads or stores.

This patch adds logic to recognize target memory intrinsics, via TargetLowering::getAddrModeArguments, and to count them as memory uses of the address.
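
For illustration, here is a minimal IR sketch of the affected pattern, condensed from the tests below (function and value names are illustrative; the ds.read.tr16 intrinsic stands in for any target memory intrinsic):

  declare <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3))

  define <4 x half> @sink_example(ptr addrspace(3) %p, i1 %c) {
  entry:
    ; %addr's only users are target memory intrinsics, so with this patch it
    ; becomes profitable to sink the address computation into both blocks
    ; (letting the backend fold the offset into the ds_read instruction).
    %addr = getelementptr i8, ptr addrspace(3) %p, i32 8192
    br i1 %c, label %a, label %b

  a:
    %va = call <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %addr)
    ret <4 x half> %va

  b:
    %vb = call <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %addr)
    ret <4 x half> %vb
  }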

From fe5ccf089485f5b21b0e52032e82bea7636f529f Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Thu, 18 Sep 2025 11:59:12 -0700
Subject: [PATCH] [CodeGenPrepare] Consider target memory intrinsics as memory
 use

---
 llvm/lib/CodeGen/CodeGenPrepare.cpp           |  16 ++
 .../AMDGPU/sink-addr-memory-intrinsics.ll     | 166 ++++++++++++++++++
 2 files changed, 182 insertions(+)
 create mode 100644 llvm/test/CodeGen/AMDGPU/sink-addr-memory-intrinsics.ll

diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index a190f0dac1379..ae499e773e7b2 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -5596,6 +5596,22 @@ static bool FindAllMemoryUses(
       continue;
     }
 
+    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(UserI)) {
+      // Treat target intrinsics that access memory through this address (as
+      // reported by getAddrModeArguments) as memory uses of the address.
+      SmallVector<Value *, 2> PtrOps;
+      Type *AccessTy;
+      if (!TLI.getAddrModeArguments(II, PtrOps, AccessTy))
+        return true;
+
+      Value *PtrVal = U.get();
+      if (!is_contained(PtrOps, PtrVal))
+        return true;
+
+      MemoryUses.push_back({&U, AccessTy});
+      continue;
+    }
+
     if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
       if (CI->hasFnAttr(Attribute::Cold)) {
         // If this is a cold call, we can sink the addressing calculation into
diff --git a/llvm/test/CodeGen/AMDGPU/sink-addr-memory-intrinsics.ll b/llvm/test/CodeGen/AMDGPU/sink-addr-memory-intrinsics.ll
new file mode 100644
index 0000000000000..b9838d8e39033
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/sink-addr-memory-intrinsics.ll
@@ -0,0 +1,166 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx950 < %s | FileCheck %s
+
+define protected amdgpu_kernel void @memoryIntrinsic(ptr addrspace(3) %inptr, i1 %cond, ptr addrspace(3) %outptr) {
+; CHECK-LABEL: memoryIntrinsic:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_bitcmp0_b32 s1, 0
+; CHECK-NEXT:    s_cbranch_scc0 .LBB0_2
+; CHECK-NEXT:  ; %bb.1: ; %else
+; CHECK-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-NEXT:    ds_read_b64_tr_b16 v[2:3], v0 offset:8192
+; CHECK-NEXT:    s_mov_b32 s1, 0x7060302
+; CHECK-NEXT:    s_mov_b32 s3, 0x5040100
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_perm_b32 v0, v3, v2, s1
+; CHECK-NEXT:    v_perm_b32 v1, v3, v2, s3
+; CHECK-NEXT:    s_cbranch_execz .LBB0_3
+; CHECK-NEXT:    s_branch .LBB0_4
+; CHECK-NEXT:  .LBB0_2:
+; CHECK-NEXT:    ; implicit-def: $vgpr1
+; CHECK-NEXT:  .LBB0_3: ; %then
+; CHECK-NEXT:    v_mov_b32_e32 v0, s0
+; CHECK-NEXT:    ds_read_b64_tr_b16 v[2:3], v0 offset:8192
+; CHECK-NEXT:    s_mov_b32 s0, 0x5040100
+; CHECK-NEXT:    s_mov_b32 s1, 0x7060302
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_perm_b32 v0, v3, v2, s0
+; CHECK-NEXT:    v_perm_b32 v1, v3, v2, s1
+; CHECK-NEXT:  .LBB0_4: ; %end
+; CHECK-NEXT:    v_mov_b32_e32 v2, s2
+; CHECK-NEXT:    ds_write_b64 v2, v[0:1]
+; CHECK-NEXT:    s_endpgm
+  %gep0 = getelementptr ptr addrspace(3), ptr addrspace(3) %inptr, i32 2048
+  br i1 %cond, label %then, label %else
+
+then:
+  %load0 = tail call contract <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %gep0)
+  %shuf0 = shufflevector <4 x half> %load0, <4 x half> %load0, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+  br label %end
+
+else:
+  %load1 = tail call contract <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %gep0)
+  %shuf1 = shufflevector <4 x half> %load1, <4 x half> %load1, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+  br label %end
+
+end:
+  %res = phi <4 x half> [ %shuf0, %then ], [ %shuf1, %else ]
+  store <4 x half> %res, ptr addrspace(3) %outptr
+  ret void
+}
+
+
+
+define protected amdgpu_kernel void @badIntrinsicUse(ptr addrspace(3) %inptr, i1 %cond, ptr addrspace(3) %outptr, <4 x i32> %rsrc) {
+; CHECK-LABEL: badIntrinsicUse:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_and_b32 s1, s1, 1
+; CHECK-NEXT:    s_add_i32 s3, s0, 0x2000
+; CHECK-NEXT:    s_cmp_eq_u32 s1, 0
+; CHECK-NEXT:    s_cbranch_scc0 .LBB1_2
+; CHECK-NEXT:  ; %bb.1: ; %else
+; CHECK-NEXT:    v_mov_b32_e32 v0, s3
+; CHECK-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x10
+; CHECK-NEXT:    ds_read_b64_tr_b16 v[2:3], v0
+; CHECK-NEXT:    s_mov_b32 s0, 0x7060302
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; CHECK-NEXT:    v_perm_b32 v0, v3, v2, s0
+; CHECK-NEXT:    s_mov_b32 s0, 0x5040100
+; CHECK-NEXT:    v_perm_b32 v1, v3, v2, s0
+; CHECK-NEXT:    s_cbranch_execz .LBB1_3
+; CHECK-NEXT:    s_branch .LBB1_4
+; CHECK-NEXT:  .LBB1_2:
+; CHECK-NEXT:    ; implicit-def: $vgpr1
+; CHECK-NEXT:  .LBB1_3: ; %then
+; CHECK-NEXT:    v_mov_b32_e32 v0, s3
+; CHECK-NEXT:    ds_read_b64_tr_b16 v[2:3], v0
+; CHECK-NEXT:    s_mov_b32 s0, 0x5040100
+; CHECK-NEXT:    s_mov_b32 s1, 0x7060302
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_perm_b32 v0, v3, v2, s0
+; CHECK-NEXT:    v_perm_b32 v1, v3, v2, s1
+; CHECK-NEXT:  .LBB1_4: ; %end
+; CHECK-NEXT:    v_mov_b32_e32 v2, s2
+; CHECK-NEXT:    ds_write_b64 v2, v[0:1]
+; CHECK-NEXT:    s_endpgm
+  %gep0 = getelementptr ptr addrspace(3), ptr addrspace(3) %inptr, i32 2048
+  br i1 %cond, label %then, label %else
+
+then:
+  %load0 = tail call contract <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %gep0)
+  %shuf0 = shufflevector <4 x half> %load0, <4 x half> %load0, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+  br label %end
+
+else:
+  %load1 = tail call contract <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %gep0)
+  call void @llvm.amdgcn.raw.buffer.store(ptr addrspace(3) %gep0, <4 x i32> %rsrc, i32 0, i32 0, i32 0)
+  %shuf1 = shufflevector <4 x half> %load1, <4 x half> %load1, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+  br label %end
+
+end:
+  %res = phi <4 x half> [ %shuf0, %then ], [ %shuf1, %else ]
+  store <4 x half> %res, ptr addrspace(3) %outptr
+  ret void
+}
+
+define protected amdgpu_kernel void @badIntrinsicUse2(ptr addrspace(3) %inptr, i1 %cond, ptr addrspace(3) %outptr, ptr addrspace(3) %outptr1) {
+; CHECK-LABEL: badIntrinsicUse2:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_and_b32 s1, s1, 1
+; CHECK-NEXT:    s_add_i32 s4, s0, 0x2000
+; CHECK-NEXT:    s_cmp_eq_u32 s1, 0
+; CHECK-NEXT:    s_cbranch_scc0 .LBB2_2
+; CHECK-NEXT:  ; %bb.1: ; %else
+; CHECK-NEXT:    v_mov_b32_e32 v0, s4
+; CHECK-NEXT:    ds_read_b64_tr_b16 v[2:3], v0
+; CHECK-NEXT:    v_mov_b32_e32 v0, s3
+; CHECK-NEXT:    v_mov_b32_e32 v1, s4
+; CHECK-NEXT:    s_mov_b32 s0, 0x7060302
+; CHECK-NEXT:    ds_write_b32 v0, v1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(1)
+; CHECK-NEXT:    v_perm_b32 v0, v3, v2, s0
+; CHECK-NEXT:    s_mov_b32 s0, 0x5040100
+; CHECK-NEXT:    v_perm_b32 v1, v3, v2, s0
+; CHECK-NEXT:    s_cbranch_execz .LBB2_3
+; CHECK-NEXT:    s_branch .LBB2_4
+; CHECK-NEXT:  .LBB2_2:
+; CHECK-NEXT:    ; implicit-def: $vgpr1
+; CHECK-NEXT:  .LBB2_3: ; %then
+; CHECK-NEXT:    v_mov_b32_e32 v0, s4
+; CHECK-NEXT:    ds_read_b64_tr_b16 v[2:3], v0
+; CHECK-NEXT:    s_mov_b32 s0, 0x5040100
+; CHECK-NEXT:    s_mov_b32 s1, 0x7060302
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_perm_b32 v0, v3, v2, s0
+; CHECK-NEXT:    v_perm_b32 v1, v3, v2, s1
+; CHECK-NEXT:  .LBB2_4: ; %end
+; CHECK-NEXT:    v_mov_b32_e32 v2, s2
+; CHECK-NEXT:    ds_write_b64 v2, v[0:1]
+; CHECK-NEXT:    s_endpgm
+  %gep0 = getelementptr ptr addrspace(3), ptr addrspace(3) %inptr, i32 2048
+  br i1 %cond, label %then, label %else
+
+then:
+  %load0 = tail call contract <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %gep0)
+  %shuf0 = shufflevector <4 x half> %load0, <4 x half> %load0, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+  br label %end
+
+else:
+  %load1 = tail call contract <4 x half> @llvm.amdgcn.ds.read.tr16.b64.v4f16(ptr addrspace(3) %gep0)
+  %gep1 = call ptr addrspace(3) @llvm.amdgcn.readfirstlane(ptr addrspace(3) %gep0)
+  store ptr addrspace(3) %gep1, ptr addrspace(3) %outptr1
+  %shuf1 = shufflevector <4 x half> %load1, <4 x half> %load1, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+  br label %end
+
+end:
+  %res = phi <4 x half> [ %shuf0, %then ], [ %shuf1, %else ]
+  store <4 x half> %res, ptr addrspace(3) %outptr
+  ret void
+}


