[llvm] [AMDGPU][LSV] Restrict large vectors in graphics (PR #92540)

Piotr Sobczak via llvm-commits llvm-commits at lists.llvm.org
Fri May 17 06:23:30 PDT 2024


https://github.com/piotrAMD created https://github.com/llvm/llvm-project/pull/92540

The patch restricts forming large (> 128b) vectors in the load-store-vectorizer for graphics.

In graphics, loads from the CONSTANT_ADDRESS space are primarily descriptor loads, and they already have the size required by the consuming instructions (128b or 256b).

For buffer loads the natural size is at most 128b, as that corresponds to the vec4 type frequently used in buffer declarations in various graphics APIs (colors, positions, normals).

Using larger sizes can be problematic for later passes, as they may cause code motion issues, register fragmentation, and inefficient spills (most of these are really deficiencies in handling subregisters).

However, in cases where adjacent loads end up close together, the late load-store-optimizer pass will often still merge them.
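
To illustrate the intent, here is a minimal hypothetical sketch (not one of the patch's tests; the function name and values are made up), assuming an amdgcn--amdpal triple, with two adjacent 128b loads from the constant address space:

  ; Hypothetical example: %p points at two adjacent 128b descriptors.
  define <4 x i32> @two_desc_loads(ptr addrspace(4) %p, i1 %sel) {
    %q = getelementptr i8, ptr addrspace(4) %p, i32 16
    %a = load <4 x i32>, ptr addrspace(4) %p, align 16
    %b = load <4 x i32>, ptr addrspace(4) %q, align 16
    %r = select i1 %sel, <4 x i32> %a, <4 x i32> %b
    ret <4 x i32> %r
  }

With the previous 512b limit, the load-store-vectorizer could merge %a and %b into a single load <8 x i32> (an s_load_dwordx8); with the 128b limit on AMDPAL, each descriptor stays a separate 128b load (s_load_dwordx4), as in the test update below. If the two loads remain adjacent after scheduling, the late load-store-optimizer can still combine them.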

From 02fec4f2f21eeb8757a1f3155ffb3f467d91f182 Mon Sep 17 00:00:00 2001
From: Piotr Sobczak <piotr.sobczak at amd.com>
Date: Fri, 17 May 2024 13:59:28 +0200
Subject: [PATCH] [AMDGPU][LSV] Restrict large vectors in graphics

The patch restricts forming large (> 128b) vectors in the load-store-vectorizer
for graphics.

In graphics, loads from the CONSTANT_ADDRESS space are primarily descriptor loads,
and they already have the size required by the consuming instructions (128b or 256b).

For buffer loads the natural size is at most 128b, as that corresponds to the
vec4 type frequently used in buffer declarations in various graphics APIs
(colors, positions, normals).

Using larger sizes can be problematic for later passes, as they may cause code motion
issues, register fragmentation, and inefficient spills (most of these are really
deficiencies in handling subregisters).

However, in cases where adjacent loads end up close together, the late
load-store-optimizer pass will often still merge them.
---
 .../AMDGPU/AMDGPUTargetTransformInfo.cpp      | 11 ++++----
 ...-divergent-i1-phis-no-lane-mask-merging.ll | 26 ++++++++++---------
 2 files changed, 20 insertions(+), 17 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 84320d296a037..9785e7d71bec2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -364,14 +364,15 @@ unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
 }
 
 unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
-  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
-      AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
+  if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS)
+    return 512;
+
+  if (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
       AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
       AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER ||
       AddrSpace == AMDGPUAS::BUFFER_RESOURCE ||
-      AddrSpace == AMDGPUAS::BUFFER_STRIDED_POINTER) {
-    return 512;
-  }
+      AddrSpace == AMDGPUAS::BUFFER_STRIDED_POINTER)
+    return ST->isAmdPalOS() ? 128 : 512;
 
   if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
     return 8 * ST->getMaxPrivateElementSize();
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
index d4d5cb18bbd30..6930e0d809177 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
@@ -227,14 +227,14 @@ define amdgpu_cs void @single_lane_execution_attribute(i32 inreg %.userdata0, <3
 ; GFX10-LABEL: single_lane_execution_attribute:
 ; GFX10:       ; %bb.0: ; %.entry
 ; GFX10-NEXT:    s_getpc_b64 s[4:5]
-; GFX10-NEXT:    s_mov_b32 s12, 0
-; GFX10-NEXT:    s_mov_b32 s13, -1
+; GFX10-NEXT:    s_mov_b32 s8, 0
+; GFX10-NEXT:    s_mov_b32 s9, -1
 ; GFX10-NEXT:    s_mov_b32 s2, s0
-; GFX10-NEXT:    s_and_b64 s[4:5], s[4:5], s[12:13]
-; GFX10-NEXT:    s_mov_b32 s3, s12
+; GFX10-NEXT:    s_and_b64 s[4:5], s[4:5], s[8:9]
+; GFX10-NEXT:    s_mov_b32 s3, s8
 ; GFX10-NEXT:    v_mbcnt_lo_u32_b32 v1, -1, 0
 ; GFX10-NEXT:    s_or_b64 s[2:3], s[4:5], s[2:3]
-; GFX10-NEXT:    s_load_dwordx8 s[4:11], s[2:3], 0x0
+; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x0
 ; GFX10-NEXT:    v_mbcnt_hi_u32_b32 v1, -1, v1
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v2, 2, v1
 ; GFX10-NEXT:    v_and_b32_e32 v3, 1, v1
@@ -248,8 +248,8 @@ define amdgpu_cs void @single_lane_execution_attribute(i32 inreg %.userdata0, <3
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 0, v2
 ; GFX10-NEXT:    s_cbranch_vccnz .LBB4_4
 ; GFX10-NEXT:  ; %bb.1: ; %.preheader.preheader
-; GFX10-NEXT:    v_mov_b32_e32 v3, s12
-; GFX10-NEXT:    v_mov_b32_e32 v4, s12
+; GFX10-NEXT:    v_mov_b32_e32 v3, s8
+; GFX10-NEXT:    v_mov_b32_e32 v4, s8
 ; GFX10-NEXT:  .LBB4_2: ; %.preheader
 ; GFX10-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT:    buffer_load_dword v5, v3, s[4:7], 0 offen
@@ -261,18 +261,20 @@ define amdgpu_cs void @single_lane_execution_attribute(i32 inreg %.userdata0, <3
 ; GFX10-NEXT:    s_cbranch_vccnz .LBB4_2
 ; GFX10-NEXT:  ; %bb.3: ; %.preheader._crit_edge
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT:    s_mov_b32 s13, 0
-; GFX10-NEXT:    s_or_b32 s2, s0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s2
+; GFX10-NEXT:    s_mov_b32 s9, 0
+; GFX10-NEXT:    s_or_b32 s4, s0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX10-NEXT:  .LBB4_4: ; %Flow
-; GFX10-NEXT:    s_and_b32 vcc_lo, exec_lo, s13
+; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x10
+; GFX10-NEXT:    s_and_b32 vcc_lo, exec_lo, s9
 ; GFX10-NEXT:    s_cbranch_vccz .LBB4_6
 ; GFX10-NEXT:  ; %bb.5: ; %.19
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s0
 ; GFX10-NEXT:    v_or_b32_e32 v3, 2, v1
 ; GFX10-NEXT:  .LBB4_6: ; %.22
 ; GFX10-NEXT:    v_add_lshl_u32 v0, v0, s1, 2
-; GFX10-NEXT:    buffer_store_dword v3, v0, s[8:11], 0 offen
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    buffer_store_dword v3, v0, s[4:7], 0 offen
 ; GFX10-NEXT:    s_endpgm
 .entry:
   %.0 = call i64 @llvm.amdgcn.s.getpc()


