[llvm] AMDGPU/GlobalISel: Add regbanklegalize rules for uniform global loads (PR #145884)
Petar Avramovic via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 26 05:58:24 PDT 2025
https://github.com/petar-avramovic created https://github.com/llvm/llvm-project/pull/145884
None
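
The diff below extends the RegBankLegalize load rules: uniform global (UniP1) loads of B64, B96, and B128 whose memory operand fails the isAlign4/isUL checks are now mapped to UniInVgprB64/B96/B128 with an SgprP1 pointer, matching the existing B32 rule, and a new llc test covers the resulting global_load + v_readfirstlane lowering. As a minimal illustration (not part of the patch; the function name and the scalar i64 type are hypothetical), IR like the following now hits the new {UniB64, UniP1} rule:

define amdgpu_ps void @sample_uniform_load_i64(ptr addrspace(1) inreg %in,
                                               ptr addrspace(1) inreg %out) {
  ; volatile keeps this load off the scalar s_load path, so it matches the
  ; new {UniB64, UniP1} rule and is legalized via UniInVgprB64: a VGPR
  ; global load whose uniform result is moved back to SGPRs with
  ; v_readfirstlane (compare the uniform_load_64 test in the patch).
  %val = load volatile i64, ptr addrspace(1) %in
  store i64 %val, ptr addrspace(1) %out
  ret void
}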
From d76a4e6cc8202e6a43ae78af5fa98ffe141d1566 Mon Sep 17 00:00:00 2001
From: Petar Avramovic <Petar.Avramovic at amd.com>
Date: Thu, 26 Jun 2025 13:02:03 +0200
Subject: [PATCH] AMDGPU/GlobalISel: Add regbanklegalize rules for uniform
global loads
---
.../AMDGPU/AMDGPURegBankLegalizeRules.cpp | 3 +
.../CodeGen/AMDGPU/GlobalISel/load-uniform.ll | 95 +++++++++++++++++++
2 files changed, 98 insertions(+)
create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/load-uniform.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
index a60855cc4f2d6..41d24c8797426 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
@@ -671,6 +671,9 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
.Any({{{UniB256, UniP1}, isAlign4 && isUL}, {{SgprB256}, {SgprP1}}})
.Any({{{UniB512, UniP1}, isAlign4 && isUL}, {{SgprB512}, {SgprP1}}})
.Any({{{UniB32, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB32}, {SgprP1}}})
+ .Any({{{UniB64, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB64}, {SgprP1}}})
+ .Any({{{UniB96, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB96}, {SgprP1}}})
+ .Any({{{UniB128, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB128}, {SgprP1}}})
.Any({{{UniB256, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB256}, {VgprP1}, SplitLoad}})
.Any({{{UniB512, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB512}, {VgprP1}, SplitLoad}})
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/load-uniform.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/load-uniform.ll
new file mode 100644
index 0000000000000..d7dbde3a2bb00
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/load-uniform.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck %s
+
+define amdgpu_ps void @uniform_load_32(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1, ptr addrspace(1) inreg %ptr2) {
+; CHECK-LABEL: uniform_load_32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: global_load_dword v1, v0, s[0:1] glc dlc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_load_dword v2, v0, s[2:3] glc dlc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_readfirstlane_b32 s0, v1
+; CHECK-NEXT: v_readfirstlane_b32 s1, v2
+; CHECK-NEXT: s_add_i32 s0, s0, s1
+; CHECK-NEXT: v_mov_b32_e32 v1, s0
+; CHECK-NEXT: global_store_dword v0, v1, s[4:5]
+; CHECK-NEXT: s_endpgm
+ %load0 = load volatile i32, ptr addrspace(1) %ptr0
+ %load1 = load volatile i32, ptr addrspace(1) %ptr1
+ %sum = add i32 %load0, %load1
+ store i32 %sum, ptr addrspace(1) %ptr2
+ ret void
+}
+
+define amdgpu_ps void @uniform_load_64(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1) {
+; CHECK-LABEL: uniform_load_64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: v_mov_b32_e32 v2, 0
+; CHECK-NEXT: global_load_dwordx2 v[0:1], v2, s[0:1] glc dlc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_readfirstlane_b32 s0, v0
+; CHECK-NEXT: v_readfirstlane_b32 s1, v1
+; CHECK-NEXT: s_add_i32 s0, s0, s1
+; CHECK-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-NEXT: global_store_dword v2, v0, s[2:3]
+; CHECK-NEXT: s_endpgm
+ %load = load volatile <2 x i32>, ptr addrspace(1) %ptr0
+ %elt0 = extractelement <2 x i32> %load, i32 0
+ %elt1 = extractelement <2 x i32> %load, i32 1
+ %sum = add i32 %elt0, %elt1
+ store i32 %sum, ptr addrspace(1) %ptr1
+ ret void
+}
+
+define amdgpu_ps void @uniform_load_96(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1) {
+; CHECK-LABEL: uniform_load_96:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: v_mov_b32_e32 v3, 0
+; CHECK-NEXT: global_load_dwordx3 v[0:2], v3, s[0:1] glc dlc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_readfirstlane_b32 s0, v0
+; CHECK-NEXT: v_readfirstlane_b32 s1, v1
+; CHECK-NEXT: v_readfirstlane_b32 s4, v2
+; CHECK-NEXT: s_add_i32 s0, s0, s1
+; CHECK-NEXT: s_add_i32 s0, s0, s4
+; CHECK-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-NEXT: global_store_dword v3, v0, s[2:3]
+; CHECK-NEXT: s_endpgm
+ %load = load volatile <3 x i32>, ptr addrspace(1) %ptr0
+ %elt0 = extractelement <3 x i32> %load, i32 0
+ %elt1 = extractelement <3 x i32> %load, i32 1
+ %elt2 = extractelement <3 x i32> %load, i32 2
+ %sum0 = add i32 %elt0, %elt1
+ %sum = add i32 %sum0, %elt2
+ store i32 %sum, ptr addrspace(1) %ptr1
+ ret void
+}
+
+define amdgpu_ps void @uniform_load_128(ptr addrspace(1) inreg %ptr0, ptr addrspace(1) inreg %ptr1) {
+; CHECK-LABEL: uniform_load_128:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: v_mov_b32_e32 v4, 0
+; CHECK-NEXT: global_load_dwordx4 v[0:3], v4, s[0:1] glc dlc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_readfirstlane_b32 s0, v0
+; CHECK-NEXT: v_readfirstlane_b32 s1, v1
+; CHECK-NEXT: v_readfirstlane_b32 s4, v2
+; CHECK-NEXT: v_readfirstlane_b32 s5, v3
+; CHECK-NEXT: s_add_i32 s0, s0, s1
+; CHECK-NEXT: s_add_i32 s0, s0, s4
+; CHECK-NEXT: s_add_i32 s0, s0, s5
+; CHECK-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-NEXT: global_store_dword v4, v0, s[2:3]
+; CHECK-NEXT: s_endpgm
+ %load = load volatile <4 x i32>, ptr addrspace(1) %ptr0
+ %elt0 = extractelement <4 x i32> %load, i32 0
+ %elt1 = extractelement <4 x i32> %load, i32 1
+ %elt2 = extractelement <4 x i32> %load, i32 2
+ %elt3 = extractelement <4 x i32> %load, i32 3
+ %sum0 = add i32 %elt0, %elt1
+ %sum1 = add i32 %sum0, %elt2
+ %sum = add i32 %sum1, %elt3
+ store i32 %sum, ptr addrspace(1) %ptr1
+ ret void
+}