[llvm-branch-commits] [llvm] AMDGPU: Handle folding frame indexes into add with immediate (PR #110738)
Matt Arsenault via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Oct 1 13:41:33 PDT 2024
https://github.com/arsenm created https://github.com/llvm/llvm-project/pull/110738
Frame index materialization can fold the constant offset into adds with immediates. The mubuf expansion is more complicated because we also have to insert the shift, so restrict this to one use for now.

This is preparation to avoid regressions in a future patch.

This also misses some cases due to visitation order; it depends on the immediate already having been folded into the instruction.
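
For illustration, a minimal before/after sketch of the fold on a scalar add, mirroring the MIR test updates below (virtual register numbers are illustrative):

  Before:
    %0:sreg_32 = S_MOV_B32 %stack.0
    %1:sreg_32 = S_ADD_I32 %0, 128, implicit-def $scc

  After, the frame index folds directly into the add:
    %1:sreg_32 = S_ADD_I32 %stack.0, 128, implicit-def $scc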
From fccd15f8530a1d685046e2f7ee248677380f0749 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Tue, 1 Oct 2024 23:53:51 +0400
Subject: [PATCH] AMDGPU: Handle folding frame indexes into add with immediate
Frame index materialization can fold the constant offset into
adds with immediates. The mubuf expansion is more complicated because
we have to also insert the shift, so restrict this to one use for now.
This is preparation to avoid regressions in a future patch.
This also misses some cases due to visitation order. It depends on
the immediate already folding into the instruction.
---
llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 14 ++++++++++++++
llvm/test/CodeGen/AMDGPU/flat-scratch.ll | 6 ++----
.../AMDGPU/fold-operands-frame-index.gfx10.mir | 3 +--
.../AMDGPU/fold-operands-frame-index.mir | 18 ++++++------------
.../materialize-frame-index-sgpr.gfx10.ll | 6 +++---
.../AMDGPU/materialize-frame-index-sgpr.ll | 12 ++++++------
6 files changed, 32 insertions(+), 27 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 1e2c77b08b9a63..fea84247f5ad88 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -194,6 +194,20 @@ bool SIFoldOperandsImpl::frameIndexMayFold(
return false;
const unsigned Opc = UseMI.getOpcode();
+  switch (Opc) {
+  case AMDGPU::S_ADD_I32:
+  case AMDGPU::V_ADD_U32_e32:
+  case AMDGPU::V_ADD_CO_U32_e32:
+    // TODO: Handle e64 variants
+    // TODO: Possibly relax hasOneUse. It matters more for mubuf, since we have
+    // to insert the wave size shift at every point we use the index.
+    // TODO: Fix depending on visit order to fold immediates into the operand
+    return UseMI.getOperand(OpNo == 1 ? 2 : 1).isImm() &&
+           MRI->hasOneNonDBGUse(UseMI.getOperand(OpNo).getReg());
+  default:
+    break;
+  }
+
if (TII->isMUBUF(UseMI))
return OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
if (!TII->isFLATScratch(UseMI))
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
index ef9590b3fd33fa..af0b6360527016 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
@@ -4705,8 +4705,7 @@ define amdgpu_ps void @large_offset() {
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s0
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s1
; GFX10-NEXT: v_mov_b32_e32 v0, 0
-; GFX10-NEXT: s_movk_i32 s0, 0x810
-; GFX10-NEXT: s_addk_i32 s0, 0x3c0
+; GFX10-NEXT: s_movk_i32 s0, 0xbd0
; GFX10-NEXT: v_mov_b32_e32 v1, v0
; GFX10-NEXT: v_mov_b32_e32 v2, v0
; GFX10-NEXT: v_mov_b32_e32 v3, v0
@@ -4823,8 +4822,7 @@ define amdgpu_ps void @large_offset() {
; GFX10-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s2
; GFX10-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s3
; GFX10-PAL-NEXT: v_mov_b32_e32 v0, 0
-; GFX10-PAL-NEXT: s_movk_i32 s0, 0x810
-; GFX10-PAL-NEXT: s_addk_i32 s0, 0x3c0
+; GFX10-PAL-NEXT: s_movk_i32 s0, 0xbd0
; GFX10-PAL-NEXT: v_mov_b32_e32 v1, v0
; GFX10-PAL-NEXT: v_mov_b32_e32 v2, v0
; GFX10-PAL-NEXT: v_mov_b32_e32 v3, v0
diff --git a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.gfx10.mir b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.gfx10.mir
index 76183ece264ffc..d26d7dd4a98d9e 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.gfx10.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.gfx10.mir
@@ -13,8 +13,7 @@ stack:
body: |
bb.0:
; CHECK-LABEL: name: fold_frame_index__v_add_u32_e32__const_v_fi
- ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
- ; CHECK-NEXT: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 128, [[V_MOV_B32_e32_]], implicit $exec
+ ; CHECK: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 128, %stack.0, implicit $exec
; CHECK-NEXT: $vgpr0 = COPY [[V_ADD_U32_e32_]]
; CHECK-NEXT: SI_RETURN implicit $vgpr0
%0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir
index da094745ec3d47..3169d31548b206 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir
@@ -14,8 +14,7 @@ stack:
body: |
bb.0:
; CHECK-LABEL: name: fold_frame_index__s_add_i32__fi_const
- ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0
- ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_MOV_B32_]], 128, implicit-def $scc
+ ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 %stack.0, 128, implicit-def $scc
; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]]
; CHECK-NEXT: SI_RETURN implicit $sgpr4
%0:sreg_32 = S_MOV_B32 %stack.0
@@ -35,8 +34,7 @@ stack:
body: |
bb.0:
; CHECK-LABEL: name: fold_frame_index__s_add_i32__const_fi
- ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0
- ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 128, [[S_MOV_B32_]], implicit-def $scc
+ ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 128, %stack.0, implicit-def $scc
; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]]
; CHECK-NEXT: SI_RETURN implicit $sgpr4
%0:sreg_32 = S_MOV_B32 %stack.0
@@ -56,8 +54,7 @@ stack:
body: |
bb.0:
; CHECK-LABEL: name: fold_frame_index__s_add_i32__materializedconst_fi
- ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0
- ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 256, [[S_MOV_B32_]], implicit-def $scc
+ ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 256, %stack.0, implicit-def $scc
; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]]
; CHECK-NEXT: SI_RETURN implicit $sgpr4
%0:sreg_32 = S_MOV_B32 256
@@ -101,8 +98,7 @@ stack:
body: |
bb.0:
; CHECK-LABEL: name: fold_frame_index__s_add_i32__fi_materializedconst_1
- ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0
- ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 256, [[S_MOV_B32_]], implicit-def $scc
+ ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 256, %stack.0, implicit-def $scc
; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]]
; CHECK-NEXT: SI_RETURN implicit $sgpr4
%0:sreg_32 = S_MOV_B32 256
@@ -173,8 +169,7 @@ stack:
body: |
bb.0:
; CHECK-LABEL: name: fold_frame_index__v_add_u32_e32__const_v_fi
- ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
- ; CHECK-NEXT: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 128, [[V_MOV_B32_e32_]], implicit $exec
+ ; CHECK: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 128, %stack.0, implicit $exec
; CHECK-NEXT: $sgpr4 = COPY [[V_ADD_U32_e32_]]
; CHECK-NEXT: SI_RETURN implicit $sgpr4
%0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
@@ -217,8 +212,7 @@ stack:
body: |
bb.0:
; CHECK-LABEL: name: fold_frame_index__v_add_co_u32_e32__const_v_fi
- ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
- ; CHECK-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 128, [[V_MOV_B32_e32_]], implicit-def $vcc, implicit $exec
+ ; CHECK: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 128, %stack.0, implicit-def $vcc, implicit $exec
; CHECK-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e32_]]
; CHECK-NEXT: SI_RETURN implicit $vgpr0
%0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.gfx10.ll b/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.gfx10.ll
index 8a789a4c6cda9b..7708c0e4767cf2 100644
--- a/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.gfx10.ll
+++ b/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.gfx10.ll
@@ -1550,10 +1550,10 @@ define void @scalar_mov_materializes_frame_index_unavailable_scc__gep_immoffset(
; GFX8-NEXT: s_add_i32 s6, s32, 0x201000
; GFX8-NEXT: buffer_store_dword v2, off, s[0:3], s6 ; 4-byte Folded Spill
; GFX8-NEXT: s_mov_b64 exec, s[4:5]
-; GFX8-NEXT: v_lshrrev_b32_e64 v0, 6, s32
+; GFX8-NEXT: v_lshrrev_b32_e64 v1, 6, s32
; GFX8-NEXT: s_movk_i32 vcc_lo, 0x4040
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, vcc_lo, v0
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0x3ec, v0
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, vcc_lo, v1
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0x3ec, v1
; GFX8-NEXT: v_writelane_b32 v2, s59, 0
; GFX8-NEXT: v_lshrrev_b32_e64 v1, 6, s32
; GFX8-NEXT: v_readfirstlane_b32 s59, v0
diff --git a/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.ll b/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.ll
index e9cd94620a6b9a..54b3f97d456820 100644
--- a/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/materialize-frame-index-sgpr.ll
@@ -1582,12 +1582,12 @@ define void @scalar_mov_materializes_frame_index_no_live_scc_no_live_sgprs_gep_i
; GFX7-NEXT: buffer_store_dword v15, v16, s[0:3], s32 offen offset:60 ; 4-byte Folded Spill
; GFX7-NEXT: buffer_load_dword v16, off, s[0:3], s32
; GFX7-NEXT: ; implicit-def: $vgpr22 : SGPR spill to VGPR lane
-; GFX7-NEXT: v_lshr_b32_e64 v0, s32, 6
+; GFX7-NEXT: v_lshr_b32_e64 v1, s32, 6
; GFX7-NEXT: v_writelane_b32 v22, vcc_lo, 0
; GFX7-NEXT: v_writelane_b32 v22, vcc_hi, 1
; GFX7-NEXT: s_movk_i32 vcc_lo, 0x4040
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, vcc_lo, v0
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 0x200, v0
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, vcc_lo, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 0x200, v1
; GFX7-NEXT: v_writelane_b32 v23, s59, 27
; GFX7-NEXT: v_readfirstlane_b32 s59, v0
; GFX7-NEXT: s_and_b64 vcc, 0, exec
@@ -1723,12 +1723,12 @@ define void @scalar_mov_materializes_frame_index_no_live_scc_no_live_sgprs_gep_i
; GFX8-NEXT: buffer_store_dword v15, v16, s[0:3], s32 offen offset:60 ; 4-byte Folded Spill
; GFX8-NEXT: buffer_load_dword v16, off, s[0:3], s32
; GFX8-NEXT: ; implicit-def: $vgpr22 : SGPR spill to VGPR lane
-; GFX8-NEXT: v_lshrrev_b32_e64 v0, 6, s32
+; GFX8-NEXT: v_lshrrev_b32_e64 v1, 6, s32
; GFX8-NEXT: v_writelane_b32 v22, vcc_lo, 0
; GFX8-NEXT: v_writelane_b32 v22, vcc_hi, 1
; GFX8-NEXT: s_movk_i32 vcc_lo, 0x4040
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, vcc_lo, v0
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0x200, v0
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, vcc_lo, v1
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0x200, v1
; GFX8-NEXT: v_writelane_b32 v23, s59, 27
; GFX8-NEXT: v_readfirstlane_b32 s59, v0
; GFX8-NEXT: s_and_b64 vcc, 0, exec