[llvm] [AMDGPU] Do not fold COPY with implicit operands (PR #136003)
Mariusz Sikora via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 17 23:05:03 PDT 2025
https://github.com/mariusz-sikora-at-amd updated https://github.com/llvm/llvm-project/pull/136003
>From c126d06e65281111b7065a7d3010da6432b9afcc Mon Sep 17 00:00:00 2001
From: Mariusz Sikora <mariusz.sikora at amd.com>
Date: Wed, 16 Apr 2025 17:27:13 +0200
Subject: [PATCH 1/5] [AMDGPU] Do not fold COPY with implicit use of exec
Folding may remove a COPY from inside a divergent loop, hoisting the value out of the exec-dependent region and changing which lanes observe it.
---
llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 3 +-
llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir | 36 +++
.../AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll | 5 +-
llvm/test/CodeGen/AMDGPU/mfma-loop.ll | 216 ++++++++----------
llvm/test/CodeGen/AMDGPU/mul.ll | 6 +-
5 files changed, 140 insertions(+), 126 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index d6acf9e081b9f..542a992ccb96e 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1092,7 +1092,8 @@ void SIFoldOperandsImpl::foldOperand(
} else {
if (UseMI->isCopy() && OpToFold.isReg() &&
UseMI->getOperand(0).getReg().isVirtual() &&
- !UseMI->getOperand(1).getSubReg()) {
+ !UseMI->getOperand(1).getSubReg() &&
+ !OpToFold.getParent()->hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
LLVM_DEBUG(dbgs() << "Folding " << OpToFold << "\n into " << *UseMI);
unsigned Size = TII->getOpSize(*UseMI, 1);
Register UseReg = OpToFold.getReg();
diff --git a/llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir b/llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir
new file mode 100644
index 0000000000000..ef4b27d169224
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir
@@ -0,0 +1,36 @@
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=si-fold-operands -verify-machineinstrs -o - %s | FileCheck %s
+
+---
+liveins:
+name: do_not_fold_copy_with_implicit_exec
+tracksRegLiveness: true
+body: |
+ ; CHECK: bb.0:
+ ; CHECK: bb.1:
+ ; CHECK: %[[A:[0-9]*]]:sreg_32 = S_ADD_I32
+ ; CHECK: COPY %[[A]]
+ ; CHECKL SI_LOOP
+ ; CHECK: bb.2:
+
+ bb.0:
+ %0:sreg_64 = S_MOV_B64 0
+ %1:sreg_64 = S_MOV_B64 0
+ %2:sreg_32 = S_MOV_B32 0, implicit $exec
+ S_BRANCH %bb.1
+
+ bb.1:
+ %3:sreg_64 = PHI %1, %bb.0, %4, %bb.1
+ %5:sreg_32 = PHI %2, %bb.0, %6, %bb.1
+ %6:sreg_32 = S_ADD_I32 %5, 1, implicit-def dead $scc
+ %4:sreg_64 = SI_IF_BREAK %0, %3, implicit-def dead $scc
+ %7:vgpr_32 = COPY %6, implicit $exec
+ SI_LOOP %4, %bb.1, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.2
+
+ bb.2:
+ SI_END_CF %4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ %9:vgpr_32 = COPY %7
+ %10:sreg_64_xexec = IMPLICIT_DEF
+ %11:vgpr_32 = V_SET_INACTIVE_B32 0, %9, 0, 0, killed %10, implicit $exec
+ S_ENDPGM 0
+...
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
index 1e2bf8256321d..c2f8c2c44316a 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
@@ -429,13 +429,14 @@ define amdgpu_cs_chain void @control_flow(<3 x i32> inreg %sgpr, ptr inreg %call
; DAGISEL12-NEXT: v_cmp_ne_u32_e64 s9, 0, v0
; DAGISEL12-NEXT: s_mov_b32 exec_lo, s8
; DAGISEL12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v13, v1
+; DAGISEL12-NEXT: v_mov_b32_e32 v11, s9
; DAGISEL12-NEXT: s_or_b32 s4, vcc_lo, s4
; DAGISEL12-NEXT: s_wait_alu 0xfffe
; DAGISEL12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; DAGISEL12-NEXT: s_cbranch_execnz .LBB3_2
; DAGISEL12-NEXT: ; %bb.3: ; %tail.loopexit
; DAGISEL12-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; DAGISEL12-NEXT: v_dual_mov_b32 v11, s9 :: v_dual_add_nc_u32 v10, 42, v1
+; DAGISEL12-NEXT: v_add_nc_u32_e32 v10, 42, v1
; DAGISEL12-NEXT: .LBB3_4: ; %Flow1
; DAGISEL12-NEXT: s_wait_alu 0xfffe
; DAGISEL12-NEXT: s_or_b32 exec_lo, exec_lo, s3
@@ -526,13 +527,13 @@ define amdgpu_cs_chain void @control_flow(<3 x i32> inreg %sgpr, ptr inreg %call
; DAGISEL10-NEXT: v_cmp_ne_u32_e64 s9, 0, v0
; DAGISEL10-NEXT: s_mov_b32 exec_lo, s8
; DAGISEL10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v13, v1
+; DAGISEL10-NEXT: v_mov_b32_e32 v11, s9
; DAGISEL10-NEXT: s_or_b32 s4, vcc_lo, s4
; DAGISEL10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; DAGISEL10-NEXT: s_cbranch_execnz .LBB3_2
; DAGISEL10-NEXT: ; %bb.3: ; %tail.loopexit
; DAGISEL10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; DAGISEL10-NEXT: v_add_nc_u32_e32 v10, 42, v1
-; DAGISEL10-NEXT: v_mov_b32_e32 v11, s9
; DAGISEL10-NEXT: .LBB3_4: ; %Flow1
; DAGISEL10-NEXT: s_or_b32 exec_lo, exec_lo, s3
; DAGISEL10-NEXT: s_mov_b32 s3, exec_lo
diff --git a/llvm/test/CodeGen/AMDGPU/mfma-loop.ll b/llvm/test/CodeGen/AMDGPU/mfma-loop.ll
index d0042bb692402..a0d587ac68ff1 100644
--- a/llvm/test/CodeGen/AMDGPU/mfma-loop.ll
+++ b/llvm/test/CodeGen/AMDGPU/mfma-loop.ll
@@ -1425,41 +1425,42 @@ define amdgpu_kernel void @test_mfma_loop_sgpr_init(ptr addrspace(1) %arg, float
; GFX90A: ; %bb.0: ; %entry
; GFX90A-NEXT: s_load_dword s1, s[4:5], 0x2c
; GFX90A-NEXT: s_mov_b32 s0, 16
-; GFX90A-NEXT: v_mov_b32_e32 v0, 2.0
; GFX90A-NEXT: v_mov_b32_e32 v1, 1.0
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_accvgpr_write_b32 a31, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a30, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a29, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a28, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a27, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a26, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a25, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a24, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a23, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a22, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a21, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a20, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a19, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a18, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a17, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a16, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a15, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a14, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a13, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a12, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a11, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a10, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a9, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a8, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a7, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a6, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a5, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a4, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a3, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a2, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a1, s1
-; GFX90A-NEXT: v_accvgpr_write_b32 a0, s1
+; GFX90A-NEXT: v_mov_b32_e32 v0, s1
+; GFX90A-NEXT: v_accvgpr_write_b32 a31, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a30, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a29, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a28, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a27, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a26, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a25, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a24, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a23, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a22, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a21, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a20, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a19, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a18, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a17, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a16, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a15, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a14, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a13, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a12, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a11, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a10, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a9, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a8, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a7, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a6, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a5, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a4, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a3, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a2, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a1, v0
+; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
+; GFX90A-NEXT: v_mov_b32_e32 v0, 2.0
; GFX90A-NEXT: .LBB5_1: ; %for.cond.preheader
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_nop 1
@@ -1487,41 +1488,42 @@ define amdgpu_kernel void @test_mfma_loop_sgpr_init(ptr addrspace(1) %arg, float
; GFX942: ; %bb.0: ; %entry
; GFX942-NEXT: s_load_dword s1, s[4:5], 0x2c
; GFX942-NEXT: s_mov_b32 s0, 16
-; GFX942-NEXT: v_mov_b32_e32 v0, 2.0
; GFX942-NEXT: v_mov_b32_e32 v1, 1.0
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_accvgpr_write_b32 a31, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a30, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a29, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a28, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a27, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a26, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a25, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a24, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a23, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a22, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a21, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a20, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a19, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a18, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a17, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a16, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a15, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a14, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a13, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a12, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a11, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a10, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a9, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a8, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a7, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a6, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a5, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a4, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a3, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a2, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a1, s1
-; GFX942-NEXT: v_accvgpr_write_b32 a0, s1
+; GFX942-NEXT: v_mov_b32_e32 v0, s1
+; GFX942-NEXT: v_accvgpr_write_b32 a31, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a30, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a29, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a28, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a27, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a26, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a25, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a24, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a23, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a22, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a21, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a20, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a19, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a18, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a17, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a16, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a15, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a14, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a13, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a12, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a11, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a10, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a9, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a8, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a7, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a6, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a5, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a4, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a3, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a2, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a1, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a0, v0
+; GFX942-NEXT: v_mov_b32_e32 v0, 2.0
; GFX942-NEXT: .LBB5_1: ; %for.cond.preheader
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_nop 1
@@ -1696,6 +1698,8 @@ define amdgpu_kernel void @test_mfma_loop_mixed_init(ptr addrspace(1) %arg, floa
; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
; GFX90A-NEXT: v_accvgpr_write_b32 a31, 0
; GFX90A-NEXT: v_accvgpr_write_b32 a30, 0
+; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
+; GFX90A-NEXT: v_mov_b32_e32 v0, s1
; GFX90A-NEXT: v_accvgpr_write_b32 a29, 0
; GFX90A-NEXT: v_accvgpr_write_b32 a28, 0
; GFX90A-NEXT: v_accvgpr_write_b32 a27, 0
@@ -1725,8 +1729,7 @@ define amdgpu_kernel void @test_mfma_loop_mixed_init(ptr addrspace(1) %arg, floa
; GFX90A-NEXT: v_accvgpr_write_b32 a3, 0
; GFX90A-NEXT: v_accvgpr_write_b32 a2, 0
; GFX90A-NEXT: s_mov_b32 s0, 16
-; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_accvgpr_write_b32 a1, s1
+; GFX90A-NEXT: v_accvgpr_write_b32 a1, v0
; GFX90A-NEXT: v_mov_b32_e32 v0, 2.0
; GFX90A-NEXT: v_mov_b32_e32 v1, 1.0
; GFX90A-NEXT: .LBB6_1: ; %for.cond.preheader
@@ -1759,6 +1762,8 @@ define amdgpu_kernel void @test_mfma_loop_mixed_init(ptr addrspace(1) %arg, floa
; GFX942-NEXT: v_accvgpr_write_b32 a0, v0
; GFX942-NEXT: v_accvgpr_write_b32 a31, 0
; GFX942-NEXT: v_accvgpr_write_b32 a30, 0
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: v_mov_b32_e32 v0, s1
; GFX942-NEXT: v_accvgpr_write_b32 a29, 0
; GFX942-NEXT: v_accvgpr_write_b32 a28, 0
; GFX942-NEXT: v_accvgpr_write_b32 a27, 0
@@ -1788,8 +1793,7 @@ define amdgpu_kernel void @test_mfma_loop_mixed_init(ptr addrspace(1) %arg, floa
; GFX942-NEXT: v_accvgpr_write_b32 a3, 0
; GFX942-NEXT: v_accvgpr_write_b32 a2, 0
; GFX942-NEXT: s_mov_b32 s0, 16
-; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_accvgpr_write_b32 a1, s1
+; GFX942-NEXT: v_accvgpr_write_b32 a1, v0
; GFX942-NEXT: v_mov_b32_e32 v0, 2.0
; GFX942-NEXT: v_mov_b32_e32 v1, 1.0
; GFX942-NEXT: .LBB6_1: ; %for.cond.preheader
@@ -2050,66 +2054,38 @@ define amdgpu_kernel void @test_mfma_loop_agpr_init(ptr addrspace(1) %arg) #0 {
; GFX908-NEXT: s_nop 7
; GFX908-NEXT: s_nop 1
; GFX908-NEXT: v_accvgpr_read_b32 v2, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v3, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v33, a0
+; GFX908-NEXT: s_nop 1
+; GFX908-NEXT: v_accvgpr_write_b32 a0, v2
; GFX908-NEXT: v_accvgpr_write_b32 a1, v2
-; GFX908-NEXT: v_accvgpr_read_b32 v2, a0
-; GFX908-NEXT: v_accvgpr_write_b32 a2, v3
-; GFX908-NEXT: v_accvgpr_write_b32 a3, v33
+; GFX908-NEXT: v_accvgpr_write_b32 a2, v2
+; GFX908-NEXT: v_accvgpr_write_b32 a3, v2
; GFX908-NEXT: v_accvgpr_write_b32 a4, v2
-; GFX908-NEXT: v_accvgpr_read_b32 v3, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v33, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v2, a0
-; GFX908-NEXT: v_accvgpr_write_b32 a5, v3
-; GFX908-NEXT: v_accvgpr_write_b32 a6, v33
+; GFX908-NEXT: v_accvgpr_write_b32 a5, v2
+; GFX908-NEXT: v_accvgpr_write_b32 a6, v2
; GFX908-NEXT: v_accvgpr_write_b32 a7, v2
-; GFX908-NEXT: v_accvgpr_read_b32 v3, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v33, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v2, a0
-; GFX908-NEXT: v_accvgpr_write_b32 a8, v3
-; GFX908-NEXT: v_accvgpr_write_b32 a9, v33
+; GFX908-NEXT: v_accvgpr_write_b32 a8, v2
+; GFX908-NEXT: v_accvgpr_write_b32 a9, v2
; GFX908-NEXT: v_accvgpr_write_b32 a10, v2
-; GFX908-NEXT: v_accvgpr_read_b32 v3, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v33, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v2, a0
-; GFX908-NEXT: v_accvgpr_write_b32 a11, v3
-; GFX908-NEXT: v_accvgpr_write_b32 a12, v33
+; GFX908-NEXT: v_accvgpr_write_b32 a11, v2
+; GFX908-NEXT: v_accvgpr_write_b32 a12, v2
; GFX908-NEXT: v_accvgpr_write_b32 a13, v2
-; GFX908-NEXT: v_accvgpr_read_b32 v3, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v33, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v2, a0
-; GFX908-NEXT: v_accvgpr_write_b32 a14, v3
-; GFX908-NEXT: v_accvgpr_write_b32 a15, v33
+; GFX908-NEXT: v_accvgpr_write_b32 a14, v2
+; GFX908-NEXT: v_accvgpr_write_b32 a15, v2
; GFX908-NEXT: v_accvgpr_write_b32 a16, v2
-; GFX908-NEXT: v_accvgpr_read_b32 v3, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v33, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v2, a0
-; GFX908-NEXT: v_accvgpr_write_b32 a17, v3
-; GFX908-NEXT: v_accvgpr_write_b32 a18, v33
+; GFX908-NEXT: v_accvgpr_write_b32 a17, v2
+; GFX908-NEXT: v_accvgpr_write_b32 a18, v2
; GFX908-NEXT: v_accvgpr_write_b32 a19, v2
-; GFX908-NEXT: v_accvgpr_read_b32 v3, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v33, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v2, a0
-; GFX908-NEXT: v_accvgpr_write_b32 a20, v3
-; GFX908-NEXT: v_accvgpr_write_b32 a21, v33
+; GFX908-NEXT: v_accvgpr_write_b32 a20, v2
+; GFX908-NEXT: v_accvgpr_write_b32 a21, v2
; GFX908-NEXT: v_accvgpr_write_b32 a22, v2
-; GFX908-NEXT: v_accvgpr_read_b32 v3, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v33, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v2, a0
-; GFX908-NEXT: v_accvgpr_write_b32 a23, v3
-; GFX908-NEXT: v_accvgpr_write_b32 a24, v33
+; GFX908-NEXT: v_accvgpr_write_b32 a23, v2
+; GFX908-NEXT: v_accvgpr_write_b32 a24, v2
; GFX908-NEXT: v_accvgpr_write_b32 a25, v2
-; GFX908-NEXT: v_accvgpr_read_b32 v3, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v33, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v2, a0
-; GFX908-NEXT: v_accvgpr_write_b32 a26, v3
-; GFX908-NEXT: v_accvgpr_write_b32 a27, v33
+; GFX908-NEXT: v_accvgpr_write_b32 a26, v2
+; GFX908-NEXT: v_accvgpr_write_b32 a27, v2
; GFX908-NEXT: v_accvgpr_write_b32 a28, v2
-; GFX908-NEXT: v_accvgpr_read_b32 v3, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v33, a0
-; GFX908-NEXT: v_accvgpr_read_b32 v2, a0
-; GFX908-NEXT: v_accvgpr_write_b32 a29, v3
-; GFX908-NEXT: v_accvgpr_write_b32 a30, v33
+; GFX908-NEXT: v_accvgpr_write_b32 a29, v2
+; GFX908-NEXT: v_accvgpr_write_b32 a30, v2
; GFX908-NEXT: v_accvgpr_write_b32 a31, v2
; GFX908-NEXT: .LBB8_1: ; %for.cond.preheader
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/CodeGen/AMDGPU/mul.ll b/llvm/test/CodeGen/AMDGPU/mul.ll
index 896f48a9215b9..0f47a31f52dcb 100644
--- a/llvm/test/CodeGen/AMDGPU/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul.ll
@@ -2619,13 +2619,13 @@ define amdgpu_kernel void @s_mul_i128(ptr addrspace(1) %out, [8 x i32], i128 %a,
; SI-NEXT: v_add_i32_e32 v0, vcc, s4, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, s5, v0
; SI-NEXT: s_mul_i32 s5, s14, s9
-; SI-NEXT: s_mul_i32 s4, s12, s10
; SI-NEXT: v_add_i32_e32 v1, vcc, s5, v1
; SI-NEXT: s_mul_i32 s5, s15, s8
; SI-NEXT: v_add_i32_e32 v1, vcc, s5, v1
; SI-NEXT: s_mul_i32 s5, s14, s8
-; SI-NEXT: v_mov_b32_e32 v2, s4
-; SI-NEXT: v_add_i32_e32 v2, vcc, s5, v2
+; SI-NEXT: s_mul_i32 s4, s12, s10
+; SI-NEXT: v_mov_b32_e32 v2, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
; SI-NEXT: v_addc_u32_e32 v0, vcc, v1, v0, vcc
; SI-NEXT: v_mov_b32_e32 v1, s12
; SI-NEXT: v_mul_hi_u32 v5, s8, v1
>From 3bdd03f4541cf86c57ff910f0e7b4ed1e9704059 Mon Sep 17 00:00:00 2001
From: Mariusz Sikora <mariusz.sikora at amd.com>
Date: Thu, 17 Apr 2025 11:16:42 -0400
Subject: [PATCH 2/5] Update test
---
llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir b/llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir
index ef4b27d169224..fb58e78646bc4 100644
--- a/llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir
@@ -1,4 +1,4 @@
-# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=si-fold-operands -verify-machineinstrs -o - %s | FileCheck %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=si-fold-operands -o - %s | FileCheck %s
---
liveins:
>From 6076f3a37b242d400cd54bf942fed64bd1ed3e3c Mon Sep 17 00:00:00 2001
From: Mariusz Sikora <mariusz.sikora at amd.com>
Date: Thu, 17 Apr 2025 11:19:05 -0400
Subject: [PATCH 3/5] Disallow folding for copy with any implicit operands
---
llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 542a992ccb96e..e5e7065f68056 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1093,7 +1093,7 @@ void SIFoldOperandsImpl::foldOperand(
if (UseMI->isCopy() && OpToFold.isReg() &&
UseMI->getOperand(0).getReg().isVirtual() &&
!UseMI->getOperand(1).getSubReg() &&
- !OpToFold.getParent()->hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
+ OpToFold.getParent()->implicit_operands().empty()) {
LLVM_DEBUG(dbgs() << "Folding " << OpToFold << "\n into " << *UseMI);
unsigned Size = TII->getOpSize(*UseMI, 1);
Register UseReg = OpToFold.getReg();
>From ce92aa5d447494a375a233209fa9683e92977397 Mon Sep 17 00:00:00 2001
From: Mariusz Sikora <mariusz.sikora at amd.com>
Date: Fri, 18 Apr 2025 01:54:34 -0400
Subject: [PATCH 4/5] Fix CHECK line in test
---
llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir b/llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir
index fb58e78646bc4..72d17e9182691 100644
--- a/llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir
@@ -9,7 +9,7 @@ body: |
; CHECK: bb.1:
; CHECK: %[[A:[0-9]*]]:sreg_32 = S_ADD_I32
; CHECK: COPY %[[A]]
- ; CHECKL SI_LOOP
+ ; CHECK: SI_LOOP
; CHECK: bb.2:
bb.0:
>From c9a9f10090f1d20b31ac8d32069e2097c4bc1bf6 Mon Sep 17 00:00:00 2001
From: Mariusz Sikora <mariusz.sikora at amd.com>
Date: Fri, 18 Apr 2025 01:55:41 -0400
Subject: [PATCH 5/5] Auto-generate test
---
llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir | 30 +++++++++++++++----
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir b/llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir
index 72d17e9182691..5c206da8c544f 100644
--- a/llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/do-not-fold-copy.mir
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=si-fold-operands -o - %s | FileCheck %s
---
@@ -5,12 +6,31 @@ liveins:
name: do_not_fold_copy_with_implicit_exec
tracksRegLiveness: true
body: |
+ ; CHECK-LABEL: name: do_not_fold_copy_with_implicit_exec
; CHECK: bb.0:
- ; CHECK: bb.1:
- ; CHECK: %[[A:[0-9]*]]:sreg_32 = S_ADD_I32
- ; CHECK: COPY %[[A]]
- ; CHECK: SI_LOOP
- ; CHECK: bb.2:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; CHECK-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:sreg_64 = PHI [[S_MOV_B64_1]], %bb.0, %4, %bb.1
+ ; CHECK-NEXT: [[PHI1:%[0-9]+]]:sreg_32 = PHI [[S_MOV_B32_]], %bb.0, %6, %bb.1
+ ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[PHI1]], 1, implicit-def dead $scc
+ ; CHECK-NEXT: [[SI_IF_BREAK:%[0-9]+]]:sreg_64 = SI_IF_BREAK [[S_MOV_B64_]], [[PHI]], implicit-def dead $scc
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[S_ADD_I32_]], implicit $exec
+ ; CHECK-NEXT: SI_LOOP [[SI_IF_BREAK]], %bb.1, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: SI_END_CF [[SI_IF_BREAK]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
+ ; CHECK-NEXT: [[V_SET_INACTIVE_B32_:%[0-9]+]]:vgpr_32 = V_SET_INACTIVE_B32 0, [[COPY]], 0, 0, killed [[DEF]], implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
bb.0:
%0:sreg_64 = S_MOV_B64 0
More information about the llvm-commits
mailing list