[llvm] [SIFoldOperands] Folding immediate into a copy invalidates candidates in the fold list (PR #148187)

Juan Manuel Martinez Caamaño via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 11 05:33:26 PDT 2025


https://github.com/jmmartinez updated https://github.com/llvm/llvm-project/pull/148187

From 063b053f1db7b7e815d9360ee467e97a5e66cece Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?= <juamarti at amd.com>
Date: Fri, 11 Jul 2025 12:30:59 +0200
Subject: [PATCH 1/4] Revert "AMDGPU: Try constant fold after folding immediate
 (#141862)"

This reverts commit 80064b6e326d0cf34bac1d09c12fc1e6abecb7af.

The patch triggers a crash when the folded use instruction has two of its
operands in the fold list.

See https://github.com/llvm/llvm-project/pull/148187 for more info.
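
For illustration, a minimal standalone sketch of the failure mode, using
simplified stand-in types rather than the actual llvm::MachineInstr and
FoldCandidate classes: two operands of the same use instruction are queued in
the fold list, the first immediate fold lets the instruction be constant-folded
into a COPY with fewer operands, and the remaining candidate is left pointing
at an operand index that no longer exists.

  // Simplified stand-ins for illustration only; not the real pass types.
  #include <cstdio>
  #include <vector>

  struct Instr {
    unsigned NumOperands; // stand-in for MachineInstr::getNumOperands()
  };

  struct Candidate {
    Instr *UseMI;     // use instruction the fold targets
    unsigned UseOpNo; // operand index the immediate would be written into
  };

  int main() {
    Instr Or{3}; // e.g. a V_OR_B32-like use: dst, src0, src1
    // Both source operands of the same use end up in the fold list.
    std::vector<Candidate> FoldList = {{&Or, 1}, {&Or, 2}};

    // After the first immediate is folded, constant folding rewrites the use
    // into a COPY, which has fewer operands.
    Or.NumOperands = 2; // dst, src

    // The second candidate is now stale: its operand index is out of range.
    for (const Candidate &C : FoldList)
      if (C.UseOpNo >= C.UseMI->NumOperands)
        std::printf("stale candidate: operand %u of a %u-operand instruction\n",
                    C.UseOpNo, C.UseMI->NumOperands);
    return 0;
  }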

SWDEV-542372
---
 llvm/lib/Target/AMDGPU/SIFoldOperands.cpp     |  6 -----
 .../AMDGPU/bit-op-reduce-width-known-bits.ll  |  3 ++-
 .../AMDGPU/constant-fold-imm-immreg.mir       | 22 -------------------
 llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir    |  3 ++-
 .../fold-zero-high-bits-skips-non-reg.mir     |  4 ++--
 llvm/test/CodeGen/AMDGPU/sdiv64.ll            |  7 +++---
 llvm/test/CodeGen/AMDGPU/srem64.ll            |  7 +++---
 llvm/test/CodeGen/AMDGPU/udiv64.ll            |  3 ++-
 llvm/test/CodeGen/AMDGPU/urem64.ll            |  7 +++---
 9 files changed, 20 insertions(+), 42 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 0ed06c37507af..b8fecc382c64a 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1782,12 +1782,6 @@ bool SIFoldOperandsImpl::foldInstOperand(MachineInstr &MI,
       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                         << static_cast<int>(Fold.UseOpNo) << " of "
                         << *Fold.UseMI);
-
-      if (Fold.isImm() && tryConstantFoldOp(Fold.UseMI)) {
-        LLVM_DEBUG(dbgs() << "Constant folded " << *Fold.UseMI);
-        Changed = true;
-      }
-
     } else if (Fold.Commuted) {
       // Restoring instruction's original operand order if fold has failed.
       TII->commuteInstruction(*Fold.UseMI, false);
diff --git a/llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll b/llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll
index ad26dfa7f93e8..ac5f9b6b483eb 100644
--- a/llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll
+++ b/llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll
@@ -105,8 +105,9 @@ define i64 @v_xor_i64_known_i32_from_range_use_out_of_block(i64 %x) {
 ; CHECK-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; CHECK-NEXT:  ; %bb.1: ; %inc
 ; CHECK-NEXT:    v_not_b32_e32 v2, v4
+; CHECK-NEXT:    v_not_b32_e32 v3, 0
 ; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
-; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
+; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
 ; CHECK-NEXT:  ; %bb.2: ; %UnifiedReturnBlock
 ; CHECK-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; CHECK-NEXT:    v_mov_b32_e32 v0, v2
diff --git a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
index e7177a5e7160e..fe2b0bb1ff6ae 100644
--- a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
+++ b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
@@ -961,25 +961,3 @@ body:             |
     S_ENDPGM 0, implicit %2, implicit %3
 
 ...
-
----
-name:            constant_v_or_b32_uses_subreg_or_0_regression
-tracksRegLiveness: true
-body:             |
-  bb.0:
-  liveins: $vgpr0, $vgpr1
-
-    ; GCN-LABEL: name: constant_v_or_b32_uses_subreg_or_0_regression
-    ; GCN: liveins: $vgpr0, $vgpr1
-    ; GCN-NEXT: {{  $}}
-    ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
-    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY1]]
-  %0:vgpr_32 = COPY $vgpr0
-  %1:vgpr_32 = COPY $vgpr1
-  %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-  %3:vreg_64 = REG_SEQUENCE %2:vgpr_32, %subreg.sub0, %0:vgpr_32, %subreg.sub1
-  %4:vgpr_32 = V_OR_B32_e64 %3.sub0:vreg_64, %1, implicit $exec
-  S_ENDPGM 0, implicit %4
-
-...
diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
index 74c4a2da50221..d00fd9b967f37 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
@@ -43,7 +43,8 @@ body:             |
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[DEF]], %subreg.sub0, killed [[V_MOV_B32_e32_]], %subreg.sub1
-    ; GCN-NEXT: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[DEF2]], [[REG_SEQUENCE]].sub0, implicit $exec
+    ; GCN-NEXT: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 0, [[DEF1]], implicit $exec
+    ; GCN-NEXT: [[V_XOR_B32_e32_1:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[DEF2]], [[REG_SEQUENCE]].sub0, implicit $exec
     %0:vgpr_32 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AMDGPU/fold-zero-high-bits-skips-non-reg.mir b/llvm/test/CodeGen/AMDGPU/fold-zero-high-bits-skips-non-reg.mir
index dc03eb74cbf11..b1aa88969c5bb 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-zero-high-bits-skips-non-reg.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-zero-high-bits-skips-non-reg.mir
@@ -8,8 +8,8 @@ body: |
     ; CHECK-LABEL: name: test_tryFoldZeroHighBits_skips_nonreg
     ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_]], %subreg.sub0, [[V_MOV_B32_e32_]], %subreg.sub1
-    ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-    ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]]
+    ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 65535, 0, implicit $exec
+    ; CHECK-NEXT: S_NOP 0, implicit [[V_AND_B32_e64_]]
   %0:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
   %1:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %0, %subreg.sub1
   %2:vgpr_32 = V_AND_B32_e64 65535, %1.sub0, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index df496258a2509..15eb41a1a5b65 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -404,11 +404,12 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v4, v10
+; GCN-IR-NEXT:    v_not_b32_e32 v5, v10
 ; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[6:7], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v4, v11
+; GCN-IR-NEXT:    v_not_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v5, v11
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], -1, 0, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v4, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 47dfa9f4fc2d3..c729c3fb8a4e4 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -380,11 +380,12 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
+; GCN-IR-NEXT:    v_not_b32_e32 v7, v12
 ; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
+; GCN-IR-NEXT:    v_not_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v13
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v6, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index e9017939f8a4a..5acbb044c1057 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -348,9 +348,10 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v10
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
 ; GCN-IR-NEXT:    v_not_b32_e32 v0, v14
+; GCN-IR-NEXT:    v_not_b32_e32 v1, 0
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v15
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], -1, 0, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 6480a88d40f5a..94f1b83ea2765 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -355,11 +355,12 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
+; GCN-IR-NEXT:    v_not_b32_e32 v7, v12
 ; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
+; GCN-IR-NEXT:    v_not_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v13
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v6, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0

From 43b97d34a1df7eed6d703f0528ceca463664e075 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?= <juamarti at amd.com>
Date: Fri, 11 Jul 2025 12:35:54 +0200
Subject: [PATCH 2/4] Reapply "AMDGPU: Try constant fold after folding
 immediate (#141862)"

This reverts commit 063b053f1db7b7e815d9360ee467e97a5e66cece.
---
 llvm/lib/Target/AMDGPU/SIFoldOperands.cpp     |  6 +++++
 .../AMDGPU/bit-op-reduce-width-known-bits.ll  |  3 +--
 .../AMDGPU/constant-fold-imm-immreg.mir       | 22 +++++++++++++++++++
 llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir    |  3 +--
 .../fold-zero-high-bits-skips-non-reg.mir     |  4 ++--
 llvm/test/CodeGen/AMDGPU/sdiv64.ll            |  7 +++---
 llvm/test/CodeGen/AMDGPU/srem64.ll            |  7 +++---
 llvm/test/CodeGen/AMDGPU/udiv64.ll            |  3 +--
 llvm/test/CodeGen/AMDGPU/urem64.ll            |  7 +++---
 9 files changed, 42 insertions(+), 20 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index b8fecc382c64a..0ed06c37507af 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1782,6 +1782,12 @@ bool SIFoldOperandsImpl::foldInstOperand(MachineInstr &MI,
       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                         << static_cast<int>(Fold.UseOpNo) << " of "
                         << *Fold.UseMI);
+
+      if (Fold.isImm() && tryConstantFoldOp(Fold.UseMI)) {
+        LLVM_DEBUG(dbgs() << "Constant folded " << *Fold.UseMI);
+        Changed = true;
+      }
+
     } else if (Fold.Commuted) {
       // Restoring instruction's original operand order if fold has failed.
       TII->commuteInstruction(*Fold.UseMI, false);
diff --git a/llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll b/llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll
index ac5f9b6b483eb..ad26dfa7f93e8 100644
--- a/llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll
+++ b/llvm/test/CodeGen/AMDGPU/bit-op-reduce-width-known-bits.ll
@@ -105,9 +105,8 @@ define i64 @v_xor_i64_known_i32_from_range_use_out_of_block(i64 %x) {
 ; CHECK-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; CHECK-NEXT:  ; %bb.1: ; %inc
 ; CHECK-NEXT:    v_not_b32_e32 v2, v4
-; CHECK-NEXT:    v_not_b32_e32 v3, 0
 ; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
-; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
+; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
 ; CHECK-NEXT:  ; %bb.2: ; %UnifiedReturnBlock
 ; CHECK-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; CHECK-NEXT:    v_mov_b32_e32 v0, v2
diff --git a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
index fe2b0bb1ff6ae..e7177a5e7160e 100644
--- a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
+++ b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
@@ -961,3 +961,25 @@ body:             |
     S_ENDPGM 0, implicit %2, implicit %3
 
 ...
+
+---
+name:            constant_v_or_b32_uses_subreg_or_0_regression
+tracksRegLiveness: true
+body:             |
+  bb.0:
+  liveins: $vgpr0, $vgpr1
+
+    ; GCN-LABEL: name: constant_v_or_b32_uses_subreg_or_0_regression
+    ; GCN: liveins: $vgpr0, $vgpr1
+    ; GCN-NEXT: {{  $}}
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+    ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY1]]
+  %0:vgpr_32 = COPY $vgpr0
+  %1:vgpr_32 = COPY $vgpr1
+  %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+  %3:vreg_64 = REG_SEQUENCE %2:vgpr_32, %subreg.sub0, %0:vgpr_32, %subreg.sub1
+  %4:vgpr_32 = V_OR_B32_e64 %3.sub0:vreg_64, %1, implicit $exec
+  S_ENDPGM 0, implicit %4
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
index d00fd9b967f37..74c4a2da50221 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
@@ -43,8 +43,7 @@ body:             |
     ; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
     ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     ; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[DEF]], %subreg.sub0, killed [[V_MOV_B32_e32_]], %subreg.sub1
-    ; GCN-NEXT: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 0, [[DEF1]], implicit $exec
-    ; GCN-NEXT: [[V_XOR_B32_e32_1:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[DEF2]], [[REG_SEQUENCE]].sub0, implicit $exec
+    ; GCN-NEXT: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[DEF2]], [[REG_SEQUENCE]].sub0, implicit $exec
     %0:vgpr_32 = IMPLICIT_DEF
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = IMPLICIT_DEF
diff --git a/llvm/test/CodeGen/AMDGPU/fold-zero-high-bits-skips-non-reg.mir b/llvm/test/CodeGen/AMDGPU/fold-zero-high-bits-skips-non-reg.mir
index b1aa88969c5bb..dc03eb74cbf11 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-zero-high-bits-skips-non-reg.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-zero-high-bits-skips-non-reg.mir
@@ -8,8 +8,8 @@ body: |
     ; CHECK-LABEL: name: test_tryFoldZeroHighBits_skips_nonreg
     ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_]], %subreg.sub0, [[V_MOV_B32_e32_]], %subreg.sub1
-    ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 65535, 0, implicit $exec
-    ; CHECK-NEXT: S_NOP 0, implicit [[V_AND_B32_e64_]]
+    ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    ; CHECK-NEXT: S_NOP 0, implicit [[V_MOV_B32_e32_1]]
   %0:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
   %1:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %0, %subreg.sub1
   %2:vgpr_32 = V_AND_B32_e64 65535, %1.sub0, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index 15eb41a1a5b65..df496258a2509 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -404,12 +404,11 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v5, v10
+; GCN-IR-NEXT:    v_not_b32_e32 v4, v10
 ; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[6:7], v8
-; GCN-IR-NEXT:    v_not_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v5, v11
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v4, v11
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v4, vcc
+; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], -1, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index c729c3fb8a4e4..47dfa9f4fc2d3 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -380,12 +380,11 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v7, v12
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
 ; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_not_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v13
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v6, vcc
+; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 5acbb044c1057..e9017939f8a4a 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -348,10 +348,9 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v10
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
 ; GCN-IR-NEXT:    v_not_b32_e32 v0, v14
-; GCN-IR-NEXT:    v_not_b32_e32 v1, 0
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v15
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], -1, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 94f1b83ea2765..6480a88d40f5a 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -355,12 +355,11 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v7, v12
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
 ; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_not_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v13
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v6, vcc
+; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0

From b55913d891d4e7e1aebe0eca09ce9fd3f787fc3e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?= <juamarti at amd.com>
Date: Fri, 11 Jul 2025 11:47:56 +0200
Subject: [PATCH 3/4] Pre-Commit test: SWDEV-542372: crash in si-fold-operands

---
 .../AMDGPU/si-fold-operands-swdev-542372.ll   | 39 +++++++++++++++++++
 1 file changed, 39 insertions(+)
 create mode 100644 llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll

diff --git a/llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll b/llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll
new file mode 100644
index 0000000000000..35d70f20bffb2
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll
@@ -0,0 +1,39 @@
+; RUN: not --crash llc -O3 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a %s -o /dev/null
+
+ at lds = internal addrspace(3) global [5 x i32] poison
+
+define amdgpu_kernel void @kernel() {
+entry:
+  %load.lds.0 = load <2 x i32>, ptr addrspace(3) @lds
+  %vecext.i55 = extractelement <2 x i32> %load.lds.0, i64 0
+  %cmp3.i57 = icmp eq i32 %vecext.i55, 2
+  store i32 0, ptr addrspace(3) @lds
+  br i1 %cmp3.i57, label %land.rhs49, label %land.end59
+
+land.rhs49:                                       ; preds = %entry
+  %load.lds.1 = load <2 x i32>, ptr addrspace(3) @lds
+  %vecext.i67 = extractelement <2 x i32> %load.lds.1, i64 0
+  %cmp3.i69 = icmp eq i32 %vecext.i67, 1
+  br i1 %cmp3.i69, label %land.rhs57, label %land.end59
+
+land.rhs57:                                       ; preds = %land.rhs49
+  %rem.i.i.i = srem <2 x i32> %load.lds.0, %load.lds.1
+  %ref.tmp.sroa.0.0.vec.extract.i.i = extractelement <2 x i32> %rem.i.i.i, i64 0
+  store i32 %ref.tmp.sroa.0.0.vec.extract.i.i, ptr addrspace(3) @lds
+  store i32 %ref.tmp.sroa.0.0.vec.extract.i.i, ptr addrspace(3) getelementptr inbounds nuw (i8, ptr addrspace(3) @lds, i32 4)
+  %load.lds.2 = load <2 x i32>, ptr addrspace(3) @lds
+  %vecext.i.i.i = extractelement <2 x i32> %load.lds.2, i64 0
+  %cmp3.i.i.i = icmp ne i32 %vecext.i.i.i, 0
+  %vecext.1.i.i.i = extractelement <2 x i32> %load.lds.2, i64 1
+  %cmp3.1.i.i.i = icmp ne i32 %vecext.1.i.i.i, 0
+  %.not.i.i = select i1 %cmp3.i.i.i, i1 true, i1 %cmp3.1.i.i.i
+  br i1 %.not.i.i, label %land.end59, label %if.end.i
+
+if.end.i:                                         ; preds = %land.rhs57
+  %and.i.i.i = and <2 x i32> %load.lds.2, splat (i32 1)
+  %ref.tmp.sroa.0.0.vec.extract.i20.i = extractelement <2 x i32> %and.i.i.i, i64 0
+  br label %land.end59
+
+land.end59:                                       ; preds = %if.end.i, %land.rhs57, %land.rhs49, %entry
+  ret void
+}

From 1da5c5178c7f07dc0d117b891fa65c0145b64e8c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Juan=20Manuel=20Martinez=20Caama=C3=B1o?= <juamarti at amd.com>
Date: Fri, 11 Jul 2025 11:48:37 +0200
Subject: [PATCH 4/4] [SIFoldOperands] Folding immediate into a copy
 invalidates candidates in the fold list

Fixes SWDEV-542372
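
A minimal standalone sketch of the idea behind the fix, with simplified types
rather than the pass's actual code: once a candidate's use instruction has been
constant-folded into a copy, any later fold-list entry that still refers to
that instruction is discarded before it can be applied. Unlike the patch below,
which only shrinks the iteration range with std::remove_if, this sketch erases
the stale entries outright.

  // Simplified stand-ins for illustration only.
  #include <algorithm>
  #include <iterator>
  #include <vector>

  struct MachineInstr {}; // stand-in for the real class
  struct FoldCandidate {
    MachineInstr *UseMI;
    unsigned UseOpNo;
  };

  // Called right after the candidate at 'FoldIt' has been applied and its use
  // instruction was constant-folded into a copy: drop every later candidate
  // that still targets that same instruction.
  void dropStaleCandidates(std::vector<FoldCandidate> &FoldList,
                           std::vector<FoldCandidate>::iterator FoldIt) {
    MachineInstr *Folded = FoldIt->UseMI;
    FoldList.erase(std::remove_if(std::next(FoldIt), FoldList.end(),
                                  [&](const FoldCandidate &F) {
                                    return F.UseMI == Folded;
                                  }),
                   FoldList.end());
  }
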
---
 llvm/lib/Target/AMDGPU/SIFoldOperands.cpp     | 10 ++++++--
 .../AMDGPU/si-fold-operands-swdev-542372.ll   | 25 ++++++++++++++++++-
 2 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 0ed06c37507af..2675e4e9d02e4 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1761,7 +1761,9 @@ bool SIFoldOperandsImpl::foldInstOperand(MachineInstr &MI,
   for (MachineInstr *Copy : CopiesToReplace)
     Copy->addImplicitDefUseOperands(*MF);
 
-  for (FoldCandidate &Fold : FoldList) {
+  for (auto FoldIt = FoldList.begin(), End = FoldList.end(); FoldIt != End;
+       ++FoldIt) {
+    FoldCandidate &Fold = *FoldIt;
     assert(!Fold.isReg() || Fold.Def.OpToFold);
     if (Fold.isReg() && Fold.getReg().isVirtual()) {
       Register Reg = Fold.getReg();
@@ -1785,9 +1787,13 @@ bool SIFoldOperandsImpl::foldInstOperand(MachineInstr &MI,
 
       if (Fold.isImm() && tryConstantFoldOp(Fold.UseMI)) {
         LLVM_DEBUG(dbgs() << "Constant folded " << *Fold.UseMI);
+        // The instruction was folded into a copy; we have to skip any other
+        // occurrence of UseMI in the fold list.
+        End = std::remove_if(FoldIt + 1, End, [&](const FoldCandidate &F) {
+          return F.UseMI == Fold.UseMI;
+        });
         Changed = true;
       }
-
     } else if (Fold.Commuted) {
       // Restoring instruction's original operand order if fold has failed.
       TII->commuteInstruction(*Fold.UseMI, false);
diff --git a/llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll b/llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll
index 35d70f20bffb2..cc88bd2dd3b36 100644
--- a/llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-fold-operands-swdev-542372.ll
@@ -1,8 +1,31 @@
-; RUN: not --crash llc -O3 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a %s -o /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -O3 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a %s -o - | FileCheck %s
 
 @lds = internal addrspace(3) global [5 x i32] poison
 
 define amdgpu_kernel void @kernel() {
+; CHECK-LABEL: kernel:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    ds_read_b64 v[2:3], v0
+; CHECK-NEXT:    ds_write_b32 v0, v0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(1)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 2, v2
+; CHECK-NEXT:    s_cbranch_vccnz .LBB0_3
+; CHECK-NEXT:  ; %bb.1: ; %land.rhs49
+; CHECK-NEXT:    ds_read_b64 v[0:1], v0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 1, v0
+; CHECK-NEXT:    s_cbranch_vccnz .LBB0_3
+; CHECK-NEXT:  ; %bb.2: ; %land.rhs57
+; CHECK-NEXT:    s_mov_b32 s0, 0
+; CHECK-NEXT:    s_mov_b32 s1, s0
+; CHECK-NEXT:    v_mov_b32_e32 v2, 0
+; CHECK-NEXT:    v_pk_mov_b32 v[0:1], s[0:1], s[0:1] op_sel:[0,1]
+; CHECK-NEXT:    s_cmp_lg_u32 s0, 0
+; CHECK-NEXT:    ds_write_b64 v2, v[0:1]
+; CHECK-NEXT:  .LBB0_3: ; %land.end59
+; CHECK-NEXT:    s_endpgm
 entry:
   %load.lds.0 = load <2 x i32>, ptr addrspace(3) @lds
   %vecext.i55 = extractelement <2 x i32> %load.lds.0, i64 0


