[llvm] AMDGPU: Fold mov imm to copy to av_32 class (PR #155428)

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 26 16:26:13 PDT 2025


https://github.com/arsenm updated https://github.com/llvm/llvm-project/pull/155428

From 1d9fb6138ad5f4c98d33a71b492aaf6af8d70b9a Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Tue, 26 Aug 2025 23:02:24 +0900
Subject: [PATCH 1/2] AMDGPU: Fold mov imm to copy to av_32 class

Previously we had a special case for folding immediates into copies
to AGPR_32, ignoring AV_32. Try folding into the mov pseudos instead.

Not sure why the true16 case regressed.
---
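
For illustration, the fold now rewrites immediate COPYs like the
following (a minimal MIR sketch mirroring the new tests below; the
arrow separates SIFoldOperands input from output):

  ; Inline constant copied into av_32: the COPY becomes the pseudo.
  %0:sreg_32 = S_MOV_B32 -16
  %1:av_32 = COPY %0
    -->
  %1:av_32 = AV_MOV_B32_IMM_PSEUDO -16, implicit $exec

  ; Same for agpr_32, which was previously a dedicated special case:
  %1:agpr_32 = COPY %0
    -->
  %1:agpr_32 = V_ACCVGPR_WRITE_B32_e64 -16, implicit $exec

Non-inline literals (e.g. 65 or 999) keep the defining mov and only
mutate the COPY's opcode, with the source register left in place.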
 llvm/lib/Target/AMDGPU/SIFoldOperands.cpp     |  29 +-
 .../CodeGen/AMDGPU/fold-imm-copy-agpr.mir     | 334 +++++++++++++++++-
 llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir    |  22 +-
 llvm/test/CodeGen/AMDGPU/mfma-loop.ll         | 119 +++----
 4 files changed, 394 insertions(+), 110 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index d72af06ac566e..cdbb7a7097bb5 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1260,30 +1260,13 @@ void SIFoldOperandsImpl::foldOperand(
       return;
 
     const TargetRegisterClass *DestRC = TRI->getRegClassForReg(*MRI, DestReg);
-    if (!DestReg.isPhysical() && DestRC == &AMDGPU::AGPR_32RegClass) {
-      std::optional<int64_t> UseImmVal = OpToFold.getEffectiveImmVal();
-      if (UseImmVal && TII->isInlineConstant(
-                           *UseImmVal, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
-        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
-        UseMI->getOperand(1).ChangeToImmediate(*UseImmVal);
-        CopiesToReplace.push_back(UseMI);
-        return;
-      }
-    }
-
-    // Allow immediates COPYd into sgpr_lo16 to be further folded while
-    // still being legal if not further folded
-    if (DestRC == &AMDGPU::SGPR_LO16RegClass) {
-      assert(ST->useRealTrue16Insts());
-      MRI->setRegClass(DestReg, &AMDGPU::SGPR_32RegClass);
-      DestRC = &AMDGPU::SGPR_32RegClass;
-    }
 
     // In order to fold immediates into copies, we need to change the copy to a
     // MOV. Find a compatible mov instruction with the value.
     for (unsigned MovOp :
          {AMDGPU::S_MOV_B32, AMDGPU::V_MOV_B32_e32, AMDGPU::S_MOV_B64,
-          AMDGPU::V_MOV_B64_PSEUDO, AMDGPU::V_MOV_B16_t16_e64}) {
+          AMDGPU::V_MOV_B64_PSEUDO, AMDGPU::V_MOV_B16_t16_e64,
+          AMDGPU::V_ACCVGPR_WRITE_B32_e64, AMDGPU::AV_MOV_B32_IMM_PSEUDO}) {
       const MCInstrDesc &MovDesc = TII->get(MovOp);
       assert(MovDesc.getNumDefs() > 0 && MovDesc.operands()[0].RegClass != -1);
 
@@ -1315,6 +1298,14 @@ void SIFoldOperandsImpl::foldOperand(
       UseMI->setDesc(MovDesc);
 
       if (MovOp == AMDGPU::V_MOV_B16_t16_e64) {
+        // Allow immediates COPYd into sgpr_lo16 to be further folded while
+        // still being legal if not further folded
+        if (DestRC == &AMDGPU::SGPR_LO16RegClass) {
+          assert(ST->useRealTrue16Insts());
+          MRI->setRegClass(DestReg, &AMDGPU::SGPR_32RegClass);
+          DestRC = &AMDGPU::SGPR_32RegClass;
+        }
+
         const auto &SrcOp = UseMI->getOperand(UseOpIdx);
         MachineOperand NewSrcOp(SrcOp);
         MachineFunction *MF = UseMI->getParent()->getParent();
diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-copy-agpr.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-copy-agpr.mir
index a079ee1296f41..6f2e33900a79a 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-imm-copy-agpr.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-imm-copy-agpr.mir
@@ -91,8 +91,8 @@ body:             |
   bb.0:
     ; GCN-LABEL: name: v_mov_b64_pseudo_lit_copy_sub0_to_agpr_32
     ; GCN: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329592, implicit $exec
-    ; GCN-NEXT: [[COPY:%[0-9]+]]:agpr_32 = COPY [[V_MOV_B]].sub0
-    ; GCN-NEXT: $agpr0 = COPY [[COPY]]
+    ; GCN-NEXT: [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]]:agpr_32 = V_ACCVGPR_WRITE_B32_e64 [[V_MOV_B]].sub0, implicit $exec
+    ; GCN-NEXT: $agpr0 = COPY [[V_ACCVGPR_WRITE_B32_e64_]]
     ; GCN-NEXT: S_ENDPGM 0
     %0:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329592, implicit $exec
     %1:agpr_32 = COPY %0.sub0
@@ -108,8 +108,8 @@ body:             |
   bb.0:
     ; GCN-LABEL: name: v_mov_b64_pseudo_lit_copy_sub1_to_agpr_32
     ; GCN: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329592, implicit $exec
-    ; GCN-NEXT: [[COPY:%[0-9]+]]:agpr_32 = COPY [[V_MOV_B]].sub1
-    ; GCN-NEXT: $agpr0 = COPY [[COPY]]
+    ; GCN-NEXT: [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]]:agpr_32 = V_ACCVGPR_WRITE_B32_e64 [[V_MOV_B]].sub1, implicit $exec
+    ; GCN-NEXT: $agpr0 = COPY [[V_ACCVGPR_WRITE_B32_e64_]]
     ; GCN-NEXT: S_ENDPGM 0
     %0:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329592, implicit $exec
     %1:agpr_32 = COPY %0.sub1
@@ -133,3 +133,329 @@ body:             |
     S_ENDPGM 0, implicit %1
 
 ...
+
+---
+name: s_mov_b32_imm_0_copy_to_agpr_32
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b32_imm_0_copy_to_agpr_32
+    ; GCN: [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]]:agpr_32 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[V_ACCVGPR_WRITE_B32_e64_]]
+    %0:sreg_32 = S_MOV_B32 0, implicit $exec
+    %1:agpr_32 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b32_imm_neg16_copy_to_agpr_32
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b32_imm_neg16_copy_to_agpr_32
+    ; GCN: [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]]:agpr_32 = V_ACCVGPR_WRITE_B32_e64 -16, implicit $exec
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[V_ACCVGPR_WRITE_B32_e64_]]
+    %0:sreg_32 = S_MOV_B32 -16, implicit $exec
+    %1:agpr_32 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b32_imm_65_copy_to_agpr_32
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b32_imm_65_copy_to_agpr_32
+    ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65, implicit $exec
+    ; GCN-NEXT: [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]]:agpr_32 = V_ACCVGPR_WRITE_B32_e64 [[S_MOV_B32_]], implicit $exec
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[V_ACCVGPR_WRITE_B32_e64_]]
+    %0:sreg_32 = S_MOV_B32 65, implicit $exec
+    %1:agpr_32 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b32_imm_0_copy_to_av_32
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b32_imm_0_copy_to_av_32
+    ; GCN: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[AV_MOV_]]
+    %0:sreg_32 = S_MOV_B32 0, implicit $exec
+    %1:av_32 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b32_imm_neg16_copy_to_av_32
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b32_imm_neg16_copy_to_av_32
+    ; GCN: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO -16, implicit $exec
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[AV_MOV_]]
+    %0:sreg_32 = S_MOV_B32 -16, implicit $exec
+    %1:av_32 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b32_imm_65_copy_to_av_32
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b32_imm_65_copy_to_av_32
+    ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65, implicit $exec
+    ; GCN-NEXT: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO [[S_MOV_B32_]], implicit $exec
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[AV_MOV_]]
+    %0:sreg_32 = S_MOV_B32 65, implicit $exec
+    %1:av_32 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_0_copy_to_areg_64
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_0_copy_to_areg_64
+    ; GCN: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:areg_64 = COPY [[S_MOV_B64_]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64 0, implicit $exec
+    %1:areg_64 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_0_copy_to_areg_64_align2
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_0_copy_to_areg_64_align2
+    ; GCN: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:areg_64_align2 = COPY [[S_MOV_B64_]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64 0, implicit $exec
+    %1:areg_64_align2 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_neg16_copy_to_areg_64
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_neg16_copy_to_areg_64
+    ; GCN: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -16, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:areg_64 = COPY [[S_MOV_B64_]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64 -16, implicit $exec
+    %1:areg_64 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_neg16_copy_to_areg_64_align2
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_neg16_copy_to_areg_64_align2
+    ; GCN: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -16, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:areg_64_align2 = COPY [[S_MOV_B64_]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64 -16, implicit $exec
+    %1:areg_64_align2 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_0_copy_to_av_64
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_0_copy_to_av_64
+    ; GCN: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:av_64 = COPY [[S_MOV_B64_]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64 0, implicit $exec
+    %1:av_64 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_0_copy_to_av_64_align2
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_0_copy_to_av_64_align2
+    ; GCN: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:av_64_align2 = COPY [[S_MOV_B64_]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64 0, implicit $exec
+    %1:av_64_align2 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_neg16_copy_to_av_64
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_neg16_copy_to_av_64
+    ; GCN: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -16, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:av_64 = COPY [[S_MOV_B64_]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64 -16, implicit $exec
+    %1:av_64 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_neg16_copy_to_av_64_align2
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_neg16_copy_to_av_64_align2
+    ; GCN: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -16, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:av_64_align2 = COPY [[S_MOV_B64_]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64 -16, implicit $exec
+    %1:av_64_align2 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_pseudo_literal_32_halves_copy_to_areg_64
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_pseudo_literal_32_halves_copy_to_areg_64
+    ; GCN: [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO -42949672960, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:areg_64 = COPY [[S_MOV_B]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64_IMM_PSEUDO 18446744030759878656, implicit $exec
+    %1:areg_64 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_pseudo_literal_32_halves_copy_to_areg_64_align2
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_pseudo_literal_32_halves_copy_to_areg_64_align2
+    ; GCN: [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO -42949672960, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:areg_64_align2 = COPY [[S_MOV_B]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64_IMM_PSEUDO 18446744030759878656, implicit $exec
+    %1:areg_64_align2 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_pseudo_inlineimm_32_halves_copy_to_areg_64
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_pseudo_inlineimm_32_halves_copy_to_areg_64
+    ; GCN: [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO -21474836480, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:areg_64 = COPY [[S_MOV_B]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64_IMM_PSEUDO 18446744052234715136, implicit $exec
+    %1:areg_64 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_pseudo_inlineimm_32_halves_copy_to_areg_64_align2
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_pseudo_inlineimm_32_halves_copy_to_areg_64_align2
+    ; GCN: [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO -21474836480, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:areg_64_align2 = COPY [[S_MOV_B]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64_IMM_PSEUDO 18446744052234715136, implicit $exec
+    %1:areg_64_align2 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_pseudo_literal_32_halves_copy_to_av_64
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_pseudo_literal_32_halves_copy_to_av_64
+    ; GCN: [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO -42949672960, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:av_64 = COPY [[S_MOV_B]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64_IMM_PSEUDO 18446744030759878656, implicit $exec
+    %1:av_64 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_pseudo_literal_32_halves_copy_to_av_64_align2
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_pseudo_literal_32_halves_copy_to_av_64_align2
+    ; GCN: [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO -42949672960, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:av_64_align2 = COPY [[S_MOV_B]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64_IMM_PSEUDO 18446744030759878656, implicit $exec
+    %1:av_64_align2 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_pseudo_inlineimm_32_halves_copy_to_av_64
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_pseudo_inlineimm_32_halves_copy_to_av_64
+    ; GCN: [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO -9223372036854775784, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:av_64 = COPY [[S_MOV_B]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64_IMM_PSEUDO -9223372036854775784, implicit $exec
+    %1:av_64 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
+
+---
+name: s_mov_b64_imm_pseudo_inlineimm_32_halves_copy_to_av_64_align2
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; GCN-LABEL: name: s_mov_b64_imm_pseudo_inlineimm_32_halves_copy_to_av_64_align2
+    ; GCN: [[S_MOV_B:%[0-9]+]]:sreg_64 = S_MOV_B64_IMM_PSEUDO -9223372036854775784, implicit $exec
+    ; GCN-NEXT: [[COPY:%[0-9]+]]:av_64_align2 = COPY [[S_MOV_B]]
+    ; GCN-NEXT: S_ENDPGM 0, implicit [[COPY]]
+    %0:sreg_64 = S_MOV_B64_IMM_PSEUDO -9223372036854775784, implicit $exec
+    %1:av_64_align2 = COPY %0
+    S_ENDPGM 0, implicit %1
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
index 9a51f457a567a..ddf2aa34ecd87 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
@@ -191,8 +191,8 @@ body:             |
   bb.0:
     ; GCN-LABEL: name: v_mov_b32_imm_literal_copy_v_to_agpr_32
     ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 999, implicit $exec
-    ; GCN-NEXT: [[COPY:%[0-9]+]]:agpr_32 = COPY [[V_MOV_B32_e32_]]
-    ; GCN-NEXT: $agpr0 = COPY [[COPY]]
+    ; GCN-NEXT: [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]]:agpr_32 = V_ACCVGPR_WRITE_B32_e64 [[V_MOV_B32_e32_]], implicit $exec
+    ; GCN-NEXT: $agpr0 = COPY [[V_ACCVGPR_WRITE_B32_e64_]]
     ; GCN-NEXT: S_ENDPGM 0
     %0:vgpr_32 = V_MOV_B32_e32 999, implicit $exec
     %1:agpr_32 = COPY %0
@@ -207,9 +207,8 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     ; GCN-LABEL: name: s_mov_b32_inlineimm_copy_s_to_av_32
-    ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32
-    ; GCN-NEXT: [[COPY:%[0-9]+]]:av_32 = COPY [[S_MOV_B32_]]
-    ; GCN-NEXT: $agpr0 = COPY [[COPY]]
+    ; GCN: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO 32, implicit $exec
+    ; GCN-NEXT: $agpr0 = COPY [[AV_MOV_]]
     ; GCN-NEXT: S_ENDPGM 0
     %0:sreg_32 = S_MOV_B32 32
     %1:av_32 = COPY %0
@@ -224,9 +223,8 @@ tracksRegLiveness: true
 body:             |
  bb.0:
     ; GCN-LABEL: name: v_mov_b32_inlineimm_copy_v_to_av_32
-    ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 32, implicit $exec
-    ; GCN-NEXT: [[COPY:%[0-9]+]]:av_32 = COPY [[V_MOV_B32_e32_]]
-    ; GCN-NEXT: $agpr0 = COPY [[COPY]]
+    ; GCN: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO 32, implicit $exec
+    ; GCN-NEXT: $agpr0 = COPY [[AV_MOV_]]
     ; GCN-NEXT: S_ENDPGM 0
    %0:vgpr_32 = V_MOV_B32_e32 32, implicit $exec
    %1:av_32 = COPY %0
@@ -242,8 +240,8 @@ body:             |
   bb.0:
     ; GCN-LABEL: name: s_mov_b32_imm_literal_copy_s_to_av_32
     ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 999
-    ; GCN-NEXT: [[COPY:%[0-9]+]]:av_32 = COPY [[S_MOV_B32_]]
-    ; GCN-NEXT: $agpr0 = COPY [[COPY]]
+    ; GCN-NEXT: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO [[S_MOV_B32_]], implicit $exec
+    ; GCN-NEXT: $agpr0 = COPY [[AV_MOV_]]
     ; GCN-NEXT: S_ENDPGM 0
     %0:sreg_32 = S_MOV_B32 999
     %1:av_32 = COPY %0
@@ -259,8 +257,8 @@ body:             |
   bb.0:
     ; GCN-LABEL: name: v_mov_b32_imm_literal_copy_v_to_av_32
     ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 999, implicit $exec
-    ; GCN-NEXT: [[COPY:%[0-9]+]]:av_32 = COPY [[V_MOV_B32_e32_]]
-    ; GCN-NEXT: $agpr0 = COPY [[COPY]]
+    ; GCN-NEXT: [[AV_MOV_:%[0-9]+]]:av_32 = AV_MOV_B32_IMM_PSEUDO [[V_MOV_B32_e32_]], implicit $exec
+    ; GCN-NEXT: $agpr0 = COPY [[AV_MOV_]]
     ; GCN-NEXT: S_ENDPGM 0
     %0:vgpr_32 = V_MOV_B32_e32 999, implicit $exec
     %1:av_32 = COPY %0
diff --git a/llvm/test/CodeGen/AMDGPU/mfma-loop.ll b/llvm/test/CodeGen/AMDGPU/mfma-loop.ll
index 6110b3101020a..d39daaade677f 100644
--- a/llvm/test/CodeGen/AMDGPU/mfma-loop.ll
+++ b/llvm/test/CodeGen/AMDGPU/mfma-loop.ll
@@ -708,103 +708,72 @@ define amdgpu_kernel void @test_mfma_loop_unfoldable_seq(ptr addrspace(1) %arg)
 ; GFX908-LABEL: test_mfma_loop_unfoldable_seq:
 ; GFX908:       ; %bb.0: ; %entry
 ; GFX908-NEXT:    v_mov_b32_e32 v0, 0x431a0000
-; GFX908-NEXT:    s_mov_b32 s0, 16
-; GFX908-NEXT:    v_mov_b32_e32 v1, 1.0
+; GFX908-NEXT:    v_mov_b32_e32 v1, 0x43190000
+; GFX908-NEXT:    v_mov_b32_e32 v2, 0x43160000
 ; GFX908-NEXT:    v_accvgpr_write_b32 a31, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43190000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a30, v0
+; GFX908-NEXT:    v_accvgpr_write_b32 a30, v1
 ; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43180000
-; GFX908-NEXT:    s_nop 1
+; GFX908-NEXT:    v_mov_b32_e32 v1, 0x43170000
+; GFX908-NEXT:    v_accvgpr_write_b32 a27, v2
 ; GFX908-NEXT:    v_accvgpr_write_b32 a29, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43170000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a28, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43160000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a27, v0
+; GFX908-NEXT:    v_accvgpr_write_b32 a28, v1
 ; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43150000
-; GFX908-NEXT:    s_nop 1
+; GFX908-NEXT:    v_mov_b32_e32 v1, 0x43140000
+; GFX908-NEXT:    v_mov_b32_e32 v2, 0x43130000
 ; GFX908-NEXT:    v_accvgpr_write_b32 a26, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43140000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a25, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43130000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a24, v0
+; GFX908-NEXT:    v_accvgpr_write_b32 a25, v1
+; GFX908-NEXT:    v_accvgpr_write_b32 a24, v2
 ; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43120000
-; GFX908-NEXT:    s_nop 1
+; GFX908-NEXT:    v_mov_b32_e32 v1, 0x43110000
+; GFX908-NEXT:    v_mov_b32_e32 v2, 0x43100000
 ; GFX908-NEXT:    v_accvgpr_write_b32 a23, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43110000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a22, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43100000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a21, v0
+; GFX908-NEXT:    v_accvgpr_write_b32 a22, v1
+; GFX908-NEXT:    v_accvgpr_write_b32 a21, v2
 ; GFX908-NEXT:    v_mov_b32_e32 v0, 0x430f0000
-; GFX908-NEXT:    s_nop 1
+; GFX908-NEXT:    v_mov_b32_e32 v1, 0x430e0000
+; GFX908-NEXT:    v_mov_b32_e32 v2, 0x430d0000
 ; GFX908-NEXT:    v_accvgpr_write_b32 a20, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x430e0000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a19, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x430d0000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a18, v0
+; GFX908-NEXT:    v_accvgpr_write_b32 a19, v1
+; GFX908-NEXT:    v_accvgpr_write_b32 a18, v2
 ; GFX908-NEXT:    v_mov_b32_e32 v0, 0x430c0000
-; GFX908-NEXT:    s_nop 1
+; GFX908-NEXT:    v_mov_b32_e32 v1, 0x430b0000
+; GFX908-NEXT:    v_mov_b32_e32 v2, 0x430a0000
 ; GFX908-NEXT:    v_accvgpr_write_b32 a17, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x430b0000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a16, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x430a0000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a15, v0
+; GFX908-NEXT:    v_accvgpr_write_b32 a16, v1
+; GFX908-NEXT:    v_accvgpr_write_b32 a15, v2
 ; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43090000
-; GFX908-NEXT:    s_nop 1
+; GFX908-NEXT:    v_mov_b32_e32 v1, 0x43080000
+; GFX908-NEXT:    v_mov_b32_e32 v2, 0x43070000
 ; GFX908-NEXT:    v_accvgpr_write_b32 a14, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43080000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a13, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43070000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a12, v0
+; GFX908-NEXT:    v_accvgpr_write_b32 a13, v1
+; GFX908-NEXT:    v_accvgpr_write_b32 a12, v2
 ; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43060000
-; GFX908-NEXT:    s_nop 1
+; GFX908-NEXT:    v_mov_b32_e32 v1, 0x43050000
+; GFX908-NEXT:    v_mov_b32_e32 v2, 0x43040000
 ; GFX908-NEXT:    v_accvgpr_write_b32 a11, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43050000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a10, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43040000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a9, v0
+; GFX908-NEXT:    v_accvgpr_write_b32 a10, v1
+; GFX908-NEXT:    v_accvgpr_write_b32 a9, v2
 ; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43030000
-; GFX908-NEXT:    s_nop 1
+; GFX908-NEXT:    v_mov_b32_e32 v1, 0x43020000
+; GFX908-NEXT:    v_mov_b32_e32 v2, 0x43010000
 ; GFX908-NEXT:    v_accvgpr_write_b32 a8, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43020000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a7, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43010000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a6, v0
+; GFX908-NEXT:    v_accvgpr_write_b32 a7, v1
+; GFX908-NEXT:    v_accvgpr_write_b32 a6, v2
 ; GFX908-NEXT:    v_mov_b32_e32 v0, 0x43000000
-; GFX908-NEXT:    s_nop 1
+; GFX908-NEXT:    v_mov_b32_e32 v1, 0x42fe0000
+; GFX908-NEXT:    v_mov_b32_e32 v2, 0x42fc0000
 ; GFX908-NEXT:    v_accvgpr_write_b32 a5, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x42fe0000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a4, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x42fc0000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a3, v0
+; GFX908-NEXT:    v_accvgpr_write_b32 a4, v1
+; GFX908-NEXT:    v_accvgpr_write_b32 a3, v2
 ; GFX908-NEXT:    v_mov_b32_e32 v0, 0x42fa0000
-; GFX908-NEXT:    s_nop 1
+; GFX908-NEXT:    v_mov_b32_e32 v1, 0x42f80000
+; GFX908-NEXT:    v_mov_b32_e32 v2, 0x42f60000
 ; GFX908-NEXT:    v_accvgpr_write_b32 a2, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x42f80000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a1, v0
-; GFX908-NEXT:    v_mov_b32_e32 v0, 0x42f60000
-; GFX908-NEXT:    s_nop 1
-; GFX908-NEXT:    v_accvgpr_write_b32 a0, v0
+; GFX908-NEXT:    v_accvgpr_write_b32 a1, v1
+; GFX908-NEXT:    v_accvgpr_write_b32 a0, v2
+; GFX908-NEXT:    s_mov_b32 s0, 16
 ; GFX908-NEXT:    v_mov_b32_e32 v0, 2.0
+; GFX908-NEXT:    v_mov_b32_e32 v1, 1.0
 ; GFX908-NEXT:  .LBB3_1: ; %for.cond.preheader
 ; GFX908-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX908-NEXT:    s_nop 1

From 5fe4d5ea1b21b382966b5384ed9ce3ee36d6a097 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Wed, 27 Aug 2025 08:12:11 +0900
Subject: [PATCH 2/2] Restore SGPR_LO16 handling

---
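
This hoists the SGPR_LO16 widening back in front of the mov-opcode
loop, so the destination class is normalized before any candidate
opcode is tried, rather than only inside the V_MOV_B16_t16_e64
branch. A minimal sketch of the case it guards (hypothetical true16
MIR; the values are illustrative):

  ; An immediate COPY'd into sgpr_lo16 on a real-true16 subtarget:
  %1:sgpr_lo16 = COPY %0      ; %0 defined by a mov of an immediate
  ; setRegClass widens %1 to sgpr_32 up front, so the immediate can
  ; still fold, and the copy stays legal if no further fold happens.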
 llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index cdbb7a7097bb5..e1bc6d0239111 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1260,6 +1260,13 @@ void SIFoldOperandsImpl::foldOperand(
       return;
 
     const TargetRegisterClass *DestRC = TRI->getRegClassForReg(*MRI, DestReg);
+    // Allow immediates COPYd into sgpr_lo16 to be further folded while
+    // still being legal if not further folded
+    if (DestRC == &AMDGPU::SGPR_LO16RegClass) {
+      assert(ST->useRealTrue16Insts());
+      MRI->setRegClass(DestReg, &AMDGPU::SGPR_32RegClass);
+      DestRC = &AMDGPU::SGPR_32RegClass;
+    }
 
     // In order to fold immediates into copies, we need to change the copy to a
     // MOV. Find a compatible mov instruction with the value.
@@ -1298,14 +1305,6 @@ void SIFoldOperandsImpl::foldOperand(
       UseMI->setDesc(MovDesc);
 
       if (MovOp == AMDGPU::V_MOV_B16_t16_e64) {
-        // Allow immediates COPYd into sgpr_lo16 to be further folded while
-        // still being legal if not further folded
-        if (DestRC == &AMDGPU::SGPR_LO16RegClass) {
-          assert(ST->useRealTrue16Insts());
-          MRI->setRegClass(DestReg, &AMDGPU::SGPR_32RegClass);
-          DestRC = &AMDGPU::SGPR_32RegClass;
-        }
-
         const auto &SrcOp = UseMI->getOperand(UseOpIdx);
         MachineOperand NewSrcOp(SrcOp);
         MachineFunction *MF = UseMI->getParent()->getParent();


