[llvm-branch-commits] [llvm] AMDGPU: Fix foldImmediate breaking register class constraints (PR #127481)
Matt Arsenault via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Mon Feb 17 04:25:05 PST 2025
https://github.com/arsenm created https://github.com/llvm/llvm-project/pull/127481
This fixes a verifier error when folding an immediate that was materialized
into an aligned VGPR class (e.g. vreg_64_align2) into a COPY whose destination
is an unaligned virtual register (e.g. vreg_64): the destination register's
class must be constrained to the new opcode's definition class, or the fold
must be abandoned.
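In rough terms, the fold now constrains the COPY's destination vreg before
rewriting it into the move opcode. A minimal sketch of that pattern follows;
the free-function name and signature are hypothetical (the real change lives
inside SIInstrInfo::foldImmediate), but the calls used are the ones from the
patch:

  #include "SIInstrInfo.h"
  #include "llvm/CodeGen/MachineRegisterInfo.h"
  using namespace llvm;

  // Sketch: rewrite `DstReg = COPY <imm def>` into `DstReg = NewOpc Imm`,
  // but only if DstReg can satisfy NewOpc's def register class.
  static bool rewriteCopyToImmDef(MachineInstr &UseMI, unsigned NewOpc,
                                  int64_t Imm, const SIInstrInfo &TII,
                                  const SIRegisterInfo &RI,
                                  MachineRegisterInfo &MRI) {
    Register DstReg = UseMI.getOperand(0).getReg();
    MachineFunction *MF = UseMI.getParent()->getParent();
    const MCInstrDesc &NewMCID = TII.get(NewOpc);
    // Class required by the new opcode's def operand
    // (e.g. vreg_64_align2 for V_MOV_B64_PSEUDO).
    const TargetRegisterClass *NewDefRC = TII.getRegClass(NewMCID, 0, &RI, *MF);

    if (DstReg.isPhysical()) {
      // A physical destination only needs to be a member of the new class.
      if (!NewDefRC->contains(DstReg))
        return false;
    } else if (!MRI.constrainRegClass(DstReg, NewDefRC)) {
      // Either narrow the virtual register's class (vreg_64 -> vreg_64_align2)
      // or give up on the fold instead of producing a verifier error.
      return false;
    }

    UseMI.setDesc(NewMCID);
    UseMI.getOperand(1).ChangeToImmediate(Imm);
    UseMI.addImplicitDefUseOperands(*MF);
    return true;
  }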
From 19351f47142d05f5845e3d6b12764b6b574e9a7e Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Mon, 17 Feb 2025 16:38:57 +0700
Subject: [PATCH] AMDGPU: Fix foldImmediate breaking register class constraints
This fixes a verifier error when folding an immediate that was materialized
into an aligned VGPR class into a COPY to an unaligned virtual register.
---
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 11 +++--
.../test/CodeGen/AMDGPU/peephole-fold-imm.mir | 43 +++++++++++--------
2 files changed, 32 insertions(+), 22 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 8481c6333f479..0dafa527f722a 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -3473,14 +3473,19 @@ bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
assert(UseMI.getOperand(1).getReg().isVirtual());
}
+ MachineFunction *MF = UseMI.getParent()->getParent();
const MCInstrDesc &NewMCID = get(NewOpc);
- if (DstReg.isPhysical() &&
- !RI.getRegClass(NewMCID.operands()[0].RegClass)->contains(DstReg))
+ const TargetRegisterClass *NewDefRC = getRegClass(NewMCID, 0, &RI, *MF);
+
+ if (DstReg.isPhysical()) {
+ if (!NewDefRC->contains(DstReg))
+ return false;
+ } else if (!MRI->constrainRegClass(DstReg, NewDefRC))
return false;
UseMI.setDesc(NewMCID);
UseMI.getOperand(1).ChangeToImmediate(Imm.getSExtValue());
- UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
+ UseMI.addImplicitDefUseOperands(*MF);
return true;
}
diff --git a/llvm/test/CodeGen/AMDGPU/peephole-fold-imm.mir b/llvm/test/CodeGen/AMDGPU/peephole-fold-imm.mir
index cceed6fd008e4..227af34f3fa6f 100644
--- a/llvm/test/CodeGen/AMDGPU/peephole-fold-imm.mir
+++ b/llvm/test/CodeGen/AMDGPU/peephole-fold-imm.mir
@@ -419,25 +419,30 @@ body: |
...
-# FIXME:
-# ---
-# name: fold_v_mov_b64_64_to_unaligned
-# body: |
-# bb.0:
-# %0:vreg_64_align2 = V_MOV_B64_e32 1311768467750121200, implicit $exec
-# %1:vreg_64 = COPY killed %0
-# SI_RETURN_TO_EPILOG implicit %1
-# ...
-
-# FIXME:
-# ---
-# name: fold_v_mov_b64_pseudo_64_to_unaligned
-# body: |
-# bb.0:
-# %0:vreg_64_align2 = V_MOV_B64_PSEUDO 1311768467750121200, implicit $exec
-# %1:vreg_64 = COPY killed %0
-# SI_RETURN_TO_EPILOG implicit %1
-# ...
+---
+name: fold_v_mov_b64_64_to_unaligned
+body: |
+ bb.0:
+ ; GCN-LABEL: name: fold_v_mov_b64_64_to_unaligned
+ ; GCN: [[V_MOV_B64_e32_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e32 1311768467750121200, implicit $exec
+ ; GCN-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1311768467750121200, implicit $exec
+ ; GCN-NEXT: SI_RETURN_TO_EPILOG implicit [[V_MOV_B]]
+ %0:vreg_64_align2 = V_MOV_B64_e32 1311768467750121200, implicit $exec
+ %1:vreg_64 = COPY killed %0
+ SI_RETURN_TO_EPILOG implicit %1
+...
+
+---
+name: fold_v_mov_b64_pseudo_64_to_unaligned
+body: |
+ bb.0:
+ ; GCN-LABEL: name: fold_v_mov_b64_pseudo_64_to_unaligned
+ ; GCN: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1311768467750121200, implicit $exec
+ ; GCN-NEXT: SI_RETURN_TO_EPILOG implicit [[V_MOV_B]]
+ %0:vreg_64_align2 = V_MOV_B64_PSEUDO 1311768467750121200, implicit $exec
+ %1:vreg_64 = COPY killed %0
+ SI_RETURN_TO_EPILOG implicit %1
+...
---
name: fold_s_brev_b32_simm_virtual_0